diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
--- a/sys/amd64/amd64/support.S
+++ b/sys/amd64/amd64/support.S
@@ -41,6 +41,16 @@
 
 #include "assym.inc"
 
+	.data
+	.globl	ticksl
+	.type	ticksl, @object
+	.globl	ticks
+	.type	ticks, @object
+ticksl:	.quad	0
+	.size	ticksl, 8
+ticks	=ticksl
+	.size	ticks, 4
+
 	.text
 
 /* Address: %rdi */
diff --git a/sys/i386/i386/support.S b/sys/i386/i386/support.S
--- a/sys/i386/i386/support.S
+++ b/sys/i386/i386/support.S
@@ -36,6 +36,16 @@
 
 #define IDXSHIFT	10
 
+	.data
+	.globl	ticksl
+	.type	ticksl, @object
+	.globl	ticks
+	.type	ticks, @object
+ticksl:	.long	0
+	.size	ticksl, 4
+ticks	=ticksl
+	.size	ticks, 4
+
 	.text
 
 ENTRY(sse2_pagezero)
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -323,7 +323,7 @@
 
 #include <sys/watchdog.h>
 
-static int watchdog_ticks;
+static long watchdog_ticks;
 static int watchdog_enabled;
 static void watchdog_fire(void);
 static void watchdog_config(void *, u_int, int *);
@@ -369,10 +369,9 @@
 int	stathz;
 int	profhz;
 int	profprocs;
-volatile int	ticks;
 int	psratio;
 
-DPCPU_DEFINE_STATIC(int, pcputicks);	/* Per-CPU version of ticks. */
+DPCPU_DEFINE_STATIC(long, pcputicks);	/* Per-CPU version of ticks. */
 #ifdef DEVICE_POLLING
 static int devpoll_run = 0;
 #endif
@@ -480,14 +479,14 @@
 	struct pstats *pstats;
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
-	int *t = DPCPU_PTR(pcputicks);
-	int global, i, newticks;
+	long global, i, newticks, *t;
 
 	/*
 	 * Update per-CPU and possibly global ticks values.
 	 */
+	t = DPCPU_PTR(pcputicks);
 	*t += cnt;
-	global = ticks;
+	global = atomic_load_long(&ticksl);
 	do {
 		newticks = *t - global;
 		if (newticks <= 0) {
@@ -496,7 +495,7 @@
 			newticks = 0;
 			break;
 		}
-	} while (!atomic_fcmpset_int(&ticks, &global, *t));
+	} while (!atomic_fcmpset_long(&ticksl, &global, *t));
 
 	/*
 	 * Run current process's virtual and profile time, as needed.
@@ -525,7 +524,7 @@
 	}
 #endif /* DEVICE_POLLING */
 	if (watchdog_enabled > 0) {
-		i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
+		i = atomic_fetchadd_long(&watchdog_ticks, -newticks);
 		if (i > 0 && i <= newticks)
 			watchdog_fire();
 	}
@@ -540,11 +539,12 @@
 void
 hardclock_sync(int cpu)
 {
-	int *t;
+	long *t;
+
 	KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
-	t = DPCPU_ID_PTR(cpu, pcputicks);
 
-	*t = ticks;
+	t = DPCPU_ID_PTR(cpu, pcputicks);
+	*t = ticksl;
 }
 
 /*
diff --git a/sys/kern/subr_param.c b/sys/kern/subr_param.c
--- a/sys/kern/subr_param.c
+++ b/sys/kern/subr_param.c
@@ -193,11 +193,13 @@
 	tick_bt = sbttobt(tick_sbt);
 	tick_seconds_max = INT_MAX / hz;
 
+#if LONG_MAX == 0x7fffffffL
 	/*
 	 * Arrange for ticks to wrap 10 minutes after boot to help catch
-	 * sign problems sooner.
+	 * sign problems sooner on platforms where ticksl is 32 bits wide.
 	 */
-	ticks = INT_MAX - (hz * 10 * 60);
+	ticksl = LONG_MAX - (hz * 10 * 60);
+#endif
 
 	vn_lock_pair_pause_max = hz / 100;
 	if (vn_lock_pair_pause_max == 0)
diff --git a/sys/sys/kernel.h b/sys/sys/kernel.h
--- a/sys/sys/kernel.h
+++ b/sys/sys/kernel.h
@@ -65,7 +65,17 @@
 extern int stathz;			/* statistics clock's frequency */
 extern int profhz;			/* profiling clock's frequency */
 extern int profprocs;			/* number of process's profiling */
+
+/*
+ * The ticks and ticksl symbols overlap, giving a 64-bit tick counter on 64-bit
+ * platforms while still maintaining compatibility with the legacy 32-bit
+ * counter.  Either can be used; 32-bit values require less space, but rollover
+ * must be handled: the value can roll over in a little under 25 days at
+ * 1000Hz.  In general it is better to use ticksl unless there is a specific
+ * need to do otherwise.
+ */
 extern volatile int ticks;
+extern volatile long ticksl;
 #endif /* _KERNEL */
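
Editorial note, not part of the patch: the new kernel.h comment says rollover of the
32-bit ticks counter must be handled. The usual way to do that is to compare
differences rather than absolute values, so the comparison stays correct when the
counter wraps. Below is a minimal sketch of that idiom; the helper name
ticks_elapsed and its parameters are hypothetical, and only the ticks/ticksl
declarations come from <sys/kernel.h>.

/*
 * Editorial sketch: wraparound-safe check for "have at least 'timo' ticks
 * elapsed since 'start'?" using the 32-bit counter.  The helper name and
 * parameters are hypothetical, not part of the kernel API.
 */
#include <sys/param.h>
#include <sys/kernel.h>		/* declares volatile int ticks and long ticksl */

static inline int
ticks_elapsed(int start, int timo)
{
	/*
	 * Unsigned subtraction is well defined across the wrap (a little
	 * under 25 days at hz = 1000); converting the difference back to
	 * int keeps the comparison meaningful for any interval shorter
	 * than about 2^31 ticks.
	 */
	return ((int)((u_int)ticks - (u_int)start) >= timo);
}

Code that cannot bound the interval that tightly is better served by reading the
64-bit ticksl directly, which is what the comment in sys/kernel.h recommends.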