diff --git a/sys/conf/files b/sys/conf/files
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -3932,6 +3932,7 @@
 kern/subr_stats.c		optional stats
 kern/subr_taskqueue.c		standard
 kern/subr_terminal.c		optional vt
+kern/subr_ticks.s		standard
 kern/subr_trap.c		standard
 kern/subr_turnstile.c		standard
 kern/subr_uio.c			standard
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -323,7 +323,7 @@
 
 #include <sys/watchdog.h>
 
-static int	watchdog_ticks;
+static long	watchdog_ticks;
 static int	watchdog_enabled;
 static void	watchdog_fire(void);
 static void	watchdog_config(void *, u_int, int *);
@@ -369,10 +369,9 @@
 int	stathz;
 int	profhz;
 int	profprocs;
-volatile int	ticks;
 int	psratio;
 
-DPCPU_DEFINE_STATIC(int, pcputicks);	/* Per-CPU version of ticks. */
+DPCPU_DEFINE_STATIC(long, pcputicks);	/* Per-CPU version of ticks. */
 #ifdef DEVICE_POLLING
 static int devpoll_run = 0;
 #endif
@@ -480,14 +479,14 @@
 	struct pstats *pstats;
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
-	int *t = DPCPU_PTR(pcputicks);
-	int global, i, newticks;
+	long global, newticks, *t;
 
 	/*
 	 * Update per-CPU and possibly global ticks values.
 	 */
+	t = DPCPU_PTR(pcputicks);
 	*t += cnt;
-	global = ticks;
+	global = atomic_load_long(&ticksl);
 	do {
 		newticks = *t - global;
 		if (newticks <= 0) {
@@ -496,7 +495,7 @@
 			newticks = 0;
 			break;
 		}
-	} while (!atomic_fcmpset_int(&ticks, &global, *t));
+	} while (!atomic_fcmpset_long(&ticksl, &global, *t));
 
 	/*
 	 * Run current process's virtual and profile time, as needed.
@@ -525,8 +524,10 @@
 	}
 #endif /* DEVICE_POLLING */
 	if (watchdog_enabled > 0) {
-		i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
-		if (i > 0 && i <= newticks)
+		long left;
+
+		left = atomic_fetchadd_long(&watchdog_ticks, -newticks);
+		if (left > 0 && left <= newticks)
 			watchdog_fire();
 	}
 	intr_event_handle(clk_intr_event, NULL);
@@ -540,11 +541,12 @@
 void
 hardclock_sync(int cpu)
 {
-	int *t;
+	long *t;
+
 	KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
-	t = DPCPU_ID_PTR(cpu, pcputicks);
 
-	*t = ticks;
+	t = DPCPU_ID_PTR(cpu, pcputicks);
+	*t = ticksl;
 }
 
 /*
diff --git a/sys/kern/kern_tc.c b/sys/kern/kern_tc.c
--- a/sys/kern/kern_tc.c
+++ b/sys/kern/kern_tc.c
@@ -1916,9 +1916,9 @@
     "Approximate number of hardclock ticks in a millisecond");
 
 void
-tc_ticktock(int cnt)
+tc_ticktock(long cnt)
 {
-	static int count;
+	static long count;
 
 	if (mtx_trylock_spin(&tc_setclock_mtx)) {
 		count += cnt;
diff --git a/sys/kern/subr_param.c b/sys/kern/subr_param.c
--- a/sys/kern/subr_param.c
+++ b/sys/kern/subr_param.c
@@ -195,9 +195,9 @@
 
 	/*
 	 * Arrange for ticks to wrap 10 minutes after boot to help catch
-	 * sign problems sooner.
+	 * sign problems sooner on platforms where ticksl is 32 bits wide.
 	 */
-	ticks = INT_MAX - (hz * 10 * 60);
+	ticksl = INT_MAX - (hz * 10 * 60);
 
 	vn_lock_pair_pause_max = hz / 100;
 	if (vn_lock_pair_pause_max == 0)
diff --git a/sys/kern/subr_ticks.s b/sys/kern/subr_ticks.s
new file mode 100644
--- /dev/null
+++ b/sys/kern/subr_ticks.s
@@ -0,0 +1,44 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Mark Johnston
+ */
+
+/*
+ * Define the "ticks" and "ticksl" variables.  The former is overlaid onto the
+ * low bits of the latter.
+ */
+
+#if defined(__aarch64__)
+#include <sys/elf_common.h>
+#include <machine/asm.h>
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
+#endif
+
+#ifdef _ILP32
+#define	SIZEOF_TICKSL	4
+#define	TICKSL_INIT	.long 0
+#else
+#define	SIZEOF_TICKSL	8
+#define	TICKSL_INIT	.quad 0
+#endif
+
+#if defined(_ILP32) || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define	TICKS_OFFSET	0
+#else
+#define	TICKS_OFFSET	4
+#endif
+
+	.data
+
+	.global	ticksl
+	.type	ticksl, %object
+	.align	SIZEOF_TICKSL
+ticksl:	TICKSL_INIT
+	.size	ticksl, SIZEOF_TICKSL
+
+	.global	ticks
+	.type	ticks, %object
+ticks	=ticksl + TICKS_OFFSET
+	.size	ticks, 4
diff --git a/sys/sys/kernel.h b/sys/sys/kernel.h
--- a/sys/sys/kernel.h
+++ b/sys/sys/kernel.h
@@ -65,7 +65,15 @@
 extern int stathz;			/* statistics clock's frequency */
 extern int profhz;			/* profiling clock's frequency */
 extern int profprocs;			/* number of process's profiling */
+
+/*
+ * The ticks and ticksl symbols overlap, giving a 64-bit tick counter on 64-bit
+ * platforms while still maintaining compatibility with the legacy 32-bit
+ * counter.  Either value can be used, but rollover must be handled; at 1000Hz,
+ * ticks (and ticksl on 32-bit platforms) roll over roughly every 25 days.
+ */
 extern volatile int ticks;
+extern volatile long ticksl;
 
 #endif /* _KERNEL */
 
diff --git a/sys/sys/timetc.h b/sys/sys/timetc.h
--- a/sys/sys/timetc.h
+++ b/sys/sys/timetc.h
@@ -87,7 +87,7 @@
 u_int64_t tc_getfrequency(void);
 void	tc_init(struct timecounter *tc);
 void	tc_setclock(struct timespec *ts);
-void	tc_ticktock(int cnt);
+void	tc_ticktock(long cnt);
 void	cpu_tick_calibration(void);
 
 #ifdef SYSCTL_DECL
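
A note on the rollover caveat (not part of the patch itself): the comment added
to sys/sys/kernel.h says that consumers of ticks or ticksl must handle
wraparound, which subr_param.c deliberately provokes ten minutes after boot.
A minimal sketch of the usual signed-difference idiom, assuming <sys/types.h>
and <sys/kernel.h> are in scope and using a hypothetical helper name:

	/*
	 * Hypothetical example: has "deadline" (a value previously computed
	 * as ticks + timeout, with timeout well under INT_MAX / 2) passed?
	 * The subtraction is performed in unsigned arithmetic and the result
	 * reinterpreted as signed, so the test stays correct across the
	 * INT_MAX -> INT_MIN wrap of ticks.
	 */
	static inline bool
	tick_deadline_passed(int deadline)
	{
		return ((int)((u_int)ticks - (u_int)deadline) >= 0);
	}

The same idiom works for ticksl with long/u_long types; only the wrap period
changes on 64-bit platforms.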