diff --git a/sys/compat/freebsd32/freebsd32_misc.c b/sys/compat/freebsd32/freebsd32_misc.c --- a/sys/compat/freebsd32/freebsd32_misc.c +++ b/sys/compat/freebsd32/freebsd32_misc.c @@ -4115,10 +4115,10 @@ CP(cest32, cest, leapsec_total); CP(cest32, cest, leapsec_next); - mtx_lock(&ffclock_mtx); + mtx_lock_spin(&ffclock_mtx); memcpy(&ffclock_estimate, &cest, sizeof(struct ffclock_estimate)); ffclock_updated++; - mtx_unlock(&ffclock_mtx); + mtx_unlock_spin(&ffclock_mtx); return (error); } @@ -4130,9 +4130,9 @@ struct ffclock_estimate32 cest32; int error; - mtx_lock(&ffclock_mtx); + mtx_lock_spin(&ffclock_mtx); memcpy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate)); - mtx_unlock(&ffclock_mtx); + mtx_unlock_spin(&ffclock_mtx); CP(cest.update_time, cest32.update_time, sec); memcpy(&cest32.update_time.frac, &cest.update_time.frac, sizeof(uint64_t)); diff --git a/sys/kern/kern_ffclock.c b/sys/kern/kern_ffclock.c --- a/sys/kern/kern_ffclock.c +++ b/sys/kern/kern_ffclock.c @@ -101,7 +101,7 @@ bt->sec -= cest.leapsec_next; } - /* Boot time adjustment, for uptime/monotonic clocks. */ + /* Uptime clock case, obtain from UTC via boottime UTC timestamp. */ if ((flags & FFCLOCK_UPTIME) == FFCLOCK_UPTIME) { bintime_sub(bt, &ffclock_boottime); } @@ -399,9 +399,9 @@ } /* - * System call allowing the synchronisation daemon to push new feedforward clock - * estimates to the kernel. Acquire ffclock_mtx to prevent concurrent updates - * and ensure data consistency. + * System call allowing the synchronisation daemon to push new feedforward + * clock estimates to the kernel. Acquire ffclock_mtx to prevent concurrent + * updates and ensure data consistency. * NOTE: ffclock_updated signals the fftimehands that new estimates are * available. The updated estimates are picked up by the fftimehands on next * tick, which could take as long as 1/hz seconds (if ticks are not missed). 
@@ -426,10 +426,11 @@ != 0) return (error); - mtx_lock(&ffclock_mtx); + mtx_lock_spin(&ffclock_mtx); memcpy(&ffclock_estimate, &cest, sizeof(struct ffclock_estimate)); ffclock_updated++; - mtx_unlock(&ffclock_mtx); + mtx_unlock_spin(&ffclock_mtx); + return (error); } @@ -450,9 +451,9 @@ struct ffclock_estimate cest; int error; - mtx_lock(&ffclock_mtx); + mtx_lock_spin(&ffclock_mtx); memcpy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate)); - mtx_unlock(&ffclock_mtx); + mtx_unlock_spin(&ffclock_mtx); error = copyout(&cest, uap->cest, sizeof(struct ffclock_estimate)); return (error); } diff --git a/sys/kern/kern_tc.c b/sys/kern/kern_tc.c --- a/sys/kern/kern_tc.c +++ b/sys/kern/kern_tc.c @@ -557,45 +557,7 @@ ffclock_updated = 0; ffclock_status = FFCLOCK_STA_UNSYNC; ffclock_boottime.sec = ffclock_boottime.frac = 0; - mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF); -} - -/* - * Reset the feedforward clock estimates. Called from inittodr() to get things - * kick started and uses the timecounter nominal frequency as a first period - * estimate. Note: this function may be called several time just after boot. - * Note: this is the only function that sets the value of boot time for the - * monotonic (i.e. uptime) version of the feedforward clock. 
- */
-void
-ffclock_reset_clock(struct timespec *ts)
-{
- struct timecounter *tc;
- struct ffclock_estimate cest;
-
- tc = timehands->th_counter;
- memset(&cest, 0, sizeof(struct ffclock_estimate));
-
- timespec2bintime(ts, &ffclock_boottime);
- timespec2bintime(ts, &(cest.update_time));
- ffclock_read_counter(&cest.update_ffcount);
- cest.secs_to_nextupdate = 0;
- cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
- cest.errb_abs = 0;
- cest.errb_rate = 0;
- cest.status = FFCLOCK_STA_UNSYNC;
- cest.leapsec_expected = 0;
- cest.leapsec_total = 0;
- cest.leapsec_next = 0;
-
- mtx_lock(&ffclock_mtx);
- memcpy(&ffclock_estimate, &cest, sizeof(struct ffclock_estimate));
- ffclock_updated = INT8_MAX;
- mtx_unlock(&ffclock_mtx);
-
- printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
- (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
- (unsigned long)ts->tv_nsec);
+ mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_SPIN);
 }

 /*
@@ -639,13 +601,19 @@
 *
 * The instant defining the start of the new tick is the delta=tc_delta call
 * from tc_windup. This is simply mirrored here in the FF counter `read'.
+ *
+ * If an RTC reset occurs, then tc_windup is called within tc_setclock with a
+ * boottime argument, passed here as reset_FBbootime. If non-NULL, FFclocks are
+ * reset using this and the UTC reset calculated in tc_windup, and FFdata is
+ * reinitialized to basic values.
 */
 static void
-ffclock_windup(unsigned int delta)
+ffclock_windup(unsigned int delta, struct bintime *reset_FBbootime,
+ struct bintime *reset_UTC)
 {
 struct ffclock_estimate *cest;
 struct fftimehands *ffth;
- struct bintime bt, gap;
+ struct bintime bt, gap, upt;
 ffcounter ffdelta;
 uint64_t frac;
 uint8_t forward_jump, ogen;
@@ -661,10 +629,99 @@
 ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;

 /*
- * No acceptable update in FFclock parameters to process. Tick
+ * RTC reset: reset all FFclocks, and the daemon natFFC data.
+ * The period is initialized only if needed. 
+ */ + if (reset_FBbootime) { + /* Acceptable to ignore a potentially pending update here. */ + memcpy(cest, &fftimehands->cest,sizeof(struct ffclock_estimate)); + + /* + * Set value of ffclock_boottime to maximize Upclock continuity. + * sysclock = FB : kernel won't see a jump now, align FF and + * FB to minimize jump if sysclock changes + * = FF : ensure cont'y in FF and hence sysclock Uptime + */ + if (sysclock_active == SYSCLOCK_FB) + ffclock_boottime = *reset_FBbootime; + else { + /* First calculate what monoFFC would have been. */ + ffth->tick_time_mono = fftimehands->tick_time_mono; + ffclock_convert_delta(ffdelta, ffth->period_mono, &bt); + bintime_add(&ffth->tick_time_mono, &bt); + + /* Cancel out jump in Uptime due to reset. */ + bintime_clear(&gap); + if (bintime_cmp(reset_UTC, &ffth->tick_time_mono, >)) { + gap = *reset_UTC; + bintime_sub(&gap, &ffth->tick_time_mono); + bintime_add(&ffclock_boottime, &gap); + } else { + gap = ffth->tick_time_mono; + bintime_sub(&gap, reset_UTC); + bintime_sub(&ffclock_boottime, &gap); + } + } + + /* Align UTC clocks to the FB reset via the RTC reset. */ + ffth->tick_time = *reset_UTC; + ffth->tick_time_mono = *reset_UTC; + ffth->tick_time_diff = *reset_UTC; + + /* Reset natFFC to reflect the reset, effected at tick-start. */ + cest->update_time = *reset_UTC; + cest->update_ffcount = ffth->tick_ffcount; + if (cest->period == 0) // if never set + cest->period = ((1ULL << 63)/ \ + timehands->th_counter->tc_frequency) << 1; + + cest->errb_abs = 0; + cest->errb_rate = 0; + cest->status = FFCLOCK_STA_UNSYNC; + cest->secs_to_nextupdate = 0; // no daemon update since reset + cest->leapsec_expected = 0; + cest->leapsec_total = 0; + cest->leapsec_next = 0; + mtx_lock_spin(&ffclock_mtx); + memcpy(&ffclock_estimate, cest,sizeof(struct ffclock_estimate)); + ffclock_updated = 0; // signal no daemon update to process + mtx_unlock_spin(&ffclock_mtx); + + ffclock_status = FFCLOCK_STA_UNSYNC; + + /* Reset remaining fftimehands members. 
*/ + ffth->tick_error.sec = ffth->tick_error.frac = 0; + ffth->period_mono = cest->period; + + upt = ffth->tick_time_mono; + bintime_sub(&upt, &ffclock_boottime); + printf("FFclock processing RTC reset: UTC: %lld.%03u" + " boottime: %llu.%03u, uptime: %llu.%03u\n", + (long long)ffth->tick_time_mono.sec, + (unsigned int)(ffth->tick_time_mono.frac / MS_AS_BINFRAC), + (unsigned long long)ffclock_boottime.sec, + (unsigned int)(ffclock_boottime.frac / MS_AS_BINFRAC), + (unsigned long long)upt.sec, + (unsigned int)(upt.frac / MS_AS_BINFRAC) ); + + } + + /* + * Signal to ignore a stale daemon update following a RTC reset. + */ + if (ffclock_updated > 0 && fftimehands->cest.secs_to_nextupdate == 0 + && bintime_cmp(&fftimehands->cest.update_time, + &ffclock_estimate.update_time,>) ) { + ffclock_updated = 0; + printf("Ignoring stale natFFC update following RTC reset.\n"); + } + + /* + * No acceptable update in FFclock parameters to process. Includes case + * of daemon update following a RTC reset that must be ignored. Tick * state update based on copy or simple projection from previous tick. */ - if (ffclock_updated <= 0) { + if (ffclock_updated <= 0 && reset_FBbootime == NULL) { /* Update natFFC members {cest, tick_time{_diff}, tick_error} */ memcpy(cest, &fftimehands->cest,sizeof(struct ffclock_estimate)); @@ -722,18 +779,13 @@ bintime_add(&ffth->tick_time_diff, &bt); /* - * Update monoFFC member tick_time_mono, standard case. - * ffclock_updated by ffclock_setto_rtc : re-initialize - * ffclock_updated by daemon : ensure continuity across ticks + * Update monoFFC member tick_time_mono, standard case, + * ensuring continuity over ticks. 
*/ - if (ffclock_updated == INT8_MAX) // set by ffclock_reset_clock - ffth->tick_time_mono = ffth->tick_time; - else { - ffth->tick_time_mono = fftimehands->tick_time_mono; - ffclock_convert_delta((ffcounter)delta, - fftimehands->period_mono, &bt); - bintime_add(&ffth->tick_time_mono, &bt); - } + ffth->tick_time_mono = fftimehands->tick_time_mono; + ffclock_convert_delta((ffcounter)delta, + fftimehands->period_mono, &bt); + bintime_add(&ffth->tick_time_mono, &bt); /* Record direction of jump between monoFFC and natFFC. */ if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_mono, >)) @@ -760,9 +812,8 @@ * ffclock_boottime to ensure continuity of uptime functions. * If the jump is forward, then monoFFC remains monotonic. */ - if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) && - ((cest->status & FFCLOCK_STA_UNSYNC) == 0) && - ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) { + if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) + && ((cest->status & FFCLOCK_STA_UNSYNC) == 0) ) { if (forward_jump) { printf("ffwindup: forward"); bintime_add(&ffclock_boottime, &gap); @@ -773,6 +824,12 @@ printf(" jump for monoFFclock of %llu.%03u", (unsigned long long)gap.sec, (unsigned int)(gap.frac / MS_AS_BINFRAC) ); + + upt = ffth->tick_time_mono; + bintime_sub(&upt, &ffclock_boottime); + printf(" (uptime preserved at: %llu.%03u)\n", + (unsigned long long)upt.sec, + (unsigned int)(upt.frac / MS_AS_BINFRAC) ); ffth->tick_time_mono = ffth->tick_time; /* Signal nothing to do to period_mono algo below. */ @@ -893,8 +950,11 @@ ffth->tick_time_mono = fftimehands->tick_time_mono; ffth->period_mono = cest->period; - /* Do not lock but ignore next update from synchronization daemon. */ - ffclock_updated--; + /* Push the reset natFFC data to the global variable. 
*/ + mtx_lock_spin(&ffclock_mtx); + memcpy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate)); + ffclock_updated--; // ensure next daemon update will be ignored + mtx_unlock_spin(&ffclock_mtx); if (++ogen == 0) ogen = 1; @@ -1538,9 +1598,7 @@ ncount = tc->tc_get_timecount(tc); else ncount = 0; -#ifdef FFCLOCK - ffclock_windup(delta); -#endif + th->th_offset_count += delta; th->th_offset_count &= th->th_counter->tc_counter_mask; bintime_add_tc_delta(&th->th_offset, th->th_scale, @@ -1568,6 +1626,9 @@ */ bt = th->th_offset; bintime_add(&bt, &th->th_boottime); +#ifdef FFCLOCK + ffclock_windup(delta, new_boottimebin, &bt); +#endif i = bt.sec - tho->th_microtime.tv_sec; if (i > 0) { if (i > LARGE_STEP) diff --git a/sys/kern/subr_rtc.c b/sys/kern/subr_rtc.c --- a/sys/kern/subr_rtc.c +++ b/sys/kern/subr_rtc.c @@ -55,7 +55,6 @@ */ #include -#include "opt_ffclock.h" #include #include @@ -67,9 +66,6 @@ #include #include #include -#ifdef FFCLOCK -#include -#endif #include #include "clock_if.h" @@ -358,12 +354,9 @@ ts.tv_nsec = 0; } - if (ts.tv_sec >= 0) { + if (ts.tv_sec >= 0) tc_setclock(&ts); -#ifdef FFCLOCK - ffclock_reset_clock(&ts); -#endif - } + } /* diff --git a/sys/sys/timeffc.h b/sys/sys/timeffc.h --- a/sys/sys/timeffc.h +++ b/sys/sys/timeffc.h @@ -170,9 +170,6 @@ int sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt, int clockfamily, uint32_t flags); -/* Resets feedforward clock from RTC */ -void ffclock_reset_clock(struct timespec *ts); - /* * Return the current value of the feedforward clock counter. Essential to * measure time interval in counter units. If a fast timecounter is used by the