diff --git a/head/sys/kern/kern_tc.c b/head/sys/kern/kern_tc.c
--- a/head/sys/kern/kern_tc.c
+++ b/head/sys/kern/kern_tc.c
@@ -2094,20 +2094,14 @@
  * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
  * before divide conversion (to retain precision) we find that the
  * margin shrinks to 1.5 hours (one millionth of 146y).
- * With a three prong approach we never lose significant bits, no
- * matter what the cputick rate and length of timeinterval is.
  */
 
 uint64_t
 cputick2usec(uint64_t tick)
 {
-
-	if (tick > 18446744073709551LL)		/* floor(2^64 / 1000) */
-		return (tick / (cpu_tickrate() / 1000000LL));
-	else if (tick > 18446744073709LL)	/* floor(2^64 / 1000000) */
-		return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
-	else
-		return ((tick * 1000000LL) / cpu_tickrate());
+	uint64_t tr;
+	tr = cpu_tickrate();
+	return ((tick / tr) * 1000000ULL) + ((tick % tr) * 1000000ULL) / tr;
 }
 
 cpu_tick_f	*cpu_ticks = tc_cpu_ticks;
diff --git a/head/sys/kern/kern_time.c b/head/sys/kern/kern_time.c
--- a/head/sys/kern/kern_time.c
+++ b/head/sys/kern/kern_time.c
@@ -243,9 +243,10 @@
 static inline void
 cputick2timespec(uint64_t runtime, struct timespec *ats)
 {
-	runtime = cputick2usec(runtime);
-	ats->tv_sec = runtime / 1000000;
-	ats->tv_nsec = runtime % 1000000 * 1000;
+	uint64_t tr;
+	tr = cpu_tickrate();
+	ats->tv_sec = runtime / tr;
+	ats->tv_nsec = ((runtime % tr) * 1000000000ULL) / tr;
 }
 
 void
@@ -474,10 +475,7 @@
 	case CLOCK_THREAD_CPUTIME_ID:
 	case CLOCK_PROCESS_CPUTIME_ID:
 	cputime:
-		/* sync with cputick2usec */
-		ts->tv_nsec = 1000000 / cpu_tickrate();
-		if (ts->tv_nsec == 0)
-			ts->tv_nsec = 1000;
+		ts->tv_nsec = 1000000000 / cpu_tickrate() + 1;
 		break;
 	default:
 		if ((int)clock_id < 0)
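
Note on the conversion technique (not part of the patch): the new cputick2usec() splits the
conversion into a whole-seconds part and a sub-second remainder part, so no intermediate
product can overflow 64 bits and no significant bits of the tick rate are thrown away. The
standalone sketch below illustrates the same idea; the name ticks_to_usec(), the 4 GHz rate
and the 10-year interval are made up for the example and do not appear in the kernel sources.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch: convert "ticks" counted at "rate" ticks per second
 * into microseconds, mirroring the
 * (tick / tr) * 1000000 + ((tick % tr) * 1000000) / tr
 * form of the new cputick2usec().
 */
static uint64_t
ticks_to_usec(uint64_t ticks, uint64_t rate)
{
	/* Whole seconds convert exactly: (ticks / rate) full seconds. */
	uint64_t usec = (ticks / rate) * 1000000ULL;

	/*
	 * The sub-second remainder is scaled separately.  Since
	 * ticks % rate < rate (a few billion at most), the product stays
	 * far below 2^64, so nothing is lost to overflow or to
	 * pre-dividing the rate.
	 */
	usec += ((ticks % rate) * 1000000ULL) / rate;
	return (usec);
}

int
main(void)
{
	uint64_t rate = 4000000000ULL;			/* 4 GHz, hypothetical */
	uint64_t ticks = rate * 86400ULL * 3650ULL;	/* ~10 years of ticks */

	/*
	 * A naive ticks * 1000000 / rate would overflow uint64_t for this
	 * input, and the removed code's fallback of pre-dividing the rate
	 * by 1000000 discards its low digits; the split form is exact.
	 */
	printf("%llu usec\n", (unsigned long long)ticks_to_usec(ticks, rate));
	return (0);
}

The same split is what keeps cputick2timespec() exact at nanosecond scale: the remainder
(runtime % tr) is below the tick rate, so multiplying it by 1000000000 still fits comfortably
in 64 bits for any plausible CPU frequency.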