Page Menu — Home — FreeBSD

D31418.id93292.diff
No One — Temporary

D31418.id93292.diff

Index: lib/libc/x86/sys/__vdso_gettc.c
===================================================================
--- lib/libc/x86/sys/__vdso_gettc.c
+++ lib/libc/x86/sys/__vdso_gettc.c
@@ -45,6 +45,7 @@
#include "un-namespace.h"
#include <machine/atomic.h>
#include <machine/cpufunc.h>
+#include <machine/pvclock.h>
#include <machine/specialreg.h>
#include <dev/acpica/acpi_hpet.h>
#ifdef WANT_HYPERV
@@ -312,6 +313,62 @@
#endif /* WANT_HYPERV */
+static struct pvclock_vcpu_time_info *pvclock_timeinfos;
+
+static int
+__vdso_pvclock_gettc(const struct vdso_timehands *th, u_int *tc)
+{
+ uint64_t delta, ns, tsc;
+ struct pvclock_vcpu_time_info *ti;
+ uint32_t cpuid_ti, cpuid_tsc, version;
+ bool stable;
+
+ do {
+ ti = &pvclock_timeinfos[0];
+ version = atomic_load_acq_32(&ti->version);
+ stable = (ti->flags & th->th_x86_pvc_stable_mask) != 0;
+ if (stable) {
+ tsc = rdtscp();
+ } else {
+ (void)rdtscp_aux(&cpuid_ti);
+ ti = &pvclock_timeinfos[cpuid_ti];
+ version = atomic_load_acq_32(&ti->version);
+ tsc = rdtscp_aux(&cpuid_tsc);
+ }
+ delta = tsc - ti->tsc_timestamp;
+ ns = ti->system_time + pvclock_scale_delta(delta,
+ ti->tsc_to_system_mul, ti->tsc_shift);
+ atomic_thread_fence_acq();
+ } while ((ti->version & 1) != 0 || ti->version != version ||
+ (!stable && cpuid_ti != cpuid_tsc));
+ *tc = MAX(ns, th->th_x86_pvc_last_systime);
+ return (0);
+}
+
+static void
+__vdso_init_pvclock_timeinfos(void)
+{
+ struct pvclock_vcpu_time_info *timeinfos;
+ size_t len;
+ int fd, ncpus;
+ unsigned int mode;
+
+ timeinfos = MAP_FAILED;
+ if (_elf_aux_info(AT_NCPUS, &ncpus, sizeof(ncpus)) != 0 ||
+ (cap_getmode(&mode) == 0 && mode != 0) ||
+ (fd = _open("/dev/" PVCLOCK_CDEVNAME, O_RDONLY | O_CLOEXEC)) < 0)
+ goto leave;
+ len = ncpus * sizeof(*pvclock_timeinfos);
+ timeinfos = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
+ _close(fd);
+leave:
+ if (atomic_cmpset_rel_ptr(
+ (volatile uintptr_t *)&pvclock_timeinfos, (uintptr_t)NULL,
+ (uintptr_t)timeinfos) == 0 && timeinfos != MAP_FAILED)
+ (void)munmap((void *)timeinfos, len);
+ return;
+}
+
#pragma weak __vdso_gettc
int
__vdso_gettc(const struct vdso_timehands *th, u_int *tc)
@@ -347,6 +404,12 @@
return (ENOSYS);
return (__vdso_hyperv_tsc(hyperv_ref_tsc, tc));
#endif
+ case VDSO_TH_ALGO_X86_PVCLK:
+ if (pvclock_timeinfos == NULL)
+ __vdso_init_pvclock_timeinfos();
+ if (pvclock_timeinfos == MAP_FAILED)
+ return (ENOSYS);
+ return (__vdso_pvclock_gettc(th, tc));
default:
return (ENOSYS);
}
Index: sys/dev/acpica/acpi_hpet.c
===================================================================
--- sys/dev/acpica/acpi_hpet.c
+++ sys/dev/acpica/acpi_hpet.c
@@ -156,6 +156,8 @@
vdso_th->th_algo = VDSO_TH_ALGO_X86_HPET;
vdso_th->th_x86_shift = 0;
vdso_th->th_x86_hpet_idx = device_get_unit(sc->dev);
+ vdso_th->th_x86_pvc_last_systime = 0;
+ vdso_th->th_x86_pvc_stable_mask = 0;
bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
return (sc->mmap_allow != 0);
}
@@ -171,6 +173,8 @@
vdso_th32->th_algo = VDSO_TH_ALGO_X86_HPET;
vdso_th32->th_x86_shift = 0;
vdso_th32->th_x86_hpet_idx = device_get_unit(sc->dev);
+ vdso_th32->th_x86_pvc_last_systime = 0;
+ vdso_th32->th_x86_pvc_stable_mask = 0;
bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
return (sc->mmap_allow != 0);
}
Index: sys/dev/hyperv/vmbus/amd64/hyperv_machdep.c
===================================================================
--- sys/dev/hyperv/vmbus/amd64/hyperv_machdep.c
+++ sys/dev/hyperv/vmbus/amd64/hyperv_machdep.c
@@ -128,6 +128,8 @@
vdso_th->th_algo = VDSO_TH_ALGO_X86_HVTSC;
vdso_th->th_x86_shift = 0;
vdso_th->th_x86_hpet_idx = 0;
+ vdso_th->th_x86_pvc_last_systime = 0;
+ vdso_th->th_x86_pvc_stable_mask = 0;
bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
return (1);
}
Index: sys/x86/include/pvclock.h
===================================================================
--- sys/x86/include/pvclock.h
+++ sys/x86/include/pvclock.h
@@ -29,6 +29,14 @@
#ifndef X86_PVCLOCK
#define X86_PVCLOCK
+#include <sys/types.h>
+
+#ifdef _KERNEL
+#include <sys/timetc.h>
+#endif /* _KERNEL */
+
+#define PVCLOCK_CDEVNAME "pvclock"
+
struct pvclock_vcpu_time_info {
uint32_t version;
uint32_t pad0;
@@ -43,17 +51,96 @@
#define PVCLOCK_FLAG_TSC_STABLE 0x01
#define PVCLOCK_FLAG_GUEST_PASUED 0x02
+/*
+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
+ * yielding a 64-bit result.
+ */
+static inline uint64_t
+pvclock_scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
+{
+ uint64_t product;
+
+ if (shift < 0)
+ delta >>= -shift;
+ else
+ delta <<= shift;
+#if defined(__i386__)
+ {
+ uint32_t tmp1, tmp2;
+
+ /**
+ * For i386, the formula looks like:
+ *
+ * lower = (mul_frac * (delta & UINT_MAX)) >> 32
+ * upper = mul_frac * (delta >> 32)
+ * product = lower + upper
+ */
+ __asm__ (
+ "mul %5 ; "
+ "mov %4,%%eax ; "
+ "mov %%edx,%4 ; "
+ "mul %5 ; "
+ "xor %5,%5 ; "
+ "add %4,%%eax ; "
+ "adc %5,%%edx ; "
+ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
+ : "a" ((uint32_t)delta), "1" ((uint32_t)(delta >> 32)),
+ "2" (mul_frac) );
+ }
+#elif defined(__amd64__)
+ {
+ unsigned long tmp;
+
+ __asm__ (
+ "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
+ : [lo]"=a" (product), [hi]"=d" (tmp)
+ : "0" (delta), [mul_frac]"rm"((uint64_t)mul_frac));
+ }
+#else
+#error "pvclock: unsupported x86 architecture?"
+#endif
+ return (product);
+}
+
+#ifdef _KERNEL
+
+typedef struct pvclock_wall_clock *pvclock_get_wallclock_t(void *arg);
+
struct pvclock_wall_clock {
uint32_t version;
uint32_t sec;
uint32_t nsec;
};
+struct pvclock {
+ /* Public; initialized by the caller of 'pvclock_init()': */
+ pvclock_get_wallclock_t *get_wallclock;
+ void *get_wallclock_arg;
+ struct pvclock_vcpu_time_info *timeinfos;
+ bool stable_flag_supported;
+
+ /* Private; initialized by the 'pvclock' API: */
+ bool vdso_force_unstable;
+ struct timecounter tc;
+ struct cdev *cdev;
+};
+
+/*
+ * NOTE: 'pvclock_get_timecount()' and 'pvclock_get_wallclock()' are purely
+ * transitional; they should be removed after 'dev/xen/timer/timer.c' has been
+ * migrated to the 'struct pvclock' API.
+ */
void pvclock_resume(void);
-uint64_t pvclock_get_last_cycles(void);
uint64_t pvclock_tsc_freq(struct pvclock_vcpu_time_info *ti);
uint64_t pvclock_get_timecount(struct pvclock_vcpu_time_info *ti);
void pvclock_get_wallclock(struct pvclock_wall_clock *wc,
struct timespec *ts);
+void pvclock_init(struct pvclock *pvc, device_t dev,
+ const char *tc_name, int tc_quality, u_int tc_flags);
+void pvclock_gettime(struct pvclock *pvc, struct timespec *ts);
+int pvclock_destroy(struct pvclock *pvc);
+
+#endif /* _KERNEL */
+
#endif
Index: sys/x86/include/vdso.h
===================================================================
--- sys/x86/include/vdso.h
+++ sys/x86/include/vdso.h
@@ -37,11 +37,14 @@
#define VDSO_TIMEHANDS_MD \
uint32_t th_x86_shift; \
uint32_t th_x86_hpet_idx; \
- uint32_t th_res[6];
+ uint64_t th_x86_pvc_last_systime;\
+ uint8_t th_x86_pvc_stable_mask; \
+ uint8_t th_res[15];
#define VDSO_TH_ALGO_X86_TSC VDSO_TH_ALGO_1
#define VDSO_TH_ALGO_X86_HPET VDSO_TH_ALGO_2
#define VDSO_TH_ALGO_X86_HVTSC VDSO_TH_ALGO_3 /* Hyper-V ref. TSC */
+#define VDSO_TH_ALGO_X86_PVCLK VDSO_TH_ALGO_4 /* KVM/XEN paravirtual clock */
#ifdef _KERNEL
#ifdef COMPAT_FREEBSD32
Index: sys/x86/x86/pvclock.c
===================================================================
--- sys/x86/x86/pvclock.c
+++ sys/x86/x86/pvclock.c
@@ -31,31 +31,61 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/clock.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/limits.h>
+#include <sys/mman.h>
#include <sys/proc.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/vdso.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
-#include <machine/cpufunc.h>
-#include <machine/cpu.h>
#include <machine/atomic.h>
+#include <machine/cpufunc.h>
+#include <machine/md_var.h>
#include <machine/pvclock.h>
/*
- * Last time; this guarantees a monotonically increasing clock for when
- * a stable TSC is not provided.
+ * Last system time. This is used to guarantee a monotonically non-decreasing
+ * clock for the kernel codepath and approximate the same for the vDSO codepath.
+ * In theory, this should be unnecessary absent hypervisor bug(s) and/or what
+ * should be rare cases where TSC jitter may still be visible despite the
+ * hypervisor's best efforts.
*/
-static volatile uint64_t pvclock_last_cycles;
+static volatile uint64_t pvclock_last_systime;
+
+static uint64_t pvclock_getsystime(struct pvclock *pvc);
+static void pvclock_read_time_info(
+ struct pvclock_vcpu_time_info *ti, uint64_t *ns, uint8_t *flags);
+static void pvclock_read_wall_clock(struct pvclock_wall_clock *wc,
+ struct timespec *ts);
+static u_int pvclock_tc_get_timecount(struct timecounter *tc);
+static uint32_t pvclock_tc_vdso_timehands(
+ struct vdso_timehands *vdso_th, struct timecounter *tc);
+#ifdef COMPAT_FREEBSD32
+static uint32_t pvclock_tc_vdso_timehands32(
+ struct vdso_timehands32 *vdso_th, struct timecounter *tc);
+#endif
+
+static d_open_t pvclock_cdev_open;
+static d_mmap_t pvclock_cdev_mmap;
+
+static struct cdevsw pvclock_cdev_cdevsw = {
+ .d_version = D_VERSION,
+ .d_name = PVCLOCK_CDEVNAME,
+ .d_open = pvclock_cdev_open,
+ .d_mmap = pvclock_cdev_mmap,
+};
void
pvclock_resume(void)
{
-
- atomic_store_rel_64(&pvclock_last_cycles, 0);
-}
-
-uint64_t
-pvclock_get_last_cycles(void)
-{
-
- return (atomic_load_acq_64(&pvclock_last_cycles));
+ atomic_store_rel_64(&pvclock_last_systime, 0);
}
uint64_t
@@ -64,140 +94,243 @@
uint64_t freq;
freq = (1000000000ULL << 32) / ti->tsc_to_system_mul;
-
if (ti->tsc_shift < 0)
freq <<= -ti->tsc_shift;
else
freq >>= ti->tsc_shift;
-
return (freq);
}
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline uint64_t
-pvclock_scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
-{
- uint64_t product;
-
- if (shift < 0)
- delta >>= -shift;
- else
- delta <<= shift;
-
-#if defined(__i386__)
- {
- uint32_t tmp1, tmp2;
-
- /**
- * For i386, the formula looks like:
- *
- * lower = (mul_frac * (delta & UINT_MAX)) >> 32
- * upper = mul_frac * (delta >> 32)
- * product = lower + upper
- */
- __asm__ (
- "mul %5 ; "
- "mov %4,%%eax ; "
- "mov %%edx,%4 ; "
- "mul %5 ; "
- "xor %5,%5 ; "
- "add %4,%%eax ; "
- "adc %5,%%edx ; "
- : "=A" (product), "=r" (tmp1), "=r" (tmp2)
- : "a" ((uint32_t)delta), "1" ((uint32_t)(delta >> 32)),
- "2" (mul_frac) );
- }
-#elif defined(__amd64__)
- {
- unsigned long tmp;
-
- __asm__ (
- "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
- : [lo]"=a" (product), [hi]"=d" (tmp)
- : "0" (delta), [mul_frac]"rm"((uint64_t)mul_frac));
- }
-#else
-#error "pvclock: unsupported x86 architecture?"
-#endif
-
- return (product);
-}
-
-static uint64_t
-pvclock_get_nsec_offset(struct pvclock_vcpu_time_info *ti)
-{
- uint64_t delta;
-
- delta = rdtsc() - ti->tsc_timestamp;
-
- return (pvclock_scale_delta(delta, ti->tsc_to_system_mul,
- ti->tsc_shift));
-}
-
static void
pvclock_read_time_info(struct pvclock_vcpu_time_info *ti,
- uint64_t *cycles, uint8_t *flags)
+ uint64_t *ns, uint8_t *flags)
{
+ uint64_t delta;
uint32_t version;
do {
- version = ti->version;
- rmb();
- *cycles = ti->system_time + pvclock_get_nsec_offset(ti);
+ version = atomic_load_acq_32(&ti->version);
+ delta = rdtsc_ordered() - ti->tsc_timestamp;
+ *ns = ti->system_time + pvclock_scale_delta(delta,
+ ti->tsc_to_system_mul, ti->tsc_shift);
*flags = ti->flags;
- rmb();
+ atomic_thread_fence_acq();
} while ((ti->version & 1) != 0 || ti->version != version);
}
static void
-pvclock_read_wall_clock(struct pvclock_wall_clock *wc, uint32_t *sec,
- uint32_t *nsec)
+pvclock_read_wall_clock(struct pvclock_wall_clock *wc, struct timespec *ts)
{
uint32_t version;
do {
- version = wc->version;
- rmb();
- *sec = wc->sec;
- *nsec = wc->nsec;
- rmb();
+ version = atomic_load_acq_32(&wc->version);
+ ts->tv_sec = wc->sec;
+ ts->tv_nsec = wc->nsec;
+ atomic_thread_fence_acq();
} while ((wc->version & 1) != 0 || wc->version != version);
}
+static uint64_t
+pvclock_getsystime(struct pvclock *pvc)
+{
+ uint64_t now, last, ret;
+ uint8_t flags;
+
+ critical_enter();
+ pvclock_read_time_info(&pvc->timeinfos[curcpu], &now, &flags);
+ ret = now;
+ if ((flags & PVCLOCK_FLAG_TSC_STABLE) == 0) {
+ last = atomic_load_acq_64(&pvclock_last_systime);
+ do {
+ if (last > now) {
+ ret = last;
+ break;
+ }
+ } while (!atomic_fcmpset_rel_64(&pvclock_last_systime, &last,
+ now));
+ }
+ critical_exit();
+ return (ret);
+}
+
+/*
+ * NOTE: Transitional-only; this should be removed after 'dev/xen/timer/timer.c'
+ * has been migrated to the 'struct pvclock' API.
+ */
uint64_t
pvclock_get_timecount(struct pvclock_vcpu_time_info *ti)
{
- uint64_t now, last;
+ uint64_t now, last, ret;
uint8_t flags;
pvclock_read_time_info(ti, &now, &flags);
+ ret = now;
+ if ((flags & PVCLOCK_FLAG_TSC_STABLE) == 0) {
+ last = atomic_load_acq_64(&pvclock_last_systime);
+ do {
+ if (last > now) {
+ ret = last;
+ break;
+ }
+ } while (!atomic_fcmpset_rel_64(&pvclock_last_systime, &last,
+ now));
+ }
+ return (ret);
+}
- if (flags & PVCLOCK_FLAG_TSC_STABLE)
- return (now);
+/*
+ * NOTE: Transitional-only; this should be removed after 'dev/xen/timer/timer.c'
+ * has been migrated to the 'struct pvclock' API.
+ */
+void
+pvclock_get_wallclock(struct pvclock_wall_clock *wc, struct timespec *ts)
+{
+ pvclock_read_wall_clock(wc, ts);
+}
- /*
- * Enforce a monotonically increasing clock time across all VCPUs.
- * If our time is too old, use the last time and return. Otherwise,
- * try to update the last time.
- */
- do {
- last = atomic_load_acq_64(&pvclock_last_cycles);
- if (last > now)
- return (last);
- } while (!atomic_cmpset_64(&pvclock_last_cycles, last, now));
+static int
+pvclock_cdev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+ if (oflags & FWRITE)
+ return (EPERM);
+ return (0);
+}
+
+static int
+pvclock_cdev_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int nprot, vm_memattr_t *memattr)
+{
+ if (offset >= mp_ncpus * sizeof(struct pvclock_vcpu_time_info))
+ return (EINVAL);
+ if (PROT_EXTRACT(nprot) != PROT_READ)
+ return (EACCES);
+ *paddr = vtophys((uintptr_t)dev->si_drv1 + offset);
+ *memattr = VM_MEMATTR_DEFAULT;
+ return (0);
+}
- return (now);
+static u_int
+pvclock_tc_get_timecount(struct timecounter *tc)
+{
+ struct pvclock *pvc = tc->tc_priv;
+
+ return (pvclock_getsystime(pvc) & UINT_MAX);
+}
+
+static uint32_t
+pvclock_tc_vdso_timehands(struct vdso_timehands *vdso_th,
+ struct timecounter *tc)
+{
+ struct pvclock *pvc = tc->tc_priv;
+
+ vdso_th->th_algo = VDSO_TH_ALGO_X86_PVCLK;
+ vdso_th->th_x86_shift = 0;
+ vdso_th->th_x86_hpet_idx = 0;
+ vdso_th->th_x86_pvc_last_systime =
+ atomic_load_acq_64(&pvclock_last_systime);
+ vdso_th->th_x86_pvc_stable_mask = !pvc->vdso_force_unstable &&
+ pvc->stable_flag_supported ? PVCLOCK_FLAG_TSC_STABLE : 0;
+ bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
+ return (pvc->cdev != NULL && amd_feature & AMDID_RDTSCP);
}
+#ifdef COMPAT_FREEBSD32
+static uint32_t
+pvclock_tc_vdso_timehands32(struct vdso_timehands32 *vdso_th,
+ struct timecounter *tc)
+{
+ struct pvclock *pvc = tc->tc_priv;
+
+ vdso_th->th_algo = VDSO_TH_ALGO_X86_PVCLK;
+ vdso_th->th_x86_shift = 0;
+ vdso_th->th_x86_hpet_idx = 0;
+ vdso_th->th_x86_pvc_last_systime =
+ atomic_load_acq_64(&pvclock_last_systime);
+ vdso_th->th_x86_pvc_stable_mask = !pvc->vdso_force_unstable &&
+ pvc->stable_flag_supported ? PVCLOCK_FLAG_TSC_STABLE : 0;
+ bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
+ return (pvc->cdev != NULL && amd_feature & AMDID_RDTSCP);
+}
+#endif
+
void
-pvclock_get_wallclock(struct pvclock_wall_clock *wc, struct timespec *ts)
+pvclock_gettime(struct pvclock *pvc, struct timespec *ts)
{
- uint32_t sec, nsec;
+ struct timespec system_ts;
+ uint64_t system_ns;
+
+ pvclock_read_wall_clock(pvc->get_wallclock(pvc->get_wallclock_arg), ts);
+ system_ns = pvclock_getsystime(pvc);
+ system_ts.tv_sec = system_ns / 1000000000ULL;
+ system_ts.tv_nsec = system_ns % 1000000000ULL;
+ timespecadd(ts, &system_ts, ts);
+}
+
+void
+pvclock_init(struct pvclock *pvc, device_t dev, const char *tc_name,
+ int tc_quality, u_int tc_flags)
+{
+ struct make_dev_args mda;
+ int err;
+
+ KASSERT(((uintptr_t)pvc->timeinfos & PAGE_MASK) == 0,
+ ("Specified time info page(s) address is not page-aligned."));
+
+ /* Set up vDSO stable-flag suppression test facility: */
+ pvc->vdso_force_unstable = false;
+ SYSCTL_ADD_BOOL(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "vdso_force_unstable", CTLFLAG_RW, &pvc->vdso_force_unstable, 0,
+ "Forcibly deassert stable flag in vDSO codepath");
+
+ /* Set up timecounter and timecounter-supporting members: */
+ pvc->tc.tc_get_timecount = pvclock_tc_get_timecount;
+ pvc->tc.tc_poll_pps = NULL;
+ pvc->tc.tc_counter_mask = ~0U;
+ pvc->tc.tc_frequency = 1000000000ULL;
+ pvc->tc.tc_name = tc_name;
+ pvc->tc.tc_quality = tc_quality;
+ pvc->tc.tc_flags = tc_flags;
+ pvc->tc.tc_priv = pvc;
+ pvc->tc.tc_fill_vdso_timehands = pvclock_tc_vdso_timehands;
+#ifdef COMPAT_FREEBSD32
+ pvc->tc.tc_fill_vdso_timehands32 = pvclock_tc_vdso_timehands32;
+#endif
- pvclock_read_wall_clock(wc, &sec, &nsec);
- ts->tv_sec = sec;
- ts->tv_nsec = nsec;
+ /* Set up cdev for userspace mmapping of vCPU 0 time info page: */
+ make_dev_args_init(&mda);
+ mda.mda_devsw = &pvclock_cdev_cdevsw;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_WHEEL;
+ mda.mda_mode = 0444;
+ mda.mda_si_drv1 = pvc->timeinfos;
+ err = make_dev_s(&mda, &pvc->cdev, PVCLOCK_CDEVNAME);
+ if (err != 0) {
+ device_printf(dev, "Could not create /dev/%s, error %d. Fast "
+ "time of day will be unavailable for this timecounter.\n",
+ PVCLOCK_CDEVNAME, err);
+ KASSERT(pvc->cdev == NULL,
+ ("Failed make_dev_s() unexpectedly inited cdev."));
+ }
+
+ /* Register timecounter: */
+ tc_init(&pvc->tc);
+
+ /*
+ * Register wallclock:
+ * The RTC registration API expects a resolution in microseconds;
+ * pvclock's 1ns resolution is rounded up to 1us.
+ */
+ clock_register(dev, 1);
+}
+
+int
+pvclock_destroy(struct pvclock *pvc)
+{
+ /*
+ * Not currently possible since there is no teardown counterpart of
+ * 'tc_init()'.
+ */
+ return (EBUSY);
}
Index: sys/x86/x86/tsc.c
===================================================================
--- sys/x86/x86/tsc.c
+++ sys/x86/x86/tsc.c
@@ -870,6 +870,8 @@
vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
vdso_th->th_x86_hpet_idx = 0xffffffff;
+ vdso_th->th_x86_pvc_last_systime = 0;
+ vdso_th->th_x86_pvc_stable_mask = 0;
bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
return (1);
}
@@ -883,6 +885,8 @@
vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
vdso_th32->th_x86_hpet_idx = 0xffffffff;
+ vdso_th32->th_x86_pvc_last_systime = 0;
+ vdso_th32->th_x86_pvc_stable_mask = 0;
bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
return (1);
}

File Metadata

Mime Type
text/plain
Expires
Mon, Nov 10, 8:31 AM (9 h, 46 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
25102283
Default Alt Text
D31418.id93292.diff (19 KB)

Event Timeline