Index: head/sys/dev/ppbus/pps.c =================================================================== --- head/sys/dev/ppbus/pps.c (revision 44665) +++ head/sys/dev/ppbus/pps.c (revision 44666) @@ -1,212 +1,181 @@ /* * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp * ---------------------------------------------------------------------------- * - * $Id: pps.c,v 1.12 1998/12/07 21:58:16 archie Exp $ + * $Id: pps.c,v 1.13 1999/01/30 15:35:39 nsouch Exp $ * * This driver implements a draft-mogul-pps-api-02.txt PPS source. * * The input pin is pin#10 * The echo output pin is pin#14 * */ #include "opt_devfs.h" -#include "opt_ntp.h" #include #include #include #include #include #ifdef DEVFS #include #endif #include #include #include "pps.h" #define PPS_NAME "lppps" /* our official name */ static struct pps_data { int pps_unit; struct ppb_device pps_dev; - pps_params_t ppsparam; - pps_info_t ppsinfo; + struct pps_state pps; } *softc[NPPS]; -static int ppscap = - PPS_CAPTUREASSERT | -#ifdef PPS_SYNC - PPS_HARDPPSONASSERT | -#endif /* PPS_SYNC */ - PPS_OFFSETASSERT | - PPS_ECHOASSERT | - PPS_TSFMT_TSPEC; - static int npps; +static pps_devsw_installed = 0; /* * Make ourselves visible as a ppbus driver */ static struct ppb_device *ppsprobe(struct ppb_data *ppb); static int ppsattach(struct ppb_device *dev); static void ppsintr(int unit); static void pps_drvinit(void *unused); static struct ppb_driver ppsdriver = { ppsprobe, ppsattach, PPS_NAME }; DATA_SET(ppbdriver_set, ppsdriver); static d_open_t ppsopen; static d_close_t ppsclose; static d_ioctl_t ppsioctl; #define CDEV_MAJOR 89 static struct cdevsw pps_cdevsw = { ppsopen, ppsclose, noread, nowrite, ppsioctl, nullstop, nullreset, nodevtotty, seltrue, nommap, nostrat, PPS_NAME, NULL, -1 }; + static struct ppb_device * ppsprobe(struct ppb_data *ppb) { struct pps_data *sc; sc = (struct pps_data *) malloc(sizeof(struct pps_data), M_TEMP, M_NOWAIT); if (!sc) { printf(PPS_NAME ": cannot malloc!\n"); return (0); } bzero(sc, sizeof(struct pps_data)); softc[npps] = sc; sc->pps_unit = npps++; sc->pps_dev.id_unit = sc->pps_unit; sc->pps_dev.ppb = ppb; sc->pps_dev.name = ppsdriver.name; sc->pps_dev.intr = ppsintr; + sc->pps.ppscap = PPS_CAPTUREASSERT | PPS_ECHOASSERT; + pps_init(&sc->pps); return (&sc->pps_dev); } static int ppsattach(struct ppb_device *dev) { + dev_t devt; + /* * Report ourselves */ printf(PPS_NAME "%d: on ppbus %d\n", dev->id_unit, dev->ppb->ppb_link->adapter_unit); #ifdef DEVFS devfs_add_devswf(&pps_cdevsw, dev->id_unit, DV_CHR, UID_ROOT, GID_WHEEL, 0600, PPS_NAME "%d", dev->id_unit); #endif - + if( ! pps_devsw_installed ) { + devt = makedev(CDEV_MAJOR, 0); + cdevsw_add(&devt, &pps_cdevsw, NULL); + pps_devsw_installed = 1; + } return (1); } static int ppsopen(dev_t dev, int flags, int fmt, struct proc *p) { struct pps_data *sc; u_int unit = minor(dev); if ((unit >= npps)) return (ENXIO); sc = softc[unit]; if (ppb_request_bus(&sc->pps_dev, PPB_WAIT|PPB_INTR)) return (EINTR); ppb_wctr(&sc->pps_dev, 0); ppb_wctr(&sc->pps_dev, IRQENABLE); return(0); } static int ppsclose(dev_t dev, int flags, int fmt, struct proc *p) { struct pps_data *sc = softc[minor(dev)]; - sc->ppsparam.mode = 0; + sc->pps.ppsparam.mode = 0; /* PHK ??? 
*/ ppb_wdtr(&sc->pps_dev, 0); ppb_wctr(&sc->pps_dev, 0); ppb_release_bus(&sc->pps_dev); return(0); } static void ppsintr(int unit) { struct pps_data *sc = softc[unit]; - struct timespec tc; + struct timecounter *tc; + unsigned count; - nanotime(&tc); + tc = timecounter; + count = timecounter->tc_get_timecount(tc); if (!(ppb_rstr(&sc->pps_dev) & nACK)) return; - if (sc->ppsparam.mode & PPS_ECHOASSERT) + if (sc->pps.ppsparam.mode & PPS_ECHOASSERT) ppb_wctr(&sc->pps_dev, IRQENABLE | AUTOFEED); - if (sc->ppsparam.mode & PPS_OFFSETASSERT) { - timespecadd(&tc, &sc->ppsparam.assert_offset); - if (tc.tv_nsec < 0) { - tc.tv_sec--; - tc.tv_nsec += 1000000000; - } - } - sc->ppsinfo.assert_timestamp = tc; - sc->ppsinfo.assert_sequence++; -#ifdef PPS_SYNC - if (sc->ppsparam.mode & PPS_HARDPPSONASSERT) { - struct timeval tv; - - tv.tv_sec = tc.tv_sec; - tv.tv_usec = tc.tv_nsec / 1000; - hardpps(&tv, tv.tv_usec); - } -#endif /* PPS_SYNC */ - if (sc->ppsparam.mode & PPS_ECHOASSERT) + pps_event(&sc->pps, tc, count, PPS_CAPTUREASSERT); + if (sc->pps.ppsparam.mode & PPS_ECHOASSERT) ppb_wctr(&sc->pps_dev, IRQENABLE); } static int ppsioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p) { struct pps_data *sc = softc[minor(dev)]; - return (std_pps_ioctl(cmd, data, &sc->ppsparam, &sc->ppsinfo, ppscap)); + return (pps_ioctl(cmd, data, &sc->pps)); } -static pps_devsw_installed = 0; - -static void -pps_drvinit(void *unused) -{ - dev_t dev; - - if( ! pps_devsw_installed ) { - dev = makedev(CDEV_MAJOR, 0); - cdevsw_add(&dev, &pps_cdevsw, NULL); - pps_devsw_installed = 1; - } -} - -SYSINIT(ppsdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,pps_drvinit,NULL) Index: head/sys/kern/kern_clock.c =================================================================== --- head/sys/kern/kern_clock.c (revision 44665) +++ head/sys/kern/kern_clock.c (revision 44666) @@ -1,866 +1,961 @@ /*- * Copyright (c) 1997, 1998 Poul-Henning Kamp * Copyright (c) 1982, 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 - * $Id: kern_clock.c,v 1.88 1999/02/19 19:34:49 luoqi Exp $ + * $Id: kern_clock.c,v 1.89 1999/03/08 12:35:58 phk Exp $ */ +#include "opt_ntp.h" + #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #ifdef GPROF #include #endif #if defined(SMP) && defined(BETTER_CLOCK) #include #endif /* This is where the NTIMECOUNTER option hangs out */ #include "opt_ntp.h" /* * Number of timecounters used to implement stable storage */ #ifndef NTIMECOUNTER #define NTIMECOUNTER 5 #endif static MALLOC_DEFINE(M_TIMECOUNTER, "timecounter", "Timecounter stable storage"); static void initclocks __P((void *dummy)); SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL) static void tco_forward __P((int force)); static void tco_setscales __P((struct timecounter *tc)); static __inline unsigned tco_delta __P((struct timecounter *tc)); /* Some of these don't belong here, but it's easiest to concentrate them. */ #if defined(SMP) && defined(BETTER_CLOCK) long cp_time[CPUSTATES]; #else static long cp_time[CPUSTATES]; #endif long tk_cancc; long tk_nin; long tk_nout; long tk_rawcc; time_t time_second; /* * Which update policy to use. * 0 - every tick, bad hardware may fail with "calcru negative..." * 1 - more resistent to the above hardware, but less efficient. */ static int tco_method; /* * Implement a dummy timecounter which we can use until we get a real one * in the air. This allows the console and other early stuff to use * timeservices. */ static unsigned dummy_get_timecount(struct timecounter *tc) { static unsigned now; return (++now); } static struct timecounter dummy_timecounter = { dummy_get_timecount, 0, ~0u, 1000000, "dummy" }; struct timecounter *timecounter = &dummy_timecounter; /* * Clock handling routines. * * This code is written to operate with two timers that run independently of * each other. * * The main timer, running hz times per second, is used to trigger interval * timers, timeouts and rescheduling as needed. * * The second timer handles kernel and user profiling, * and does resource use estimation. If the second timer is programmable, * it is randomized to avoid aliasing between the two clocks. For example, * the randomization prevents an adversary from always giving up the cpu * just before its quantum expires. Otherwise, it would never accumulate * cpu ticks. The mean frequency of the second timer is stathz. * * If no second timer exists, stathz will be zero; in this case we drive * profiling and statistics off the main clock. This WILL NOT be accurate; * do not do it unless absolutely necessary. * * The statistics clock may (or may not) be run at a higher rate while * profiling. This profile clock runs at profhz. We require that profhz * be an integral multiple of stathz. * * If the statistics clock is running fast, it must be divided by the ratio * profhz/stathz for statistics. (For profiling, every tick counts.) 
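 *
 * As a worked example (the rates here are hypothetical, not anything
 * this file mandates): with stathz = 128 and profhz = 1024,
 * initclocks() below computes psratio = 1024 / 128 = 8, so while
 * profiling the clock runs at 1024 Hz and only every 8th tick is
 * charged to statistics.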
* * Time-of-day is maintained using a "timecounter", which may or may * not be related to the hardware generating the above mentioned * interrupts. */ int stathz; int profhz; static int profprocs; int ticks; static int psdiv, pscnt; /* prof => stat divider */ int psratio; /* ratio: prof / stat */ /* * Initialize clock frequencies and start both clocks running. */ /* ARGSUSED*/ static void initclocks(dummy) void *dummy; { register int i; /* * Set divisors to 1 (normal case) and let the machine-specific * code do its bit. */ psdiv = pscnt = 1; cpu_initclocks(); /* * Compute profhz/stathz, and fix profhz if needed. */ i = stathz ? stathz : hz; if (profhz == 0) profhz = i; psratio = profhz / i; } /* * The real-time timer, interrupting hz times per second. */ void hardclock(frame) register struct clockframe *frame; { register struct proc *p; p = curproc; if (p) { register struct pstats *pstats; /* * Run current process's virtual and profile time, as needed. */ pstats = p->p_stats; if (CLKF_USERMODE(frame) && timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) psignal(p, SIGVTALRM); if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) && itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) psignal(p, SIGPROF); } #if defined(SMP) && defined(BETTER_CLOCK) forward_hardclock(pscnt); #endif /* * If no separate statistics clock is available, run it from here. */ if (stathz == 0) statclock(frame); tco_forward(0); ticks++; /* * Process callouts at a very low cpu priority, so we don't keep the * relatively high clock interrupt priority any longer than necessary. */ if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) { if (CLKF_BASEPRI(frame)) { /* * Save the overhead of a software interrupt; * it will happen as soon as we return, so do it now. */ (void)splsoftclock(); softclock(); } else setsoftclock(); } else if (softticks + 1 == ticks) ++softticks; } /* * Compute number of ticks in the specified amount of time. */ int tvtohz(tv) struct timeval *tv; { register unsigned long ticks; register long sec, usec; /* * If the number of usecs in the whole seconds part of the time * difference fits in a long, then the total number of usecs will * fit in an unsigned long. Compute the total and convert it to * ticks, rounding up and adding 1 to allow for the current tick * to expire. Rounding also depends on unsigned long arithmetic * to avoid overflow. * * Otherwise, if the number of ticks in the whole seconds part of * the time difference fits in a long, then convert the parts to * ticks separately and add, using similar rounding methods and * overflow avoidance. This method would work in the previous * case but it is slightly slower and assumes that hz is integral. * * Otherwise, round the time difference down to the maximum * representable value. * * If ints have 32 bits, then the maximum value for any timeout in * 10ms ticks is 248 days. */ sec = tv->tv_sec; usec = tv->tv_usec; if (usec < 0) { sec--; usec += 1000000; } if (sec < 0) { #ifdef DIAGNOSTIC if (usec > 0) { sec++; usec -= 1000000; } printf("tvotohz: negative time difference %ld sec %ld usec\n", sec, usec); #endif ticks = 1; } else if (sec <= LONG_MAX / 1000000) ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1)) / tick + 1; else if (sec <= LONG_MAX / hz) ticks = sec * hz + ((unsigned long)usec + (tick - 1)) / tick + 1; else ticks = LONG_MAX; if (ticks > INT_MAX) ticks = INT_MAX; return ((int)ticks); } /* * Start profiling on a process. 
* * Kernel profiling passes proc0 which never exits and hence * keeps the profile clock running constantly. */ void startprofclock(p) register struct proc *p; { int s; if ((p->p_flag & P_PROFIL) == 0) { p->p_flag |= P_PROFIL; if (++profprocs == 1 && stathz != 0) { s = splstatclock(); psdiv = pscnt = psratio; setstatclockrate(profhz); splx(s); } } } /* * Stop profiling on a process. */ void stopprofclock(p) register struct proc *p; { int s; if (p->p_flag & P_PROFIL) { p->p_flag &= ~P_PROFIL; if (--profprocs == 0 && stathz != 0) { s = splstatclock(); psdiv = pscnt = 1; setstatclockrate(stathz); splx(s); } } } /* * Statistics clock. Grab profile sample, and if divider reaches 0, * do process and kernel statistics. */ void statclock(frame) register struct clockframe *frame; { #ifdef GPROF register struct gmonparam *g; int i; #endif register struct proc *p; struct pstats *pstats; long rss; struct rusage *ru; struct vmspace *vm; if (curproc != NULL && CLKF_USERMODE(frame)) { p = curproc; if (p->p_flag & P_PROFIL) addupc_intr(p, CLKF_PC(frame), 1); #if defined(SMP) && defined(BETTER_CLOCK) if (stathz != 0) forward_statclock(pscnt); #endif if (--pscnt > 0) return; /* * Came from user mode; CPU was in user state. * If this process is being profiled record the tick. */ p->p_uticks++; if (p->p_nice > NZERO) cp_time[CP_NICE]++; else cp_time[CP_USER]++; } else { #ifdef GPROF /* * Kernel statistics are just like addupc_intr, only easier. */ g = &_gmonparam; if (g->state == GMON_PROF_ON) { i = CLKF_PC(frame) - g->lowpc; if (i < g->textsize) { i /= HISTFRACTION * sizeof(*g->kcount); g->kcount[i]++; } } #endif #if defined(SMP) && defined(BETTER_CLOCK) if (stathz != 0) forward_statclock(pscnt); #endif if (--pscnt > 0) return; /* * Came from kernel mode, so we were: * - handling an interrupt, * - doing syscall or trap work on behalf of the current * user process, or * - spinning in the idle loop. * Whichever it is, charge the time as appropriate. * Note that we charge interrupts to the current process, * regardless of whether they are ``for'' that process, * so that we know how much of its real time was spent * in ``non-process'' (i.e., interrupt) work. */ p = curproc; if (CLKF_INTR(frame)) { if (p != NULL) p->p_iticks++; cp_time[CP_INTR]++; } else if (p != NULL) { p->p_sticks++; cp_time[CP_SYS]++; } else cp_time[CP_IDLE]++; } pscnt = psdiv; /* * We maintain statistics shown by user-level statistics * programs: the amount of time in each cpu state. */ /* * We adjust the priority of the current process. The priority of * a process gets worse as it accumulates CPU time. The cpu usage * estimator (p_estcpu) is increased here. The formula for computing * priorities (in kern_synch.c) will compute a different value each * time p_estcpu increases by 4. The cpu usage estimator ramps up * quite quickly when the process is running (linearly), and decays * away exponentially, at a rate which is proportionally slower when * the system is busy. The basic principal is that the system will * 90% forget that the process used a lot of CPU time in 5 * loadav * seconds. This causes the system to favor processes which haven't * run much recently, and to round-robin among other processes. */ if (p != NULL) { p->p_cpticks++; if (++p->p_estcpu == 0) p->p_estcpu--; if ((p->p_estcpu & 3) == 0) { resetpriority(p); if (p->p_priority >= PUSER) p->p_priority = p->p_usrpri; } /* Update resource usage integrals and maximums. 
*/ if ((pstats = p->p_stats) != NULL && (ru = &pstats->p_ru) != NULL && (vm = p->p_vmspace) != NULL) { ru->ru_ixrss += pgtok(vm->vm_tsize); ru->ru_idrss += pgtok(vm->vm_dsize); ru->ru_isrss += pgtok(vm->vm_ssize); rss = pgtok(vmspace_resident_count(vm)); if (ru->ru_maxrss < rss) ru->ru_maxrss = rss; } } } /* * Return information about system clocks. */ static int sysctl_kern_clockrate SYSCTL_HANDLER_ARGS { struct clockinfo clkinfo; /* * Construct clockinfo structure. */ clkinfo.hz = hz; clkinfo.tick = tick; clkinfo.tickadj = tickadj; clkinfo.profhz = profhz; clkinfo.stathz = stathz ? stathz : hz; return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req)); } SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0, sysctl_kern_clockrate, "S,clockinfo",""); static __inline unsigned tco_delta(struct timecounter *tc) { return ((tc->tc_get_timecount(tc) - tc->tc_offset_count) & tc->tc_counter_mask); } /* - * We have four functions for looking at the clock, two for microseconds - * and two for nanoseconds. For each there is fast but less precise - * version "get{nano|micro}time" which will return a time which is up - * to 1/HZ previous to the call, whereas the raw version "{nano|micro}time" - * will return a timestamp which is as precise as possible. + * We have eight functions for looking at the clock, four for + * microseconds and four for nanoseconds. For each there is fast + * but less precise version "get{nano|micro}[up]time" which will + * return a time which is up to 1/HZ previous to the call, whereas + * the raw version "{nano|micro}[up]time" will return a timestamp + * which is as precise as possible. The "up" variants return the + * time relative to system boot, these are well suited for time + * interval measurements. 
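 *
 * A minimal usage sketch, assuming nothing beyond the functions
 * defined below: an interval measurement should use an "up" variant,
 * which is immune to steps in the wall clock:
 *
 *	struct timespec t0, t1;
 *
 *	nanouptime(&t0);
 *	... work to be timed ...
 *	nanouptime(&t1);
 *	t1.tv_sec -= t0.tv_sec;
 *	t1.tv_nsec -= t0.tv_nsec;
 *	if (t1.tv_nsec < 0) {
 *		t1.tv_sec--;
 *		t1.tv_nsec += 1000000000;
 *	}
 *
 * whereas a timestamp for the outside world would use nanotime(), or
 * getnanotime() when 1/HZ slop is acceptable and speed matters.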
*/ void getmicrotime(struct timeval *tvp) { struct timecounter *tc; if (!tco_method) { tc = timecounter; *tvp = tc->tc_microtime; } else { microtime(tvp); } } void getnanotime(struct timespec *tsp) { struct timecounter *tc; if (!tco_method) { tc = timecounter; *tsp = tc->tc_nanotime; } else { nanotime(tsp); } } void microtime(struct timeval *tv) { struct timecounter *tc; tc = (struct timecounter *)timecounter; tv->tv_sec = tc->tc_offset_sec; tv->tv_usec = tc->tc_offset_micro; tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32; tv->tv_usec += boottime.tv_usec; tv->tv_sec += boottime.tv_sec; while (tv->tv_usec >= 1000000) { tv->tv_usec -= 1000000; tv->tv_sec++; } } void nanotime(struct timespec *ts) { unsigned count; u_int64_t delta; struct timecounter *tc; tc = (struct timecounter *)timecounter; ts->tv_sec = tc->tc_offset_sec; count = tco_delta(tc); delta = tc->tc_offset_nano; delta += ((u_int64_t)count * tc->tc_scale_nano_f); delta >>= 32; delta += ((u_int64_t)count * tc->tc_scale_nano_i); delta += boottime.tv_usec * 1000; ts->tv_sec += boottime.tv_sec; while (delta >= 1000000000) { delta -= 1000000000; ts->tv_sec++; } ts->tv_nsec = delta; } void -timecounter_timespec(unsigned count, struct timespec *ts) -{ - u_int64_t delta; - struct timecounter *tc; - - tc = (struct timecounter *)timecounter; - ts->tv_sec = tc->tc_offset_sec; - count -= tc->tc_offset_count; - count &= tc->tc_counter_mask; - delta = tc->tc_offset_nano; - delta += ((u_int64_t)count * tc->tc_scale_nano_f); - delta >>= 32; - delta += ((u_int64_t)count * tc->tc_scale_nano_i); - delta += boottime.tv_usec * 1000; - ts->tv_sec += boottime.tv_sec; - while (delta >= 1000000000) { - delta -= 1000000000; - ts->tv_sec++; - } - ts->tv_nsec = delta; -} - -void getmicrouptime(struct timeval *tvp) { struct timecounter *tc; if (!tco_method) { tc = timecounter; tvp->tv_sec = tc->tc_offset_sec; tvp->tv_usec = tc->tc_offset_micro; } else { microuptime(tvp); } } void getnanouptime(struct timespec *tsp) { struct timecounter *tc; if (!tco_method) { tc = timecounter; tsp->tv_sec = tc->tc_offset_sec; tsp->tv_nsec = tc->tc_offset_nano >> 32; } else { nanouptime(tsp); } } void microuptime(struct timeval *tv) { struct timecounter *tc; tc = (struct timecounter *)timecounter; tv->tv_sec = tc->tc_offset_sec; tv->tv_usec = tc->tc_offset_micro; tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32; if (tv->tv_usec >= 1000000) { tv->tv_usec -= 1000000; tv->tv_sec++; } } void nanouptime(struct timespec *ts) { unsigned count; u_int64_t delta; struct timecounter *tc; tc = (struct timecounter *)timecounter; ts->tv_sec = tc->tc_offset_sec; count = tco_delta(tc); delta = tc->tc_offset_nano; delta += ((u_int64_t)count * tc->tc_scale_nano_f); delta >>= 32; delta += ((u_int64_t)count * tc->tc_scale_nano_i); if (delta >= 1000000000) { delta -= 1000000000; ts->tv_sec++; } ts->tv_nsec = delta; } static void tco_setscales(struct timecounter *tc) { u_int64_t scale; scale = 1000000000LL << 32; scale += tc->tc_adjustment; scale /= tc->tc_frequency; tc->tc_scale_micro = scale / 1000; tc->tc_scale_nano_f = scale & 0xffffffff; tc->tc_scale_nano_i = scale >> 32; } void init_timecounter(struct timecounter *tc) { struct timespec ts1; struct timecounter *t1, *t2, *t3; int i; tc->tc_adjustment = 0; tco_setscales(tc); tc->tc_offset_count = tc->tc_get_timecount(tc); tc->tc_tweak = tc; MALLOC(t1, struct timecounter *, sizeof *t1, M_TIMECOUNTER, M_WAITOK); *t1 = *tc; t2 = t1; for (i = 1; i < NTIMECOUNTER; i++) { MALLOC(t3, struct timecounter *, 
sizeof *t3, M_TIMECOUNTER, M_WAITOK); *t3 = *tc; t3->tc_other = t2; t2 = t3; } t1->tc_other = t3; tc = t1; printf("Timecounter \"%s\" frequency %lu Hz\n", tc->tc_name, (u_long)tc->tc_frequency); /* XXX: For now always start using the counter. */ tc->tc_offset_count = tc->tc_get_timecount(tc); nanouptime(&ts1); tc->tc_offset_nano = (u_int64_t)ts1.tv_nsec << 32; tc->tc_offset_micro = ts1.tv_nsec / 1000; tc->tc_offset_sec = ts1.tv_sec; timecounter = tc; } void set_timecounter(struct timespec *ts) { struct timespec ts2; nanouptime(&ts2); boottime.tv_sec = ts->tv_sec - ts2.tv_sec; boottime.tv_usec = (ts->tv_nsec - ts2.tv_nsec) / 1000; if (boottime.tv_usec < 0) { boottime.tv_usec += 1000000; boottime.tv_sec--; } /* fiddle all the little crinkly bits around the fiords... */ tco_forward(1); } #if 0 /* Currently unused */ void switch_timecounter(struct timecounter *newtc) { int s; struct timecounter *tc; struct timespec ts; s = splclock(); tc = timecounter; if (newtc == tc || newtc == tc->tc_other) { splx(s); return; } nanouptime(&ts); newtc->tc_offset_sec = ts.tv_sec; newtc->tc_offset_nano = (u_int64_t)ts.tv_nsec << 32; newtc->tc_offset_micro = ts.tv_nsec / 1000; newtc->tc_offset_count = newtc->tc_get_timecount(newtc); timecounter = newtc; splx(s); } #endif static struct timecounter * sync_other_counter(void) { struct timecounter *tc, *tcn, *tco; unsigned delta; tco = timecounter; tc = tco->tc_other; tcn = tc->tc_other; *tc = *tco; tc->tc_other = tcn; delta = tco_delta(tc); tc->tc_offset_count += delta; tc->tc_offset_count &= tc->tc_counter_mask; tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_f; tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_i << 32; return (tc); } static void tco_forward(int force) { struct timecounter *tc, *tco; tco = timecounter; tc = sync_other_counter(); /* * We may be inducing a tiny error here, the tc_poll_pps() may * process a latched count which happens after the tco_delta() * in sync_other_counter(), which would extend the previous * counters parameters into the domain of this new one. * Since the timewindow is very small for this, the error is * going to be only a few weenieseconds (as Dave Mills would * say), so lets just not talk more about it, OK ? 
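 *
 * For scale: the conversion involved is the 32.32 fixed point set up
 * by tco_setscales() above.  As a worked example with a hypothetical
 * 1 MHz counter:
 *
 *	scale = (1000000000LL << 32) / 1000000;	  i.e. 1000 << 32
 *	tc_scale_nano_i = 1000, tc_scale_nano_f = 0;
 *
 * so even a whole extra count in the latched value would amount to
 * only a microsecond at this rate.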
*/ if (tco->tc_poll_pps) tco->tc_poll_pps(tco); if (timedelta != 0) { tc->tc_offset_nano += (u_int64_t)(tickdelta * 1000) << 32; timedelta -= tickdelta; force++; } while (tc->tc_offset_nano >= 1000000000ULL << 32) { tc->tc_offset_nano -= 1000000000ULL << 32; tc->tc_offset_sec++; - tc->tc_frequency = tc->tc_tweak->tc_frequency; - tc->tc_adjustment = tc->tc_tweak->tc_adjustment; ntp_update_second(tc); /* XXX only needed if xntpd runs */ tco_setscales(tc); force++; } if (tco_method && !force) return; tc->tc_offset_micro = (tc->tc_offset_nano / 1000) >> 32; /* Figure out the wall-clock time */ tc->tc_nanotime.tv_sec = tc->tc_offset_sec + boottime.tv_sec; tc->tc_nanotime.tv_nsec = (tc->tc_offset_nano >> 32) + boottime.tv_usec * 1000; tc->tc_microtime.tv_usec = tc->tc_offset_micro + boottime.tv_usec; if (tc->tc_nanotime.tv_nsec >= 1000000000) { tc->tc_nanotime.tv_nsec -= 1000000000; tc->tc_microtime.tv_usec -= 1000000; tc->tc_nanotime.tv_sec++; } time_second = tc->tc_microtime.tv_sec = tc->tc_nanotime.tv_sec; timecounter = tc; } -static int -sysctl_kern_timecounter_frequency SYSCTL_HANDLER_ARGS +SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, ""); + +SYSCTL_INT(_kern_timecounter, KERN_ARGMAX, method, CTLFLAG_RW, &tco_method, 0, + "This variable determines the method used for updating timecounters. " + "If the default algorithm (0) fails with \"calcru negative...\" messages " + "try the alternate algorithm (1) which handles bad hardware better." + +); + + +int +pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps) { + pps_params_t *app; + pps_info_t *api; - return (sysctl_handle_opaque(oidp, - &timecounter->tc_tweak->tc_frequency, - sizeof(timecounter->tc_tweak->tc_frequency), req)); + switch (cmd) { + case PPS_IOC_CREATE: + return (0); + case PPS_IOC_DESTROY: + return (0); + case PPS_IOC_SETPARAMS: + app = (pps_params_t *)data; + if (app->mode & ~pps->ppscap) + return (EINVAL); + pps->ppsparam = *app; + return (0); + case PPS_IOC_GETPARAMS: + app = (pps_params_t *)data; + *app = pps->ppsparam; + return (0); + case PPS_IOC_GETCAP: + *(int*)data = pps->ppscap; + return (0); + case PPS_IOC_FETCH: + api = (pps_info_t *)data; + pps->ppsinfo.current_mode = pps->ppsparam.mode; + *api = pps->ppsinfo; + return (0); + case PPS_IOC_WAIT: + return (EOPNOTSUPP); + default: + return (ENOTTY); + } } -static int -sysctl_kern_timecounter_adjustment SYSCTL_HANDLER_ARGS +void +pps_init(struct pps_state *pps) { - - return (sysctl_handle_opaque(oidp, - &timecounter->tc_tweak->tc_adjustment, - sizeof(timecounter->tc_tweak->tc_adjustment), req)); + pps->ppscap |= PPS_TSFMT_TSPEC; + if (pps->ppscap & PPS_CAPTUREASSERT) + pps->ppscap |= PPS_OFFSETASSERT; + if (pps->ppscap & PPS_CAPTURECLEAR) + pps->ppscap |= PPS_OFFSETCLEAR; +#ifdef PPS_SYNC + if (pps->ppscap & PPS_CAPTUREASSERT) + pps->ppscap |= PPS_HARDPPSONASSERT; + if (pps->ppscap & PPS_CAPTURECLEAR) + pps->ppscap |= PPS_HARDPPSONCLEAR; +#endif } -SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, ""); +void +pps_event(struct pps_state *pps, struct timecounter *tc, unsigned count, int event) +{ + struct timespec ts, *tsp, *osp; + u_int64_t delta; + unsigned tcount, *pcount; + int foff, fhard; + pps_seq_t *pseq; -SYSCTL_INT(_kern_timecounter, KERN_ARGMAX, method, CTLFLAG_RW, &tco_method, 0, - "This variable determines the method used for updating timecounters. " - "If the default algorithm (0) fails with \"calcru negative...\" messages " - "try the alternate algorithm (1) which handles bad hardware better." 
+ /* Things would be easier with arrays... */ + if (event == PPS_CAPTUREASSERT) { + tsp = &pps->ppsinfo.assert_timestamp; + osp = &pps->ppsparam.assert_offset; + foff = pps->ppsparam.mode & PPS_OFFSETASSERT; + fhard = pps->ppsparam.mode & PPS_HARDPPSONASSERT; + pcount = &pps->ppscount[0]; + pseq = &pps->ppsinfo.assert_sequence; + } else { + tsp = &pps->ppsinfo.clear_timestamp; + osp = &pps->ppsparam.clear_offset; + foff = pps->ppsparam.mode & PPS_OFFSETCLEAR; + fhard = pps->ppsparam.mode & PPS_HARDPPSONCLEAR; + pcount = &pps->ppscount[1]; + pseq = &pps->ppsinfo.clear_sequence; + } -); + /* The timecounter changed: bail */ + if (!pps->ppstc || + pps->ppstc->tc_name != tc->tc_name || + tc->tc_name != timecounter->tc_name) { + pps->ppstc = tc; + *pcount = count; + return; + } -SYSCTL_PROC(_kern_timecounter, OID_AUTO, frequency, CTLTYPE_INT | CTLFLAG_RW, - 0, sizeof(u_int), sysctl_kern_timecounter_frequency, "I", ""); + /* Now, make sure we have the right instance */ + tc = timecounter; -SYSCTL_PROC(_kern_timecounter, OID_AUTO, adjustment, CTLTYPE_INT | CTLFLAG_RW, - 0, sizeof(int), sysctl_kern_timecounter_adjustment, "I", ""); + /* Nothing really happened */ + if (*pcount == count) + return; + + *pcount = count; + + /* Convert the count to timespec */ + ts.tv_sec = tc->tc_offset_sec; + tcount = count - tc->tc_offset_count; + tcount &= tc->tc_counter_mask; + delta = tc->tc_offset_nano; + delta += ((u_int64_t)tcount * tc->tc_scale_nano_f); + delta >>= 32; + delta += ((u_int64_t)tcount * tc->tc_scale_nano_i); + delta += boottime.tv_usec * 1000; + ts.tv_sec += boottime.tv_sec; + while (delta >= 1000000000) { + delta -= 1000000000; + ts.tv_sec++; + } + ts.tv_nsec = delta; + + (*pseq)++; + *tsp = ts; + + if (foff) { + timespecadd(tsp, osp); + if (tsp->tv_nsec < 0) { + tsp->tv_nsec += 1000000000; + tsp->tv_sec -= 1; + } + } +#ifdef PPS_SYNC + if (fhard) { + /* magic, at its best... */ + tcount = count - pps->ppscount[2]; + pps->ppscount[2] = count; + tcount &= tc->tc_counter_mask; + delta = ((u_int64_t)tcount * tc->tc_tweak->tc_scale_nano_f); + delta >>= 32; + delta += ((u_int64_t)tcount * tc->tc_tweak->tc_scale_nano_i); + hardpps(tsp, delta); + } +#endif +} + Index: head/sys/kern/kern_ntptime.c =================================================================== --- head/sys/kern/kern_ntptime.c (revision 44665) +++ head/sys/kern/kern_ntptime.c (revision 44666) @@ -1,850 +1,813 @@ /*********************************************************************** * * * Copyright (c) David L. Mills 1993-1998 * * * * Permission to use, copy, modify, and distribute this software and * * its documentation for any purpose and without fee is hereby * * granted, provided that the above copyright notice appears in all * * copies and that both the copyright notice and this permission * * notice appear in supporting documentation, and that the name * * University of Delaware not be used in advertising or publicity * * pertaining to distribution of the software without specific, * * written prior permission. The University of Delaware makes no * * representations about the suitability this software for any * * purpose. It is provided "as is" without express or implied * * warranty. * * * **********************************************************************/ /* * Adapted from the original sources for FreeBSD and timecounters by: - * Poul-Henning Kamp + * Poul-Henning Kamp . 
* * The 32bit version of the "LP" macros seems a bit past its "sell by" * date so I have retained only the 64bit version and included it directly * in this file. * * Only minor changes done to interface with the timecounters over in * sys/kern/kern_clock.c. Some of the comments below may be (even more) * confusing and/or plain wrong in that context. - * - * The PPS_SYNC/hardpps() is currently not supported. - * */ +#include "opt_ntp.h" + #include #include #include #include #include #include #include #include #include /* * Single-precision macros for 64-bit machines */ typedef long long l_fp; #define L_ADD(v, u) ((v) += (u)) #define L_SUB(v, u) ((v) -= (u)) #define L_ADDHI(v, a) ((v) += (long long)(a) << 32) #define L_NEG(v) ((v) = -(v)) #define L_RSHIFT(v, n) \ do { \ if ((v) < 0) \ (v) = -(-(v) >> (n)); \ else \ (v) = (v) >> (n); \ } while (0) #define L_MPY(v, a) ((v) *= (a)) #define L_CLR(v) ((v) = 0) #define L_ISNEG(v) ((v) < 0) #define L_LINT(v, a) ((v) = (long long)(a) << 32) #define L_GINT(v) ((v) < 0 ? -(-(v) >> 32) : (v) >> 32) /* * Generic NTP kernel interface * * These routines constitute the Network Time Protocol (NTP) interfaces * for user and daemon application programs. The ntp_gettime() routine * provides the time, maximum error (synch distance) and estimated error * (dispersion) to client user application programs. The ntp_adjtime() * routine is used by the NTP daemon to adjust the system clock to an * externally derived time. The time offset and related variables set by * this routine are used by other routines in this module to adjust the * phase and frequency of the clock discipline loop which controls the * system clock. * * When the kernel time is reckoned directly in nanoseconds (NANO * defined), the time at each tick interrupt is derived directly from * the kernel time variable. When the kernel time is reckoned in * microseconds, (NANO undefined), the time is derived from the kernel * time variable together with a variable representing the leftover * nanoseconds at the last tick interrupt. In either case, the current * nanosecond time is reckoned from these values plus an interpolated * value derived by the clock routines in another architecture-specific * module. The interpolation can use either a dedicated counter or a * processor cycle counter (PCC) implemented in some architectures. * * Note that all routines must run at priority splclock or higher. */ /* * Phase/frequency-lock loop (PLL/FLL) definitions * * The nanosecond clock discipline uses two variable types, time * variables and frequency variables. Both types are represented as 64- * bit fixed-point quantities with the decimal point between two 32-bit * halves. On a 32-bit machine, each half is represented as a single * word and mathematical operations are done using multiple-precision * arithmetic. On a 64-bit machine, ordinary computer arithmetic is * used. * * A time variable is a signed 64-bit fixed-point number in ns and * fraction. It represents the remaining time offset to be amortized * over succeeding tick interrupts. The maximum time offset is about * 0.512 s and the resolution is about 2.3e-10 ns. 
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s|                         ns                              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           fraction                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A frequency variable is a signed 64-bit fixed-point number in ns/s
 * and fraction. It represents the ns and fraction to be added to the
 * kernel time variable at each second. The maximum frequency offset is
 * about +-512000 ns/s and the resolution is about 2.3e-10 ns/s.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s s s s s s s s s s s|            ns/s                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           fraction                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.
 */
#define SHIFT_PLL	4	/* PLL loop gain (shift) */
#define SHIFT_FLL	2	/* FLL loop gain (shift) */

static int time_state = TIME_OK;	/* clock state */
static int time_status = STA_UNSYNC;	/* clock status bits */
static long time_constant;		/* poll interval (shift) (s) */
static long time_precision = 1;		/* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
static long time_esterror = MAXPHASE / 1000; /* estimated error (us) */
static long time_reftime;		/* time at last adjustment (s) */
static long time_tick;			/* nanoseconds per tick (ns) */
static l_fp time_offset;		/* time offset (ns) */
static l_fp time_freq;			/* frequency offset (ns/s) */

#ifdef PPS_SYNC
/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available and connected via a modem control lead. They establish
 * the engineering parameters of the clock discipline loop when
 * controlled by the PPS signal.
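 *
 * In these shift terms, the calibration interval below starts at
 * 1 << PPS_FAVG = 4 s and, while the wander stays below threshold,
 * is doubled step by step up to 1 << PPS_FAVGMAX = 256 s.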
*/ #define PPS_FAVG 2 /* min freq avg interval (s) (shift) */ #define PPS_FAVGMAX 8 /* max freq avg interval (s) (shift) */ #define PPS_PAVG 4 /* phase avg interval (s) (shift) */ #define PPS_VALID 120 /* PPS signal watchdog max (s) */ #define MAXTIME 500000 /* max PPS error (jitter) (ns) */ #define MAXWANDER 500000 /* max PPS wander (ns/s/s) */ struct ppstime { long sec; /* PPS seconds */ long nsec; /* PPS nanoseconds */ long count; /* PPS nanosecond counter */ }; static struct ppstime pps_tf[3]; /* phase median filter */ static struct ppstime pps_filt; /* phase offset */ static l_fp pps_freq; /* scaled frequency offset (ns/s) */ -static long pps_lastfreq; /* last scaled freq offset (ns/s) */ static long pps_offacc; /* offset accumulator */ static long pps_jitter; /* scaled time dispersion (ns) */ static long pps_stabil; /* scaled frequency dispersion (ns/s) */ static long pps_lastcount; /* last counter offset */ static long pps_lastsec; /* time at last calibration (s) */ static int pps_valid; /* signal watchdog counter */ static int pps_shift = PPS_FAVG; /* interval duration (s) (shift) */ static int pps_intcnt; /* wander counter */ static int pps_offcnt; /* offset accumulator counter */ /* * PPS signal quality monitors */ static long pps_calcnt; /* calibration intervals */ static long pps_jitcnt; /* jitter limit exceeded */ static long pps_stbcnt; /* stability limit exceeded */ static long pps_errcnt; /* calibration errors */ #endif /* PPS_SYNC */ /* * End of phase/frequency-lock loop (PLL/FLL) definitions */ static void ntp_init(void); static void hardupdate(long offset); /* * ntp_gettime() - NTP user application interface * * See the timex.h header file for synopsis and API description. */ static int ntp_sysctl SYSCTL_HANDLER_ARGS { struct ntptimeval ntv; /* temporary structure */ struct timespec atv; /* nanosecond time */ nanotime(&atv); ntv.time.tv_sec = atv.tv_sec; ntv.time.tv_nsec = atv.tv_nsec; ntv.maxerror = time_maxerror; ntv.esterror = time_esterror; ntv.time_state = time_state; /* * Status word error decode. If any of these conditions occur, * an error is returned, instead of the status word. Most * applications will care only about the fact the system clock * may not be trusted, not about the details. * * Hardware or software error */ if ((time_status & (STA_UNSYNC | STA_CLOCKERR)) || /* * PPS signal lost when either time or frequency synchronization * requested */ (time_status & (STA_PPSFREQ | STA_PPSTIME) && !(time_status & STA_PPSSIGNAL)) || /* * PPS jitter exceeded when time synchronization requested */ (time_status & STA_PPSTIME && time_status & STA_PPSJITTER) || /* * PPS wander exceeded or calibration error when frequency * synchronization requested */ (time_status & STA_PPSFREQ && time_status & (STA_PPSWANDER | STA_PPSERROR))) ntv.time_state = TIME_ERROR; return (sysctl_handle_opaque(oidp, &ntv, sizeof ntv, req)); } SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, ""); SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE|CTLFLAG_RD, 0, sizeof(struct ntptimeval) , ntp_sysctl, "S,ntptimeval", ""); /* * ntp_adjtime() - NTP daemon application interface * * See the timex.h header file for synopsis and API description. 
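 *
 * A minimal caller might look like this (userland sketch; note that
 * the suser() check below applies even to a pure query):
 *
 *	struct timex tx;
 *
 *	bzero(&tx, sizeof tx);			modes == 0: read-only query
 *	if (ntp_adjtime(&tx) == TIME_ERROR)
 *		complain();			see the status decode above
 *
 * with tx.freq reported in the scaled ppm units of SCALE_PPM.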
*/ #ifndef _SYS_SYSPROTO_H_ struct ntp_adjtime_args { struct timex *tp; }; #endif int ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap) { struct timex ntv; /* temporary structure */ int modes; /* mode bits from structure */ int s; /* caller priority */ int error; error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv)); if (error) return(error); /* * Update selected clock variables - only the superuser can * change anything. Note that there is no error checking here on * the assumption the superuser should know what it is doing. */ modes = ntv.modes; error = suser(p->p_cred->pc_ucred, &p->p_acflag); if (error) return (error); s = splclock(); if (modes & MOD_FREQUENCY) { L_LINT(time_freq, ntv.freq / SCALE_PPM); #ifdef PPS_SYNC pps_freq = time_freq; #endif /* PPS_SYNC */ } if (modes & MOD_MAXERROR) time_maxerror = ntv.maxerror; if (modes & MOD_ESTERROR) time_esterror = ntv.esterror; if (modes & MOD_STATUS) { time_status &= STA_RONLY; time_status |= ntv.status & ~STA_RONLY; } if (modes & MOD_TIMECONST) time_constant = ntv.constant; if (modes & MOD_NANO) time_status |= STA_NANO; if (modes & MOD_MICRO) time_status &= ~STA_NANO; if (modes & MOD_CLKB) time_status |= STA_CLK; if (modes & MOD_CLKA) time_status &= ~STA_CLK; if (modes & MOD_OFFSET) { if (time_status & STA_NANO) hardupdate(ntv.offset); else hardupdate(ntv.offset * 1000); } /* * Retrieve all clock variables */ if (time_status & STA_NANO) ntv.offset = L_GINT(time_offset); else ntv.offset = L_GINT(time_offset) / 1000; ntv.freq = L_GINT(time_freq) * SCALE_PPM; ntv.maxerror = time_maxerror; ntv.esterror = time_esterror; ntv.status = time_status; if (ntv.constant < 0) time_constant = 0; else if (ntv.constant > MAXTC) time_constant = MAXTC; else time_constant = ntv.constant; if (time_status & STA_NANO) ntv.precision = time_precision; else ntv.precision = time_precision / 1000; ntv.tolerance = MAXFREQ * SCALE_PPM; #ifdef PPS_SYNC ntv.shift = pps_shift; ntv.ppsfreq = L_GINT(pps_freq) * SCALE_PPM; ntv.jitter = pps_jitter; if (time_status & STA_NANO) ntv.jitter = pps_jitter; else ntv.jitter = pps_jitter / 1000; ntv.stabil = pps_stabil; ntv.calcnt = pps_calcnt; ntv.errcnt = pps_errcnt; ntv.jitcnt = pps_jitcnt; ntv.stbcnt = pps_stbcnt; #endif /* PPS_SYNC */ splx(s); error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv)); if (error) return (error); /* * Status word error decode. See comments in * ntp_gettime() routine. */ if ((time_status & (STA_UNSYNC | STA_CLOCKERR)) || (time_status & (STA_PPSFREQ | STA_PPSTIME) && !(time_status & STA_PPSSIGNAL)) || (time_status & STA_PPSTIME && time_status & STA_PPSJITTER) || (time_status & STA_PPSFREQ && time_status & (STA_PPSWANDER | STA_PPSERROR))) return (TIME_ERROR); return (time_state); } /* * second_overflow() - called after ntp_tick_adjust() * * This routine is ordinarily called immediately following the above * routine ntp_tick_adjust(). While these two routines are normally * combined, they are separated here only for the purposes of * simulation. */ void ntp_update_second(struct timecounter *tcp) { u_int32_t *newsec; - l_fp ftemp, time_adj; /* 32/64-bit temporary */ + l_fp ftemp, time_adj; /* 32/64-bit temporaries */ newsec = &tcp->tc_offset_sec; time_maxerror += MAXFREQ / 1000; /* * Leap second processing. If in leap-insert state at * the end of the day, the system clock is set back one * second; if in leap-delete state, the system clock is * set ahead one second. The nano_time() routine or * external clock driver will insure that reported time * is always monotonic. 
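 *
 * As a concrete example of the insert case: with STA_INS set, the
 * switch below steps the seconds counter back by one when it reaches
 * a multiple of 86400, so the reported UTC sequence runs 23:59:59,
 * 23:59:60 (the repeated second), 00:00:00, and the state moves
 * through TIME_INS -> TIME_OOP -> TIME_WAIT -> TIME_OK as the status
 * bits clear.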
*/ switch (time_state) { /* * No warning. */ case TIME_OK: if (time_status & STA_INS) time_state = TIME_INS; else if (time_status & STA_DEL) time_state = TIME_DEL; break; /* * Insert second 23:59:60 following second * 23:59:59. */ case TIME_INS: if (!(time_status & STA_INS)) time_state = TIME_OK; else if ((*newsec) % 86400 == 0) { (*newsec)--; time_state = TIME_OOP; } break; /* * Delete second 23:59:59. */ case TIME_DEL: if (!(time_status & STA_DEL)) time_state = TIME_OK; else if (((*newsec) + 1) % 86400 == 0) { (*newsec)++; time_state = TIME_WAIT; } break; /* * Insert second in progress. */ case TIME_OOP: time_state = TIME_WAIT; break; /* * Wait for status bits to clear. */ case TIME_WAIT: if (!(time_status & (STA_INS | STA_DEL))) time_state = TIME_OK; } /* * Compute the total time adjustment for the next * second in ns. The offset is reduced by a factor * depending on FLL or PLL mode and whether the PPS * signal is operating. Note that the value is in effect * scaled by the clock frequency, since the adjustment * is added at each tick interrupt. */ ftemp = time_offset; #ifdef PPS_SYNC if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL) L_RSHIFT(ftemp, PPS_FAVG); else if (time_status & STA_MODE) #else if (time_status & STA_MODE) #endif /* PPS_SYNC */ L_RSHIFT(ftemp, SHIFT_FLL); else L_RSHIFT(ftemp, SHIFT_PLL + time_constant); time_adj = ftemp; L_SUB(time_offset, ftemp); L_ADD(time_adj, time_freq); tcp->tc_adjustment = time_adj; #ifdef PPS_SYNC if (pps_valid > 0) pps_valid--; else time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR); #endif /* PPS_SYNC */ } /* * ntp_init() - initialize variables and structures * * This routine must be called after the kernel variables hz and tick * are set or changed and before the next tick interrupt. In this * particular implementation, these values are assumed set elsewhere in * the kernel. The design allows the clock frequency and tick interval * to be changed while the system is running. So, this routine should * probably be integrated with the code that does that. */ static void ntp_init() { /* * The following variable must be initialized any time the * kernel variable hz is changed. */ time_tick = NANOSECOND / hz; /* * The following variables are initialized only at startup. Only * those structures not cleared by the compiler need to be * initialized, and these only in the simulator. In the actual * kernel, any nonzero values here will quickly evaporate. */ L_CLR(time_offset); L_CLR(time_freq); #ifdef PPS_SYNC pps_filt.sec = pps_filt.nsec = pps_filt.count = 0; pps_tf[0] = pps_tf[1] = pps_tf[2] = pps_filt; L_CLR(pps_freq); #endif /* PPS_SYNC */ } SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, ntp_init, NULL) /* * hardupdate() - local clock update * * This routine is called by ntp_adjtime() to update the local clock * phase and frequency. The implementation is of an adaptive-parameter, * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new * time and frequency offset estimates for each call. If the kernel PPS * discipline code is configured (PPS_SYNC), the PPS signal itself * determines the new time offset, instead of the calling argument. * Presumably, calls to ntp_adjtime() occur only when the caller * believes the local clock is valid within some bound (+-128 ms with * NTP). If the caller's time is far different than the PPS time, an * argument will ensue, and it's not clear who will lose. 
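 *
 * To put a number on the frequency step (values hypothetical): in FLL
 * mode, selected as described below, an offset of 100 usec measured
 * 512 s after the last update adds ((100000 << 4) / 512) >>
 * (SHIFT_FLL + 4), about 49 ns/s, to time_freq; that is, offset /
 * interval scaled down by the FLL gain.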
* * For uncompensated quartz crystal oscillators and nominal update * intervals less than 256 s, operation should be in phase-lock mode, * where the loop is disciplined to phase. For update intervals greater * than 1024 s, operation should be in frequency-lock mode, where the * loop is disciplined to frequency. Between 256 s and 1024 s, the mode * is selected by the STA_MODE status bit. */ static void hardupdate(offset) long offset; /* clock offset (ns) */ { long ltemp, mtemp; l_fp ftemp; /* * Select how the phase is to be controlled and from which * source. If the PPS signal is present and enabled to * discipline the time, the PPS offset is used; otherwise, the * argument offset is used. */ ltemp = offset; if (ltemp > MAXPHASE) ltemp = MAXPHASE; else if (ltemp < -MAXPHASE) ltemp = -MAXPHASE; if (!(time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)) L_LINT(time_offset, ltemp); /* * Select how the frequency is to be controlled and in which * mode (PLL or FLL). If the PPS signal is present and enabled * to discipline the frequency, the PPS frequency is used; * otherwise, the argument offset is used to compute it. */ if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) { time_reftime = time_second; return; } if (time_status & STA_FREQHOLD || time_reftime == 0) time_reftime = time_second; mtemp = time_second - time_reftime; if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC) ) { L_LINT(ftemp, (ltemp << 4) / mtemp); L_RSHIFT(ftemp, SHIFT_FLL + 4); L_ADD(time_freq, ftemp); time_status |= STA_MODE; } else { L_LINT(ftemp, ltemp); L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1); L_MPY(ftemp, mtemp); L_ADD(time_freq, ftemp); time_status &= ~STA_MODE; } time_reftime = time_second; if (L_GINT(time_freq) > MAXFREQ) L_LINT(time_freq, MAXFREQ); else if (L_GINT(time_freq) < -MAXFREQ) L_LINT(time_freq, -MAXFREQ); } #ifdef PPS_SYNC /* * hardpps() - discipline CPU clock oscillator to external PPS signal * * This routine is called at each PPS interrupt in order to discipline * the CPU clock oscillator to the PPS signal. It measures the PPS phase * and leaves it in a handy spot for the hardclock() routine. It * integrates successive PPS phase differences and calculates the * frequency offset. This is used in hardclock() to discipline the CPU * clock oscillator so that the intrinsic frequency error is cancelled * out. The code requires the caller to capture the time and * architecture-dependent hardware counter values in nanoseconds at the * on-time PPS signal transition. * * Note that, on some Unix systems this routine runs at an interrupt * priority level higher than the timer interrupt routine hardclock(). * Therefore, the variables used are distinct from the hardclock() * variables, except for the actual time and frequency variables, which * are determined by this routine and updated atomically. */ void hardpps(tsp, nsec) struct timespec *tsp; /* time at PPS */ long nsec; /* hardware counter at PPS */ { long u_sec, u_nsec, v_nsec; /* temps */ l_fp ftemp; /* * The signal is first processed by a frequency discriminator * which rejects noise and input signals with frequencies * outside the range 1 +-MAXFREQ PPS. If two hits occur in the * same second, we ignore the later hit; if not and a hit occurs * outside the range gate, keep the later hit but do not * process it. 
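 *
 * To make the gate concrete: the timestamp is first folded into the
 * nearest second, so a hypothetical pulse at tv_nsec = 999999500
 * becomes u_nsec = -500 against the following second; it is then
 * dropped if it falls in the same second as the previous pulse more
 * than MAXFREQ ns early, or arrives more than MAXFREQ ns late.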
 */
	time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
	time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
	pps_valid = PPS_VALID;
	u_sec = tsp->tv_sec;
	u_nsec = tsp->tv_nsec;
	if (u_nsec >= (NANOSECOND >> 1)) {
		u_nsec -= NANOSECOND;
		u_sec++;
	}
	v_nsec = u_nsec - pps_tf[0].nsec;
	if (u_sec == pps_tf[0].sec && v_nsec < -MAXFREQ) {
		return;
	}
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0].sec = u_sec;
	pps_tf[0].nsec = u_nsec;

	/*
	 * Compute the difference between the current and previous
	 * counter values. If the difference exceeds 0.5 s, assume it
	 * has wrapped around, so correct 1.0 s. If the result exceeds
	 * the tick interval, the sample point has crossed a tick
	 * boundary during the last second, so correct the tick. Very
	 * intricate.
	 */
-	u_nsec = nsec - pps_lastcount;
-	pps_lastcount = nsec;
+	u_nsec = nsec;
	if (u_nsec > (NANOSECOND >> 1))
		u_nsec -= NANOSECOND;
	else if (u_nsec < -(NANOSECOND >> 1))
		u_nsec += NANOSECOND;
+#if 0
	if (u_nsec > (time_tick >> 1))
		u_nsec -= time_tick;
	else if (u_nsec < -(time_tick >> 1))
		u_nsec += time_tick;
+#endif
	pps_tf[0].count = pps_tf[1].count + u_nsec;
	if (v_nsec > MAXFREQ) {
		return;
	}
	time_status &= ~STA_PPSJITTER;

	/*
	 * A three-stage median filter is used to help denoise the PPS
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	if (pps_tf[0].nsec > pps_tf[1].nsec) {
		if (pps_tf[1].nsec > pps_tf[2].nsec) {
			pps_filt = pps_tf[1];		/* 0 1 2 */
			u_nsec = pps_tf[0].nsec - pps_tf[2].nsec;
		} else if (pps_tf[2].nsec > pps_tf[0].nsec) {
			pps_filt = pps_tf[0];		/* 2 0 1 */
			u_nsec = pps_tf[2].nsec - pps_tf[1].nsec;
		} else {
			pps_filt = pps_tf[2];		/* 0 2 1 */
			u_nsec = pps_tf[0].nsec - pps_tf[1].nsec;
		}
	} else {
		if (pps_tf[1].nsec < pps_tf[2].nsec) {
			pps_filt = pps_tf[1];		/* 2 1 0 */
			u_nsec = pps_tf[2].nsec - pps_tf[0].nsec;
		} else if (pps_tf[2].nsec < pps_tf[0].nsec) {
			pps_filt = pps_tf[0];		/* 1 0 2 */
			u_nsec = pps_tf[1].nsec - pps_tf[2].nsec;
		} else {
			pps_filt = pps_tf[2];		/* 1 2 0 */
			u_nsec = pps_tf[1].nsec - pps_tf[0].nsec;
		}
	}

	/*
	 * Nominal jitter is due to PPS signal noise and interrupt
	 * latency. If it exceeds the jitter limit, the sample is
	 * discarded; otherwise, if so enabled, the time offset is
	 * updated. The offsets are accumulated over the phase averaging
	 * interval to improve accuracy. The jitter is averaged only for
	 * performance monitoring. We can tolerate a modest loss of data
	 * here without degrading time accuracy.
	 */
	if (u_nsec > MAXTIME) {
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		pps_offacc -= pps_filt.nsec;
		pps_offcnt++;
	}
	if (pps_offcnt >= (1 << PPS_PAVG)) {
		if (time_status & STA_PPSTIME) {
			L_LINT(time_offset, pps_offacc);
			L_RSHIFT(time_offset, PPS_PAVG);
		}
		pps_offacc = 0;
		pps_offcnt = 0;
	}
	pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
	u_sec = pps_tf[0].sec - pps_lastsec;
	if (u_sec < (1 << pps_shift))
		return;

	/*
	 * At the end of the calibration interval the difference between
	 * the first and last counter values becomes the scaled
	 * frequency. It will later be divided by the length of the
	 * interval to determine the frequency update. If the frequency
	 * exceeds a sanity threshold, or if the actual calibration
	 * interval is not equal to the expected length, the data are
	 * discarded. We can tolerate a modest loss of data here without
	 * degrading frequency accuracy.
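	 *
	 * A worked example with hypothetical numbers: at pps_shift = 2
	 * the interval is 4 s; if the counter difference accumulated to
	 * pps_filt.count = -400 ns over those 4 s, then v_nsec = 400
	 * below and L_RSHIFT by pps_shift yields a 100 ns/s frequency
	 * estimate, whose difference from pps_freq is then averaged in.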
*/ pps_calcnt++; v_nsec = -pps_filt.count; pps_lastsec = pps_tf[0].sec; pps_tf[0].count = 0; u_nsec = MAXFREQ << pps_shift; if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) { time_status |= STA_PPSERROR; pps_errcnt++; return; } /* * If the actual calibration interval is not equal to the * expected length, the data are discarded. If the wander is * less than the wander threshold for four consecutive * intervals, the interval is doubled; if it is greater than the * threshold for four consecutive intervals, the interval is * halved. The scaled frequency offset is converted to frequency * offset. The stability metric is calculated as the average of * recent frequency changes, but is used only for performance * monitoring. */ L_LINT(ftemp, v_nsec); L_RSHIFT(ftemp, pps_shift); L_SUB(ftemp, pps_freq); u_nsec = L_GINT(ftemp); if (u_nsec > MAXWANDER) { L_LINT(ftemp, MAXWANDER); pps_intcnt--; time_status |= STA_PPSWANDER; pps_stbcnt++; } else if (u_nsec < -MAXWANDER) { L_LINT(ftemp, -MAXWANDER); pps_intcnt--; time_status |= STA_PPSWANDER; pps_stbcnt++; } else { pps_intcnt++; } if (pps_intcnt >= 4) { pps_intcnt = 4; if (pps_shift < PPS_FAVGMAX) { pps_shift++; pps_intcnt = 0; } } else if (pps_intcnt <= -4) { pps_intcnt = -4; if (pps_shift > PPS_FAVG) { pps_shift--; pps_intcnt = 0; } } if (u_nsec < 0) u_nsec = -u_nsec; pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG; /* * The frequency offset is averaged into the PPS frequency. If * enabled, the system clock frequency is updated as well. */ L_RSHIFT(ftemp, PPS_FAVG); L_ADD(pps_freq, ftemp); u_nsec = L_GINT(pps_freq); if (u_nsec > MAXFREQ) L_LINT(pps_freq, MAXFREQ); else if (u_nsec < -MAXFREQ) L_LINT(pps_freq, -MAXFREQ); if (time_status & STA_PPSFREQ) time_freq = pps_freq; } #endif /* PPS_SYNC */ - -int -std_pps_ioctl(u_long cmd, caddr_t data, pps_params_t *pp, pps_info_t *pi, int ppscap) -{ - pps_params_t *app; - pps_info_t *api; - - switch (cmd) { - case PPS_IOC_CREATE: - return (0); - case PPS_IOC_DESTROY: - return (0); - case PPS_IOC_SETPARAMS: - app = (pps_params_t *)data; - if (app->mode & ~ppscap) - return (EINVAL); - *pp = *app; - return (0); - case PPS_IOC_GETPARAMS: - app = (pps_params_t *)data; - *app = *pp; - return (0); - case PPS_IOC_GETCAP: - *(int*)data = ppscap; - return (0); - case PPS_IOC_FETCH: - api = (pps_info_t *)data; - *api = *pi; - pi->current_mode = pp->mode; - return (0); - case PPS_IOC_WAIT: - return (EOPNOTSUPP); - default: - return (ENODEV); - } -} Index: head/sys/kern/kern_tc.c =================================================================== --- head/sys/kern/kern_tc.c (revision 44665) +++ head/sys/kern/kern_tc.c (revision 44666) @@ -1,866 +1,961 @@ /*- * Copyright (c) 1997, 1998 Poul-Henning Kamp * Copyright (c) 1982, 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 - * $Id: kern_clock.c,v 1.88 1999/02/19 19:34:49 luoqi Exp $ + * $Id: kern_clock.c,v 1.89 1999/03/08 12:35:58 phk Exp $ */ +#include "opt_ntp.h" + #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #ifdef GPROF #include #endif #if defined(SMP) && defined(BETTER_CLOCK) #include #endif /* This is where the NTIMECOUNTER option hangs out */ #include "opt_ntp.h" /* * Number of timecounters used to implement stable storage */ #ifndef NTIMECOUNTER #define NTIMECOUNTER 5 #endif static MALLOC_DEFINE(M_TIMECOUNTER, "timecounter", "Timecounter stable storage"); static void initclocks __P((void *dummy)); SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL) static void tco_forward __P((int force)); static void tco_setscales __P((struct timecounter *tc)); static __inline unsigned tco_delta __P((struct timecounter *tc)); /* Some of these don't belong here, but it's easiest to concentrate them. */ #if defined(SMP) && defined(BETTER_CLOCK) long cp_time[CPUSTATES]; #else static long cp_time[CPUSTATES]; #endif long tk_cancc; long tk_nin; long tk_nout; long tk_rawcc; time_t time_second; /* * Which update policy to use. * 0 - every tick, bad hardware may fail with "calcru negative..." * 1 - more resistant to the above hardware, but less efficient. */ static int tco_method; /* * Implement a dummy timecounter which we can use until we get a real one * in the air. This allows the console and other early stuff to use * time services. */ static unsigned dummy_get_timecount(struct timecounter *tc) { static unsigned now; return (++now); } static struct timecounter dummy_timecounter = { dummy_get_timecount, 0, ~0u, 1000000, "dummy" }; struct timecounter *timecounter = &dummy_timecounter; /* * Clock handling routines. * * This code is written to operate with two timers that run independently of * each other. * * The main timer, running hz times per second, is used to trigger interval * timers, timeouts and rescheduling as needed.
* * The second timer handles kernel and user profiling, * and does resource use estimation. If the second timer is programmable, * it is randomized to avoid aliasing between the two clocks. For example, * the randomization prevents an adversary from always giving up the cpu * just before its quantum expires. Otherwise, it would never accumulate * cpu ticks. The mean frequency of the second timer is stathz. * * If no second timer exists, stathz will be zero; in this case we drive * profiling and statistics off the main clock. This WILL NOT be accurate; * do not do it unless absolutely necessary. * * The statistics clock may (or may not) be run at a higher rate while * profiling. This profile clock runs at profhz. We require that profhz * be an integral multiple of stathz. * * If the statistics clock is running fast, it must be divided by the ratio * profhz/stathz for statistics. (For profiling, every tick counts.) * * Time-of-day is maintained using a "timecounter", which may or may * not be related to the hardware generating the above mentioned * interrupts. */ int stathz; int profhz; static int profprocs; int ticks; static int psdiv, pscnt; /* prof => stat divider */ int psratio; /* ratio: prof / stat */ /* * Initialize clock frequencies and start both clocks running. */ /* ARGSUSED*/ static void initclocks(dummy) void *dummy; { register int i; /* * Set divisors to 1 (normal case) and let the machine-specific * code do its bit. */ psdiv = pscnt = 1; cpu_initclocks(); /* * Compute profhz/stathz, and fix profhz if needed. */ i = stathz ? stathz : hz; if (profhz == 0) profhz = i; psratio = profhz / i; } /* * The real-time timer, interrupting hz times per second. */ void hardclock(frame) register struct clockframe *frame; { register struct proc *p; p = curproc; if (p) { register struct pstats *pstats; /* * Run current process's virtual and profile time, as needed. */ pstats = p->p_stats; if (CLKF_USERMODE(frame) && timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) psignal(p, SIGVTALRM); if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) && itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) psignal(p, SIGPROF); } #if defined(SMP) && defined(BETTER_CLOCK) forward_hardclock(pscnt); #endif /* * If no separate statistics clock is available, run it from here. */ if (stathz == 0) statclock(frame); tco_forward(0); ticks++; /* * Process callouts at a very low cpu priority, so we don't keep the * relatively high clock interrupt priority any longer than necessary. */ if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) { if (CLKF_BASEPRI(frame)) { /* * Save the overhead of a software interrupt; * it will happen as soon as we return, so do it now. */ (void)splsoftclock(); softclock(); } else setsoftclock(); } else if (softticks + 1 == ticks) ++softticks; } /* * Compute number of ticks in the specified amount of time. */ int tvtohz(tv) struct timeval *tv; { register unsigned long ticks; register long sec, usec; /* * If the number of usecs in the whole seconds part of the time * difference fits in a long, then the total number of usecs will * fit in an unsigned long. Compute the total and convert it to * ticks, rounding up and adding 1 to allow for the current tick * to expire. Rounding also depends on unsigned long arithmetic * to avoid overflow. 
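* * As a worked example of the first case above (hz = 100 and hence * tick = 10000 usec are assumed here purely for illustration): * tv = 1.5 s gives (1 * 1000000 + 500000 + 9999) / 10000 + 1 = * 150 + 1 = 151 ticks.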
* * Otherwise, if the number of ticks in the whole seconds part of * the time difference fits in a long, then convert the parts to * ticks separately and add, using similar rounding methods and * overflow avoidance. This method would work in the previous * case but it is slightly slower and assumes that hz is integral. * * Otherwise, round the time difference down to the maximum * representable value. * * If ints have 32 bits, then the maximum value for any timeout in * 10ms ticks is 248 days. */ sec = tv->tv_sec; usec = tv->tv_usec; if (usec < 0) { sec--; usec += 1000000; } if (sec < 0) { #ifdef DIAGNOSTIC if (usec > 0) { sec++; usec -= 1000000; } printf("tvtohz: negative time difference %ld sec %ld usec\n", sec, usec); #endif ticks = 1; } else if (sec <= LONG_MAX / 1000000) ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1)) / tick + 1; else if (sec <= LONG_MAX / hz) ticks = sec * hz + ((unsigned long)usec + (tick - 1)) / tick + 1; else ticks = LONG_MAX; if (ticks > INT_MAX) ticks = INT_MAX; return ((int)ticks); } /* * Start profiling on a process. * * Kernel profiling passes proc0 which never exits and hence * keeps the profile clock running constantly. */ void startprofclock(p) register struct proc *p; { int s; if ((p->p_flag & P_PROFIL) == 0) { p->p_flag |= P_PROFIL; if (++profprocs == 1 && stathz != 0) { s = splstatclock(); psdiv = pscnt = psratio; setstatclockrate(profhz); splx(s); } } } /* * Stop profiling on a process. */ void stopprofclock(p) register struct proc *p; { int s; if (p->p_flag & P_PROFIL) { p->p_flag &= ~P_PROFIL; if (--profprocs == 0 && stathz != 0) { s = splstatclock(); psdiv = pscnt = 1; setstatclockrate(stathz); splx(s); } } } /* * Statistics clock. Grab profile sample, and if divider reaches 0, * do process and kernel statistics. */ void statclock(frame) register struct clockframe *frame; { #ifdef GPROF register struct gmonparam *g; int i; #endif register struct proc *p; struct pstats *pstats; long rss; struct rusage *ru; struct vmspace *vm; if (curproc != NULL && CLKF_USERMODE(frame)) { p = curproc; if (p->p_flag & P_PROFIL) addupc_intr(p, CLKF_PC(frame), 1); #if defined(SMP) && defined(BETTER_CLOCK) if (stathz != 0) forward_statclock(pscnt); #endif if (--pscnt > 0) return; /* * Came from user mode; CPU was in user state. * If this process is being profiled, record the tick. */ p->p_uticks++; if (p->p_nice > NZERO) cp_time[CP_NICE]++; else cp_time[CP_USER]++; } else { #ifdef GPROF /* * Kernel statistics are just like addupc_intr, only easier. */ g = &_gmonparam; if (g->state == GMON_PROF_ON) { i = CLKF_PC(frame) - g->lowpc; if (i < g->textsize) { i /= HISTFRACTION * sizeof(*g->kcount); g->kcount[i]++; } } #endif #if defined(SMP) && defined(BETTER_CLOCK) if (stathz != 0) forward_statclock(pscnt); #endif if (--pscnt > 0) return; /* * Came from kernel mode, so we were: * - handling an interrupt, * - doing syscall or trap work on behalf of the current * user process, or * - spinning in the idle loop. * Whichever it is, charge the time as appropriate. * Note that we charge interrupts to the current process, * regardless of whether they are ``for'' that process, * so that we know how much of its real time was spent * in ``non-process'' (i.e., interrupt) work. */ p = curproc; if (CLKF_INTR(frame)) { if (p != NULL) p->p_iticks++; cp_time[CP_INTR]++; } else if (p != NULL) { p->p_sticks++; cp_time[CP_SYS]++; } else cp_time[CP_IDLE]++; } pscnt = psdiv; /* * We maintain statistics shown by user-level statistics * programs: the amount of time in each cpu state.
*/ /* * We adjust the priority of the current process. The priority of * a process gets worse as it accumulates CPU time. The cpu usage * estimator (p_estcpu) is increased here. The formula for computing * priorities (in kern_synch.c) will compute a different value each * time p_estcpu increases by 4. The cpu usage estimator ramps up * quite quickly when the process is running (linearly), and decays * away exponentially, at a rate which is proportionally slower when * the system is busy. The basic principle is that the system will * 90% forget that the process used a lot of CPU time in 5 * loadav * seconds. This causes the system to favor processes which haven't * run much recently, and to round-robin among other processes. */ if (p != NULL) { p->p_cpticks++; if (++p->p_estcpu == 0) p->p_estcpu--; if ((p->p_estcpu & 3) == 0) { resetpriority(p); if (p->p_priority >= PUSER) p->p_priority = p->p_usrpri; } /* Update resource usage integrals and maximums. */ if ((pstats = p->p_stats) != NULL && (ru = &pstats->p_ru) != NULL && (vm = p->p_vmspace) != NULL) { ru->ru_ixrss += pgtok(vm->vm_tsize); ru->ru_idrss += pgtok(vm->vm_dsize); ru->ru_isrss += pgtok(vm->vm_ssize); rss = pgtok(vmspace_resident_count(vm)); if (ru->ru_maxrss < rss) ru->ru_maxrss = rss; } } } /* * Return information about system clocks. */ static int sysctl_kern_clockrate SYSCTL_HANDLER_ARGS { struct clockinfo clkinfo; /* * Construct clockinfo structure. */ clkinfo.hz = hz; clkinfo.tick = tick; clkinfo.tickadj = tickadj; clkinfo.profhz = profhz; clkinfo.stathz = stathz ? stathz : hz; return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req)); } SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0, sysctl_kern_clockrate, "S,clockinfo",""); static __inline unsigned tco_delta(struct timecounter *tc) { return ((tc->tc_get_timecount(tc) - tc->tc_offset_count) & tc->tc_counter_mask); } /* - * We have four functions for looking at the clock, two for microseconds - * and two for nanoseconds. For each there is fast but less precise - * version "get{nano|micro}time" which will return a time which is up - * to 1/HZ previous to the call, whereas the raw version "{nano|micro}time" - * will return a timestamp which is as precise as possible. + * We have eight functions for looking at the clock, four for + * microseconds and four for nanoseconds. For each there is a fast + * but less precise version "get{nano|micro}[up]time" which will + * return a time which is up to 1/HZ previous to the call, whereas + * the raw version "{nano|micro}[up]time" will return a timestamp + * which is as precise as possible. The "up" variants return the + * time relative to system boot; these are well suited for time + * interval measurements.
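+ * + * A minimal interval-measurement sketch using the "up" variants + * (do_work() is an illustrative placeholder, not a real kernel + * function): + * + * struct timespec t0, t1; + * + * getnanouptime(&t0); + * do_work(); + * getnanouptime(&t1); + * timespecsub(&t1, &t0); + * + * after which t1 holds the elapsed time, unaffected by any steps + * made to the wall-clock time with set_timecounter().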
*/ void getmicrotime(struct timeval *tvp) { struct timecounter *tc; if (!tco_method) { tc = timecounter; *tvp = tc->tc_microtime; } else { microtime(tvp); } } void getnanotime(struct timespec *tsp) { struct timecounter *tc; if (!tco_method) { tc = timecounter; *tsp = tc->tc_nanotime; } else { nanotime(tsp); } } void microtime(struct timeval *tv) { struct timecounter *tc; tc = (struct timecounter *)timecounter; tv->tv_sec = tc->tc_offset_sec; tv->tv_usec = tc->tc_offset_micro; tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32; tv->tv_usec += boottime.tv_usec; tv->tv_sec += boottime.tv_sec; while (tv->tv_usec >= 1000000) { tv->tv_usec -= 1000000; tv->tv_sec++; } } void nanotime(struct timespec *ts) { unsigned count; u_int64_t delta; struct timecounter *tc; tc = (struct timecounter *)timecounter; ts->tv_sec = tc->tc_offset_sec; count = tco_delta(tc); delta = tc->tc_offset_nano; delta += ((u_int64_t)count * tc->tc_scale_nano_f); delta >>= 32; delta += ((u_int64_t)count * tc->tc_scale_nano_i); delta += boottime.tv_usec * 1000; ts->tv_sec += boottime.tv_sec; while (delta >= 1000000000) { delta -= 1000000000; ts->tv_sec++; } ts->tv_nsec = delta; } void -timecounter_timespec(unsigned count, struct timespec *ts) -{ - u_int64_t delta; - struct timecounter *tc; - - tc = (struct timecounter *)timecounter; - ts->tv_sec = tc->tc_offset_sec; - count -= tc->tc_offset_count; - count &= tc->tc_counter_mask; - delta = tc->tc_offset_nano; - delta += ((u_int64_t)count * tc->tc_scale_nano_f); - delta >>= 32; - delta += ((u_int64_t)count * tc->tc_scale_nano_i); - delta += boottime.tv_usec * 1000; - ts->tv_sec += boottime.tv_sec; - while (delta >= 1000000000) { - delta -= 1000000000; - ts->tv_sec++; - } - ts->tv_nsec = delta; -} - -void getmicrouptime(struct timeval *tvp) { struct timecounter *tc; if (!tco_method) { tc = timecounter; tvp->tv_sec = tc->tc_offset_sec; tvp->tv_usec = tc->tc_offset_micro; } else { microuptime(tvp); } } void getnanouptime(struct timespec *tsp) { struct timecounter *tc; if (!tco_method) { tc = timecounter; tsp->tv_sec = tc->tc_offset_sec; tsp->tv_nsec = tc->tc_offset_nano >> 32; } else { nanouptime(tsp); } } void microuptime(struct timeval *tv) { struct timecounter *tc; tc = (struct timecounter *)timecounter; tv->tv_sec = tc->tc_offset_sec; tv->tv_usec = tc->tc_offset_micro; tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32; if (tv->tv_usec >= 1000000) { tv->tv_usec -= 1000000; tv->tv_sec++; } } void nanouptime(struct timespec *ts) { unsigned count; u_int64_t delta; struct timecounter *tc; tc = (struct timecounter *)timecounter; ts->tv_sec = tc->tc_offset_sec; count = tco_delta(tc); delta = tc->tc_offset_nano; delta += ((u_int64_t)count * tc->tc_scale_nano_f); delta >>= 32; delta += ((u_int64_t)count * tc->tc_scale_nano_i); if (delta >= 1000000000) { delta -= 1000000000; ts->tv_sec++; } ts->tv_nsec = delta; } static void tco_setscales(struct timecounter *tc) { u_int64_t scale; scale = 1000000000LL << 32; scale += tc->tc_adjustment; scale /= tc->tc_frequency; tc->tc_scale_micro = scale / 1000; tc->tc_scale_nano_f = scale & 0xffffffff; tc->tc_scale_nano_i = scale >> 32; } void init_timecounter(struct timecounter *tc) { struct timespec ts1; struct timecounter *t1, *t2, *t3; int i; tc->tc_adjustment = 0; tco_setscales(tc); tc->tc_offset_count = tc->tc_get_timecount(tc); tc->tc_tweak = tc; MALLOC(t1, struct timecounter *, sizeof *t1, M_TIMECOUNTER, M_WAITOK); *t1 = *tc; t2 = t1; for (i = 1; i < NTIMECOUNTER; i++) { MALLOC(t3, struct timecounter *, 
sizeof *t3, M_TIMECOUNTER, M_WAITOK); *t3 = *tc; t3->tc_other = t2; t2 = t3; } t1->tc_other = t3; tc = t1; printf("Timecounter \"%s\" frequency %lu Hz\n", tc->tc_name, (u_long)tc->tc_frequency); /* XXX: For now always start using the counter. */ tc->tc_offset_count = tc->tc_get_timecount(tc); nanouptime(&ts1); tc->tc_offset_nano = (u_int64_t)ts1.tv_nsec << 32; tc->tc_offset_micro = ts1.tv_nsec / 1000; tc->tc_offset_sec = ts1.tv_sec; timecounter = tc; } void set_timecounter(struct timespec *ts) { struct timespec ts2; nanouptime(&ts2); boottime.tv_sec = ts->tv_sec - ts2.tv_sec; boottime.tv_usec = (ts->tv_nsec - ts2.tv_nsec) / 1000; if (boottime.tv_usec < 0) { boottime.tv_usec += 1000000; boottime.tv_sec--; } /* fiddle all the little crinkly bits around the fiords... */ tco_forward(1); } #if 0 /* Currently unused */ void switch_timecounter(struct timecounter *newtc) { int s; struct timecounter *tc; struct timespec ts; s = splclock(); tc = timecounter; if (newtc == tc || newtc == tc->tc_other) { splx(s); return; } nanouptime(&ts); newtc->tc_offset_sec = ts.tv_sec; newtc->tc_offset_nano = (u_int64_t)ts.tv_nsec << 32; newtc->tc_offset_micro = ts.tv_nsec / 1000; newtc->tc_offset_count = newtc->tc_get_timecount(newtc); timecounter = newtc; splx(s); } #endif static struct timecounter * sync_other_counter(void) { struct timecounter *tc, *tcn, *tco; unsigned delta; tco = timecounter; tc = tco->tc_other; tcn = tc->tc_other; *tc = *tco; tc->tc_other = tcn; delta = tco_delta(tc); tc->tc_offset_count += delta; tc->tc_offset_count &= tc->tc_counter_mask; tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_f; tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_i << 32; return (tc); } static void tco_forward(int force) { struct timecounter *tc, *tco; tco = timecounter; tc = sync_other_counter(); /* * We may be inducing a tiny error here: the tc_poll_pps() may * process a latched count which happens after the tco_delta() * in sync_other_counter(), which would extend the previous * counter's parameters into the domain of this new one. * Since the time window is very small for this, the error is * going to be only a few weenieseconds (as Dave Mills would * say), so let's just not talk more about it, OK?
*/ if (tco->tc_poll_pps) tco->tc_poll_pps(tco); if (timedelta != 0) { tc->tc_offset_nano += (u_int64_t)(tickdelta * 1000) << 32; timedelta -= tickdelta; force++; } while (tc->tc_offset_nano >= 1000000000ULL << 32) { tc->tc_offset_nano -= 1000000000ULL << 32; tc->tc_offset_sec++; - tc->tc_frequency = tc->tc_tweak->tc_frequency; - tc->tc_adjustment = tc->tc_tweak->tc_adjustment; ntp_update_second(tc); /* XXX only needed if xntpd runs */ tco_setscales(tc); force++; } if (tco_method && !force) return; tc->tc_offset_micro = (tc->tc_offset_nano / 1000) >> 32; /* Figure out the wall-clock time */ tc->tc_nanotime.tv_sec = tc->tc_offset_sec + boottime.tv_sec; tc->tc_nanotime.tv_nsec = (tc->tc_offset_nano >> 32) + boottime.tv_usec * 1000; tc->tc_microtime.tv_usec = tc->tc_offset_micro + boottime.tv_usec; if (tc->tc_nanotime.tv_nsec >= 1000000000) { tc->tc_nanotime.tv_nsec -= 1000000000; tc->tc_microtime.tv_usec -= 1000000; tc->tc_nanotime.tv_sec++; } time_second = tc->tc_microtime.tv_sec = tc->tc_nanotime.tv_sec; timecounter = tc; } -static int -sysctl_kern_timecounter_frequency SYSCTL_HANDLER_ARGS +SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, ""); + +SYSCTL_INT(_kern_timecounter, KERN_ARGMAX, method, CTLFLAG_RW, &tco_method, 0, + "This variable determines the method used for updating timecounters. " + "If the default algorithm (0) fails with \"calcru negative...\" messages " + "try the alternate algorithm (1) which handles bad hardware better." + +); + + +int +pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps) { + pps_params_t *app; + pps_info_t *api; - return (sysctl_handle_opaque(oidp, - &timecounter->tc_tweak->tc_frequency, - sizeof(timecounter->tc_tweak->tc_frequency), req)); + switch (cmd) { + case PPS_IOC_CREATE: + return (0); + case PPS_IOC_DESTROY: + return (0); + case PPS_IOC_SETPARAMS: + app = (pps_params_t *)data; + if (app->mode & ~pps->ppscap) + return (EINVAL); + pps->ppsparam = *app; + return (0); + case PPS_IOC_GETPARAMS: + app = (pps_params_t *)data; + *app = pps->ppsparam; + return (0); + case PPS_IOC_GETCAP: + *(int*)data = pps->ppscap; + return (0); + case PPS_IOC_FETCH: + api = (pps_info_t *)data; + pps->ppsinfo.current_mode = pps->ppsparam.mode; + *api = pps->ppsinfo; + return (0); + case PPS_IOC_WAIT: + return (EOPNOTSUPP); + default: + return (ENOTTY); + } } -static int -sysctl_kern_timecounter_adjustment SYSCTL_HANDLER_ARGS +void +pps_init(struct pps_state *pps) { - - return (sysctl_handle_opaque(oidp, - &timecounter->tc_tweak->tc_adjustment, - sizeof(timecounter->tc_tweak->tc_adjustment), req)); + pps->ppscap |= PPS_TSFMT_TSPEC; + if (pps->ppscap & PPS_CAPTUREASSERT) + pps->ppscap |= PPS_OFFSETASSERT; + if (pps->ppscap & PPS_CAPTURECLEAR) + pps->ppscap |= PPS_OFFSETCLEAR; +#ifdef PPS_SYNC + if (pps->ppscap & PPS_CAPTUREASSERT) + pps->ppscap |= PPS_HARDPPSONASSERT; + if (pps->ppscap & PPS_CAPTURECLEAR) + pps->ppscap |= PPS_HARDPPSONCLEAR; +#endif } -SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, ""); +void +pps_event(struct pps_state *pps, struct timecounter *tc, unsigned count, int event) +{ + struct timespec ts, *tsp, *osp; + u_int64_t delta; + unsigned tcount, *pcount; + int foff, fhard; + pps_seq_t *pseq; -SYSCTL_INT(_kern_timecounter, KERN_ARGMAX, method, CTLFLAG_RW, &tco_method, 0, - "This variable determines the method used for updating timecounters. " - "If the default algorithm (0) fails with \"calcru negative...\" messages " - "try the alternate algorithm (1) which handles bad hardware better." 
+ /* Things would be easier with arrays... */ + if (event == PPS_CAPTUREASSERT) { + tsp = &pps->ppsinfo.assert_timestamp; + osp = &pps->ppsparam.assert_offset; + foff = pps->ppsparam.mode & PPS_OFFSETASSERT; + fhard = pps->ppsparam.mode & PPS_HARDPPSONASSERT; + pcount = &pps->ppscount[0]; + pseq = &pps->ppsinfo.assert_sequence; + } else { + tsp = &pps->ppsinfo.clear_timestamp; + osp = &pps->ppsparam.clear_offset; + foff = pps->ppsparam.mode & PPS_OFFSETCLEAR; + fhard = pps->ppsparam.mode & PPS_HARDPPSONCLEAR; + pcount = &pps->ppscount[1]; + pseq = &pps->ppsinfo.clear_sequence; + } -); + /* The timecounter changed: bail */ + if (!pps->ppstc || + pps->ppstc->tc_name != tc->tc_name || + tc->tc_name != timecounter->tc_name) { + pps->ppstc = tc; + *pcount = count; + return; + } -SYSCTL_PROC(_kern_timecounter, OID_AUTO, frequency, CTLTYPE_INT | CTLFLAG_RW, - 0, sizeof(u_int), sysctl_kern_timecounter_frequency, "I", ""); + /* Now, make sure we have the right instance */ + tc = timecounter; -SYSCTL_PROC(_kern_timecounter, OID_AUTO, adjustment, CTLTYPE_INT | CTLFLAG_RW, - 0, sizeof(int), sysctl_kern_timecounter_adjustment, "I", ""); + /* Nothing really happened */ + if (*pcount == count) + return; + + *pcount = count; + + /* Convert the count to timespec */ + ts.tv_sec = tc->tc_offset_sec; + tcount = count - tc->tc_offset_count; + tcount &= tc->tc_counter_mask; + delta = tc->tc_offset_nano; + delta += ((u_int64_t)tcount * tc->tc_scale_nano_f); + delta >>= 32; + delta += ((u_int64_t)tcount * tc->tc_scale_nano_i); + delta += boottime.tv_usec * 1000; + ts.tv_sec += boottime.tv_sec; + while (delta >= 1000000000) { + delta -= 1000000000; + ts.tv_sec++; + } + ts.tv_nsec = delta; + + (*pseq)++; + *tsp = ts; + + if (foff) { + timespecadd(tsp, osp); + if (tsp->tv_nsec < 0) { + tsp->tv_nsec += 1000000000; + tsp->tv_sec -= 1; + } + } +#ifdef PPS_SYNC + if (fhard) { + /* magic, at its best... */ + tcount = count - pps->ppscount[2]; + pps->ppscount[2] = count; + tcount &= tc->tc_counter_mask; + delta = ((u_int64_t)tcount * tc->tc_tweak->tc_scale_nano_f); + delta >>= 32; + delta += ((u_int64_t)tcount * tc->tc_tweak->tc_scale_nano_i); + hardpps(tsp, delta); + } +#endif +} + Index: head/sys/pci/xrpu.c =================================================================== --- head/sys/pci/xrpu.c (revision 44665) +++ head/sys/pci/xrpu.c (revision 44666) @@ -1,271 +1,248 @@ /* * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp * ---------------------------------------------------------------------------- * - * $Id: xrpu.c,v 1.5 1998/12/14 06:32:58 dillon Exp $ + * $Id: xrpu.c,v 1.6 1999/01/12 01:42:43 eivind Exp $ * * A very simple device driver for PCI cards based on Xilinx 6200 series * FPGA/RPU devices. Current Functionality is to allow you to open and * mmap the entire thing into your program. * * Hardware currently supported: * www.vcc.com HotWorks 1 6216 based card. 
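* * As an illustrative sketch of the intended userland use (the device * path and mapping size are examples only; xrpu_mmap() below rejects * offsets of 16 MB and beyond): * * int fd = open("/dev/xrpu0", O_RDWR); * u_int *fpga = mmap(NULL, 0x1000000, PROT_READ | PROT_WRITE, * MAP_SHARED, fd, 0);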
* */ +#include "opt_devfs.h" + #include "xrpu.h" #include #include #include #include #include #include +#ifdef DEVFS #include +#endif #include #include #include static const char* xrpu_probe (pcici_t tag, pcidi_t type); static void xrpu_attach (pcici_t tag, int unit); static u_long xrpu_count; static void xrpu_poll_pps(struct timecounter *tc); /* * Device driver initialization stuff */ static d_open_t xrpu_open; static d_close_t xrpu_close; static d_ioctl_t xrpu_ioctl; static d_mmap_t xrpu_mmap; #define CDEV_MAJOR 100 static struct cdevsw xrpudevsw = { xrpu_open, xrpu_close, noread, nowrite, xrpu_ioctl, nullstop, noreset, nodevtotty, seltrue, xrpu_mmap, nostrategy, "xrpu", NULL, -1 }; static MALLOC_DEFINE(M_XRPU, "xrpu", "XRPU related"); #define dev2unit(devt) (minor(devt) & 0xff) #define dev2pps(devt) ((minor(devt) >> 16)-1) static struct softc { pcici_t tag; enum { NORMAL, TIMECOUNTER } mode; vm_offset_t virbase, physbase; u_int *virbase62; struct timecounter tc; u_int *trigger, *latch, dummy; - struct { - pps_params_t params; - pps_info_t info; - int cap; - u_int *assert, last_assert; - u_int *clear, last_clear; - } pps[XRPU_MAX_PPS]; + struct pps_state pps[XRPU_MAX_PPS]; + u_int *assert[XRPU_MAX_PPS], *clear[XRPU_MAX_PPS]; } *softc[NXRPU]; static unsigned xrpu_get_timecount(struct timecounter *tc) { struct softc *sc = tc->tc_priv; sc->dummy += *sc->trigger; return (*sc->latch & tc->tc_counter_mask); } void xrpu_poll_pps(struct timecounter *tc) { struct softc *sc = tc->tc_priv; int i; unsigned count1, ppscount; for (i = 0; i < XRPU_MAX_PPS; i++) { - if (sc->pps[i].assert) { - ppscount = *(sc->pps[i].assert) & tc->tc_counter_mask; + if (sc->assert[i]) { + ppscount = *(sc->assert[i]) & tc->tc_counter_mask; do { count1 = ppscount; - ppscount = *(sc->pps[i].assert) & tc->tc_counter_mask; + ppscount = *(sc->assert[i]) & tc->tc_counter_mask; } while (ppscount != count1); - if (ppscount != sc->pps[i].last_assert) { - timecounter_timespec(ppscount, &sc->pps[i].info.assert_timestamp); - if (sc->pps[i].params.mode & PPS_OFFSETASSERT) { - timespecadd(&sc->pps[i].info.assert_timestamp, - &sc->pps[i].params.assert_offset); - if (sc->pps[i].info.assert_timestamp.tv_nsec < 0) { - sc->pps[i].info.assert_timestamp.tv_nsec += 1000000000; - sc->pps[i].info.assert_timestamp.tv_sec -= 1; - } - } - sc->pps[i].info.assert_sequence++; - sc->pps[i].last_assert = ppscount; - } + pps_event(&sc->pps[i], &sc->tc, ppscount, PPS_CAPTUREASSERT); } - if (sc->pps[i].clear) { - ppscount = *(sc->pps[i].clear) & tc->tc_counter_mask; + if (sc->clear[i]) { + ppscount = *(sc->clear[i]) & tc->tc_counter_mask; do { count1 = ppscount; - ppscount = *(sc->pps[i].clear) & tc->tc_counter_mask; + ppscount = *(sc->clear[i]) & tc->tc_counter_mask; } while (ppscount != count1); - if (ppscount != sc->pps[i].last_clear) { - timecounter_timespec(ppscount, &sc->pps[i].info.clear_timestamp); - if (sc->pps[i].params.mode & PPS_OFFSETASSERT) { - timespecadd(&sc->pps[i].info.clear_timestamp, - &sc->pps[i].params.clear_offset); - if (sc->pps[i].info.clear_timestamp.tv_nsec < 0) { - sc->pps[i].info.clear_timestamp.tv_nsec += 1000000000; - sc->pps[i].info.clear_timestamp.tv_sec -= 1; - } - } - sc->pps[i].info.clear_sequence++; - sc->pps[i].last_clear = ppscount; - } + pps_event(&sc->pps[i], &sc->tc, ppscount, PPS_CAPTURECLEAR); } } } static int xrpu_open(dev_t dev, int flag, int mode, struct proc *p) { return (0); } static int xrpu_close(dev_t dev, int flag, int mode, struct proc *p) { return (0); } static int xrpu_mmap(dev_t dev, vm_offset_t
offset, int nprot) { struct softc *sc = softc[dev2unit(dev)]; if (offset >= 0x1000000) return (-1); return (i386_btop(sc->physbase + offset)); } static int xrpu_ioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct proc *pr) { struct softc *sc = softc[dev2unit(dev)]; int i, error; if (sc->mode == TIMECOUNTER) { i = dev2pps(dev); if (i < 0 || i >= XRPU_MAX_PPS) return ENODEV; - if (!sc->pps[i].cap) - return ENODEV; - error = std_pps_ioctl(cmd, arg, &sc->pps[i].params, - &sc->pps[i].info, sc->pps[i].cap); + error = pps_ioctl(cmd, arg, &sc->pps[i]); return (error); } if (cmd == XRPU_IOC_TIMECOUNTING) { struct xrpu_timecounting *xt = (struct xrpu_timecounting *)arg; /* Name SHALL be zero terminated */ xt->xt_name[sizeof xt->xt_name - 1] = '\0'; i = strlen(xt->xt_name); sc->tc.tc_name = (char *)malloc(i + 1, M_XRPU, M_WAITOK); strcpy(sc->tc.tc_name, xt->xt_name); sc->tc.tc_frequency = xt->xt_frequency; sc->tc.tc_get_timecount = xrpu_get_timecount; sc->tc.tc_poll_pps = xrpu_poll_pps; sc->tc.tc_priv = sc; sc->tc.tc_counter_mask = xt->xt_mask; sc->trigger = sc->virbase62 + xt->xt_addr_trigger; sc->latch = sc->virbase62 + xt->xt_addr_latch; for (i = 0; i < XRPU_MAX_PPS; i++) { if (xt->xt_pps[i].xt_addr_assert == 0 && xt->xt_pps[i].xt_addr_clear == 0) continue; - devfs_add_devswf(&xrpudevsw, (i+1)<<16, DV_CHR, UID_ROOT, GID_WHEEL, 0600, - "xpps%d", i); - /* DEVFS */ +#ifdef DEVFS + devfs_add_devswf(&xrpudevsw, (i+1)<<16, DV_CHR, UID_ROOT, GID_WHEEL, + 0600, "xpps%d", i); +#endif + sc->pps[i].ppscap = 0; if (xt->xt_pps[i].xt_addr_assert) { - sc->pps[i].assert = sc->virbase62 + xt->xt_pps[i].xt_addr_assert; - sc->pps[i].cap |= PPS_CAPTUREASSERT | PPS_OFFSETASSERT; + sc->assert[i] = sc->virbase62 + xt->xt_pps[i].xt_addr_assert; + sc->pps[i].ppscap |= PPS_CAPTUREASSERT; } if (xt->xt_pps[i].xt_addr_clear) { - sc->pps[i].clear = sc->virbase62 + xt->xt_pps[i].xt_addr_clear; - sc->pps[i].cap |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR; + sc->clear[i] = sc->virbase62 + xt->xt_pps[i].xt_addr_clear; + sc->pps[i].ppscap |= PPS_CAPTURECLEAR; } + pps_init(&sc->pps[i]); } sc->mode = TIMECOUNTER; init_timecounter(&sc->tc); return (0); } error = ENOTTY; return (error); } /* * PCI initialization stuff */ static struct pci_device xrpu_device = { "xrpu", xrpu_probe, xrpu_attach, &xrpu_count, NULL }; DATA_SET (pcidevice_set, xrpu_device); static const char* xrpu_probe (pcici_t tag, pcidi_t typea) { u_int id; const char *vendor, *chip, *type; (void)pci_conf_read(tag, PCI_CLASS_REG); id = pci_conf_read(tag, PCI_ID_REG); vendor = chip = type = 0; if (id == 0x6216133e) { return "VCC Hotworks-I xc6216"; } return 0; } static void xrpu_attach (pcici_t tag, int unit) { struct softc *sc; dev_t cdev = makedev(CDEV_MAJOR, unit); sc = (struct softc *)malloc(sizeof *sc, M_XRPU, M_WAITOK); softc[unit] = sc; bzero(sc, sizeof *sc); sc->tag = tag; sc->mode = NORMAL; pci_map_mem(tag, PCI_MAP_REG_START, &sc->virbase, &sc->physbase); sc->virbase62 = (u_int *)(sc->virbase + 0x800000); if (bootverbose) printf("Mapped physbase %#lx to virbase %#lx\n", (u_long)sc->physbase, (u_long)sc->virbase); if (!unit) cdevsw_add(&cdev, &xrpudevsw, NULL); +#ifdef DEVFS devfs_add_devswf(&xrpudevsw, 0, DV_CHR, UID_ROOT, GID_WHEEL, 0600, "xrpu%d", unit); +#endif } Index: head/sys/sys/systm.h =================================================================== --- head/sys/sys/systm.h (revision 44665) +++ head/sys/sys/systm.h (revision 44666) @@ -1,313 +1,311 @@ /*- * Copyright (c) 1982, 1988, 1991, 1993 * The Regents of the University of California. 
All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)systm.h 8.7 (Berkeley) 3/29/95 - * $Id: systm.h,v 1.85 1999/01/28 00:57:54 dillon Exp $ + * $Id: systm.h,v 1.86 1999/03/05 19:27:22 bde Exp $ */ #ifndef _SYS_SYSTM_H_ #define _SYS_SYSTM_H_ #include #include extern int securelevel; /* system security level (see init(8)) */ extern int cold; /* nonzero if we are doing a cold boot */ extern const char *panicstr; /* panic message */ extern int safepri; /* safe ipl when cold or panicing */ extern char version[]; /* system version */ extern char copyright[]; /* system copyright */ extern int nblkdev; /* number of entries in bdevsw */ extern int nchrdev; /* number of entries in cdevsw */ extern int nswap; /* size of swap space */ extern int selwait; /* select timeout address */ extern u_char curpriority; /* priority of current process */ extern int physmem; /* physical memory */ extern dev_t dumpdev; /* dump device */ extern long dumplo; /* offset into dumpdev */ extern dev_t rootdev; /* root device */ extern dev_t rootdevs[2]; /* possible root devices */ extern char *rootdevnames[2]; /* names of possible root devices */ extern struct vnode *rootvp; /* vnode equivalent to above */ extern struct vnode *swapdev_vp;/* vnode for swap device */ extern int boothowto; /* reboot flags, from console subsystem */ extern int bootverbose; /* nonzero to print verbose messages */ #ifdef INVARIANTS /* The option is always available */ #define KASSERT(exp,msg) do { if (!(exp)) panic msg; } while (0) #else #define KASSERT(exp,msg) #endif /* * General function declarations. 
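* * Note on KASSERT() above: the second argument is expanded as the * argument list of panic(), so it must carry its own parentheses, * e.g. (the message text is illustrative only): * * KASSERT(p != NULL, ("statclock: NULL proc"));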
*/ struct clockframe; struct malloc_type; struct proc; struct timeval; struct tty; struct uio; void Debugger __P((const char *msg)); int nullop __P((void)); int eopnotsupp __P((void)); int einval __P((void)); int seltrue __P((dev_t dev, int which, struct proc *p)); int ureadc __P((int, struct uio *)); void *hashinit __P((int count, struct malloc_type *type, u_long *hashmask)); void *phashinit __P((int count, struct malloc_type *type, u_long *nentries)); void panic __P((const char *, ...)) __dead2 __printflike(1, 2); void cpu_boot __P((int)); void cpu_rootconf __P((void)); void cpu_dumpconf __P((void)); void tablefull __P((const char *)); int addlog __P((const char *, ...)) __printflike(1, 2); int kvprintf __P((char const *, void (*)(int, void*), void *, int, _BSD_VA_LIST_)) __printflike(1, 0); void log __P((int, const char *, ...)) __printflike(2, 3); void logwakeup __P((void)); int printf __P((const char *, ...)) __printflike(1, 2); int snprintf __P((char *, size_t, const char *, ...)) __printflike(3, 4); int sprintf __P((char *buf, const char *, ...)) __printflike(2, 3); void uprintf __P((const char *, ...)) __printflike(1, 2); void vprintf __P((const char *, _BSD_VA_LIST_)) __printflike(1, 0); int vsnprintf __P((char *, size_t, const char *, _BSD_VA_LIST_)) __printflike(3, 0); int vsprintf __P((char *buf, const char *, _BSD_VA_LIST_)) __printflike(2, 0); void ttyprintf __P((struct tty *, const char *, ...)) __printflike(2, 3); int sscanf __P((const char *, char const *, ...)); int vsscanf __P((const char *, char const *, _BSD_VA_LIST_)); u_quad_t strtouq __P((const char *, const char **, int)); quad_t strtoq __P((const char *, const char **, int base)); void bcopy __P((const void *from, void *to, size_t len)); void ovbcopy __P((const void *from, void *to, size_t len)); #ifdef __i386__ extern void (*bzero) __P((void *buf, size_t len)); #else void bzero __P((void *buf, size_t len)); #endif void *memcpy __P((void *to, const void *from, size_t len)); int copystr __P((const void *kfaddr, void *kdaddr, size_t len, size_t *lencopied)); int copyinstr __P((const void *udaddr, void *kaddr, size_t len, size_t *lencopied)); int copyin __P((const void *udaddr, void *kaddr, size_t len)); int copyout __P((const void *kaddr, void *udaddr, size_t len)); int fubyte __P((const void *base)); int subyte __P((void *base, int byte)); int suibyte __P((void *base, int byte)); long fuword __P((const void *base)); int suword __P((void *base, long word)); int fusword __P((void *base)); int susword __P((void *base, int word)); void realitexpire __P((void *)); void hardclock __P((struct clockframe *frame)); void softclock __P((void)); void statclock __P((struct clockframe *frame)); void startprofclock __P((struct proc *)); void stopprofclock __P((struct proc *)); void setstatclockrate __P((int hzrate)); - -void hardpps __P((struct timeval *tvp, long usec)); char *getenv __P((char *name)); int getenv_int __P((char *name, int *data)); extern char *kern_envp; #ifdef APM_FIXUP_CALLTODO void adjust_timeout_calltodo __P((struct timeval *time_change)); #endif /* APM_FIXUP_CALLTODO */ #include /* Initialize the world */ void consinit __P((void)); void cpu_initclocks __P((void)); void nchinit __P((void)); void usrinfoinit __P((void)); void vntblinit __P((void)); /* Finalize the world. */ void shutdown_nice __P((void)); /* * Kernel to clock driver interface. 
*/ void inittodr __P((time_t base)); void resettodr __P((void)); void startrtclock __P((void)); /* Timeouts */ typedef void timeout_t __P((void *)); /* timeout function type */ #define CALLOUT_HANDLE_INITIALIZER(handle) \ { NULL } void callout_handle_init __P((struct callout_handle *)); struct callout_handle timeout __P((timeout_t *, void *, int)); void untimeout __P((timeout_t *, void *, struct callout_handle)); /* Interrupt management */ #ifdef __i386__ void setdelayed __P((void)); void setsoftast __P((void)); void setsoftcambio __P((void)); void setsoftcamnet __P((void)); void setsoftclock __P((void)); void setsoftnet __P((void)); void setsofttty __P((void)); void setsoftvm __P((void)); void schedsoftcamnet __P((void)); void schedsoftcambio __P((void)); void schedsoftnet __P((void)); void schedsofttty __P((void)); void schedsoftvm __P((void)); intrmask_t softclockpending __P((void)); void spl0 __P((void)); intrmask_t splbio __P((void)); intrmask_t splcam __P((void)); intrmask_t splclock __P((void)); intrmask_t splhigh __P((void)); intrmask_t splimp __P((void)); intrmask_t splnet __P((void)); #ifdef SMP intrmask_t splq __P((intrmask_t mask)); #endif intrmask_t splsoftcam __P((void)); intrmask_t splsoftcambio __P((void)); intrmask_t splsoftcamnet __P((void)); intrmask_t splsoftclock __P((void)); intrmask_t splsofttty __P((void)); intrmask_t splsoftvm __P((void)); intrmask_t splstatclock __P((void)); intrmask_t spltty __P((void)); intrmask_t splvm __P((void)); void splx __P((intrmask_t ipl)); void splz __P((void)); #endif /* __i386__ */ #ifdef __alpha__ #include #endif /* * XXX It's not clear how "machine independent" these will be yet, but * they are used all over the place especially in pci drivers. We would * have to modify lots of drivers since no longer * implicitly causes these to be defined when it #included */ extern intrmask_t bio_imask; /* group of interrupts masked with splbio() */ extern intrmask_t cam_imask; /* group of interrupts masked with splcam() */ extern intrmask_t net_imask; /* group of interrupts masked with splimp() */ extern intrmask_t stat_imask; /* interrupts masked with splstatclock() */ extern intrmask_t tty_imask; /* group of interrupts masked with spltty() */ /* Read only */ extern const intrmask_t soft_imask; /* interrupts masked with splsoft*() */ extern const intrmask_t softnet_imask; /* interrupt masked with splnet() */ extern const intrmask_t softtty_imask; /* interrupt masked with splsofttty() */ /* * Various callout lists. */ /* Exit callout list declarations. */ typedef void (*exitlist_fn) __P((struct proc *procp)); int at_exit __P((exitlist_fn function)); int rm_at_exit __P((exitlist_fn function)); /* Fork callout list declarations. */ typedef void (*forklist_fn) __P((struct proc *parent, struct proc *child, int flags)); int at_fork __P((forklist_fn function)); int rm_at_fork __P((forklist_fn function)); /* Shutdown callout list definitions and declarations. */ #define SHUTDOWN_PRE_SYNC 0 #define SHUTDOWN_POST_SYNC 1 #define SHUTDOWN_FINAL 2 #define SHUTDOWN_PRI_FIRST 0 #define SHUTDOWN_PRI_DEFAULT 10000 #define SHUTDOWN_PRI_LAST 20000 typedef void (*bootlist_fn) __P((int, void *)); int at_shutdown __P((bootlist_fn function, void *arg, int position)); int at_shutdown_pri __P((bootlist_fn function, void *arg, int position, int pri)); int rm_at_shutdown __P((bootlist_fn function, void *arg)); /* * Not exactly a callout LIST, but a callout entry. * Allow an external module to define a hardware watchdog tickler. 
* Normally a process would do this, but there are times when the * kernel needs to be able to hold off the watchdog, when the process * is not active, e.g., when dumping core. */ typedef void (*watchdog_tickle_fn) __P((void)); extern watchdog_tickle_fn wdog_tickler; /* * Common `proc' functions are declared here so that proc.h can be included * less often. */ int tsleep __P((void *chan, int pri, const char *wmesg, int timo)); int asleep __P((void *chan, int pri, const char *wmesg, int timo)); int await __P((int pri, int timo)); void wakeup __P((void *chan)); #endif /* !_SYS_SYSTM_H_ */ Index: head/sys/sys/time.h =================================================================== --- head/sys/sys/time.h (revision 44665) +++ head/sys/sys/time.h (revision 44666) @@ -1,300 +1,299 @@ /* * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)time.h 8.5 (Berkeley) 5/4/95 - * $Id: time.h,v 1.35 1998/12/15 17:38:32 des Exp $ + * $Id: time.h,v 1.36 1999/03/08 12:36:14 phk Exp $ */ #ifndef _SYS_TIME_H_ #define _SYS_TIME_H_ #include /* * Structure returned by gettimeofday(2) system call, * and used in other calls. 
*/ struct timeval { long tv_sec; /* seconds */ long tv_usec; /* and microseconds */ }; #ifndef _TIMESPEC_DECLARED #define _TIMESPEC_DECLARED struct timespec { time_t tv_sec; /* seconds */ long tv_nsec; /* and nanoseconds */ }; #endif #define TIMEVAL_TO_TIMESPEC(tv, ts) \ do { \ (ts)->tv_sec = (tv)->tv_sec; \ (ts)->tv_nsec = (tv)->tv_usec * 1000; \ } while (0) #define TIMESPEC_TO_TIMEVAL(tv, ts) \ do { \ (tv)->tv_sec = (ts)->tv_sec; \ (tv)->tv_usec = (ts)->tv_nsec / 1000; \ } while (0) struct timezone { int tz_minuteswest; /* minutes west of Greenwich */ int tz_dsttime; /* type of dst correction */ }; #define DST_NONE 0 /* not on dst */ #define DST_USA 1 /* USA style dst */ #define DST_AUST 2 /* Australian style dst */ #define DST_WET 3 /* Western European dst */ #define DST_MET 4 /* Middle European dst */ #define DST_EET 5 /* Eastern European dst */ #define DST_CAN 6 /* Canada */ /* * Structure used to interface to the machine dependent hardware support * for timekeeping. * * A timecounter is a (hard or soft) binary counter which has two properties: * * it runs at a fixed, known frequency. * * it must not roll over in less than (1 + delta)/HZ seconds. "delta" * is expected to be less than 20 msec, but no hard data has been * collected on this. 16 bit at 5 MHz (31 msec) is known to work. * * get_timecount() reads the counter. * * counter_mask removes unimplemented bits from the count value. * * frequency is the counter frequency in hz. * * name is a short mnemonic name for this counter. * * cost is a measure of how long it takes to read the counter. * * adjustment [PPM << 16] which means that the smallest unit of correction * you can apply amounts to 481.5 usec/year. * * scale_micro [2^32 * usec/tick]. * scale_nano_i [ns/tick]. * scale_nano_f [(ns/2^32)/tick]. * * offset_count is the contents of the counter which corresponds to the * rest of the offset_* values. * * offset_sec [s]. * offset_micro [usec]. * offset_nano [ns/2^32] is misnamed, the real unit is .23283064365... * attoseconds (10E-18) and before you ask: yes, they are in fact * called attoseconds, it comes from "atten" for 18 in Danish/Swedish. * * Each timecounter must supply an array of three timecounters; this is needed * to guarantee atomicity in the code. Index zero is used to transport * modifications, for instance done with sysctl, into the timecounter being * used in a safe way. Such changes may be adopted with a delay of up to 1/HZ; * index one & two are used alternately for the actual timekeeping. * * `other' points to the opposite "work" timecounter, i.e., in index one it * points to index two and vice versa. * * `tweak' points to index zero. * */ struct timecounter; typedef unsigned timecounter_get_t __P((struct timecounter *)); typedef void timecounter_pps_t __P((struct timecounter *)); struct timecounter { /* These fields must be initialized by the driver. */ timecounter_get_t *tc_get_timecount; timecounter_pps_t *tc_poll_pps; unsigned tc_counter_mask; u_int32_t tc_frequency; char *tc_name; void *tc_priv; /* These fields will be managed by the generic code.
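As an illustration of the tc_scale_* units documented above (the frequency is the classic i8254 value, used here only as an example): for a 1193182 Hz counter, scale = 2^32 * 10^9 / 1193182, so tc_scale_nano_i is 838 ns/tick, tc_scale_nano_f carries the remaining ~.0965 ns/tick fraction, and tc_scale_micro is scale / 1000.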
*/ int64_t tc_adjustment; u_int32_t tc_scale_micro; u_int32_t tc_scale_nano_i; u_int32_t tc_scale_nano_f; unsigned tc_offset_count; u_int32_t tc_offset_sec; u_int32_t tc_offset_micro; u_int64_t tc_offset_nano; struct timeval tc_microtime; struct timespec tc_nanotime; struct timecounter *tc_other; struct timecounter *tc_tweak; }; #ifdef KERNEL /* Operations on timespecs */ #define timespecclear(tvp) ((tvp)->tv_sec = (tvp)->tv_nsec = 0) #define timespecisset(tvp) ((tvp)->tv_sec || (tvp)->tv_nsec) #define timespeccmp(tvp, uvp, cmp) \ (((tvp)->tv_sec == (uvp)->tv_sec) ? \ ((tvp)->tv_nsec cmp (uvp)->tv_nsec) : \ ((tvp)->tv_sec cmp (uvp)->tv_sec)) #define timespecadd(vvp, uvp) \ do { \ (vvp)->tv_sec += (uvp)->tv_sec; \ (vvp)->tv_nsec += (uvp)->tv_nsec; \ if ((vvp)->tv_nsec >= 1000000000) { \ (vvp)->tv_sec++; \ (vvp)->tv_nsec -= 1000000000; \ } \ } while (0) #define timespecsub(vvp, uvp) \ do { \ (vvp)->tv_sec -= (uvp)->tv_sec; \ (vvp)->tv_nsec -= (uvp)->tv_nsec; \ if ((vvp)->tv_nsec < 0) { \ (vvp)->tv_sec--; \ (vvp)->tv_nsec += 1000000000; \ } \ } while (0) /* Operations on timevals. */ #define timevalclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0 #define timevalisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec) #define timevalcmp(tvp, uvp, cmp) \ (((tvp)->tv_sec == (uvp)->tv_sec) ? \ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \ ((tvp)->tv_sec cmp (uvp)->tv_sec)) /* timevaladd and timevalsub are not inlined */ #endif /* KERNEL */ #ifndef KERNEL /* NetBSD/OpenBSD compatible interfaces */ #define timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0 #define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec) #define timercmp(tvp, uvp, cmp) \ (((tvp)->tv_sec == (uvp)->tv_sec) ? \ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \ ((tvp)->tv_sec cmp (uvp)->tv_sec)) #define timeradd(tvp, uvp, vvp) \ do { \ (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \ (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \ if ((vvp)->tv_usec >= 1000000) { \ (vvp)->tv_sec++; \ (vvp)->tv_usec -= 1000000; \ } \ } while (0) #define timersub(tvp, uvp, vvp) \ do { \ (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ if ((vvp)->tv_usec < 0) { \ (vvp)->tv_sec--; \ (vvp)->tv_usec += 1000000; \ } \ } while (0) #endif /* * Names of the interval timers, and structure * defining a timer setting. */
#define ITIMER_REAL 0 #define ITIMER_VIRTUAL 1 #define ITIMER_PROF 2 struct itimerval { struct timeval it_interval; /* timer interval */ struct timeval it_value; /* current value */ }; /* * Getkerninfo clock information structure */ struct clockinfo { int hz; /* clock frequency */ int tick; /* micro-seconds per hz tick */ int tickadj; /* clock skew rate for adjtime() */ int stathz; /* statistics clock frequency */ int profhz; /* profiling clock frequency */ }; /* CLOCK_REALTIME and TIMER_ABSTIME are supposed to be in time.h */ #ifndef CLOCK_REALTIME #define CLOCK_REALTIME 0 #endif #define CLOCK_VIRTUAL 1 #define CLOCK_PROF 2 #define TIMER_RELTIME 0x0 /* relative timer */ #ifndef TIMER_ABSTIME #define TIMER_ABSTIME 0x1 /* absolute timer */ #endif #ifdef KERNEL extern struct timecounter *timecounter; extern time_t time_second; void getmicrouptime __P((struct timeval *tv)); void getmicrotime __P((struct timeval *tv)); void getnanouptime __P((struct timespec *tv)); void getnanotime __P((struct timespec *tv)); void init_timecounter __P((struct timecounter *tc)); int itimerdecr __P((struct itimerval *itp, int usec)); int itimerfix __P((struct timeval *tv)); void microuptime __P((struct timeval *tv)); void microtime __P((struct timeval *tv)); void nanouptime __P((struct timespec *ts)); void nanotime __P((struct timespec *ts)); void set_timecounter __P((struct timespec *ts)); -void timecounter_timespec __P((unsigned count, struct timespec *ts)); void timevaladd __P((struct timeval *, struct timeval *)); void timevalsub __P((struct timeval *, struct timeval *)); int tvtohz __P((struct timeval *)); #else /* !KERNEL */ #include #include __BEGIN_DECLS int adjtime __P((const struct timeval *, struct timeval *)); int getitimer __P((int, struct itimerval *)); int gettimeofday __P((struct timeval *, struct timezone *)); int setitimer __P((int, const struct itimerval *, struct itimerval *)); int settimeofday __P((const struct timeval *, const struct timezone *)); int utimes __P((const char *, const struct timeval *)); __END_DECLS #endif /* !KERNEL */ #endif /* !_SYS_TIME_H_ */ Index: head/sys/sys/timepps.h =================================================================== --- head/sys/sys/timepps.h (revision 44665) +++ head/sys/sys/timepps.h (revision 44666) @@ -1,99 +1,176 @@ /* * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp * ---------------------------------------------------------------------------- * - * $Id: timepps.h,v 1.3 1998/06/13 09:30:24 phk Exp $ + * $Id: timepps.h,v 1.4 1998/06/22 21:09:10 phk Exp $ * * This is a FreeBSD prototype version of the "draft-mogul-pps-api-02.txt" * specification for Pulse Per Second timing interfaces.
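* * An illustrative userland calling sequence (fd is assumed to come * from a PPS-capable device driver; error handling omitted): * * pps_handle_t h; * pps_params_t pp; * pps_info_t pi; * * time_pps_create(fd, &h); * time_pps_getparams(h, &pp); * pp.mode |= PPS_CAPTUREASSERT | PPS_TSFMT_TSPEC; * time_pps_setparams(h, &pp); * time_pps_fetch(h, &pi); * * after which pi.assert_timestamp and pi.assert_sequence describe * the most recent assert event.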
* */ #ifndef _SYS_TIMEPPS_H_ #define _SYS_TIMEPPS_H_ #include typedef int pps_handle_t; typedef unsigned pps_seq_t; typedef struct ntp_fp { unsigned int integral; unsigned int fractional; } ntp_fp_t; typedef union pps_timeu { struct timespec tspec; ntp_fp_t ntpfp; unsigned long longpair[2]; } pps_timeu_t; typedef struct { pps_seq_t assert_sequence; /* assert event seq # */ pps_seq_t clear_sequence; /* clear event seq # */ pps_timeu_t assert_tu; pps_timeu_t clear_tu; int current_mode; /* current mode bits */ } pps_info_t; #define assert_timestamp assert_tu.tspec #define clear_timestamp clear_tu.tspec #define assert_timestamp_ntpfp assert_tu.ntpfp #define clear_timestamp_ntpfp clear_tu.ntpfp typedef struct { int mode; /* mode bits */ pps_timeu_t assert_off_tu; pps_timeu_t clear_off_tu; } pps_params_t; #define assert_offset assert_off_tu.tspec #define clear_offset clear_off_tu.tspec #define assert_offset_ntpfp assert_off_tu.ntpfp #define clear_offset_ntpfp clear_off_tu.ntpfp #define PPS_CAPTUREASSERT 0x01 #define PPS_CAPTURECLEAR 0x02 #define PPS_CAPTUREBOTH 0x03 #define PPS_OFFSETASSERT 0x10 #define PPS_OFFSETCLEAR 0x20 #define PPS_HARDPPSONASSERT 0x04 #define PPS_HARDPPSONCLEAR 0x08 #define PPS_ECHOASSERT 0x40 #define PPS_ECHOCLEAR 0x80 #define PPS_CANWAIT 0x100 #define PPS_TSFMT_TSPEC 0x1000 #define PPS_TSFMT_NTPFP 0x2000 struct pps_wait_args { struct timespec timeout; pps_info_t pps_info_buf; }; #define PPS_IOC_CREATE _IO('1', 1) #define PPS_IOC_DESTROY _IO('1', 2) #define PPS_IOC_SETPARAMS _IOW('1', 3, pps_params_t) #define PPS_IOC_GETPARAMS _IOR('1', 4, pps_params_t) #define PPS_IOC_GETCAP _IOR('1', 5, int) #define PPS_IOC_FETCH _IOWR('1', 6, pps_info_t) #define PPS_IOC_WAIT _IOWR('1', 6, struct pps_wait_args) #ifdef KERNEL -int std_pps_ioctl __P((u_long cmd, caddr_t data, pps_params_t *pp, - pps_info_t *pi, int ppscap)); +struct pps_state { + pps_params_t ppsparam; + pps_info_t ppsinfo; + int ppscap; + struct timecounter *ppstc; + unsigned ppscount[3]; +}; -#endif /* KERNEL */ +void pps_event __P((struct pps_state *pps, struct timecounter *tc, unsigned count, int event)); +void pps_init __P((struct pps_state *pps)); +int pps_ioctl __P((u_long cmd, caddr_t data, struct pps_state *pps)); +void hardpps __P((struct timespec *tsp, long nsec)); + +#else /* !KERNEL */ + +int time_pps_create(int filedes, pps_handle_t *handle); +int time_pps_destroy(pps_handle_t handle); +int time_pps_setparams(pps_handle_t handle, const pps_params_t *ppsparams); +int time_pps_getparams(pps_handle_t handle, pps_params_t *ppsparams); +int time_pps_getcap(pps_handle_t handle, int *mode); +int time_pps_fetch(pps_handle_t handle, pps_info_t *ppsinfobuf); +int time_pps_wait(pps_handle_t handle, const struct timespec *timeout, + pps_info_t *ppsinfobuf); + +static __inline int +time_pps_create(int filedes, pps_handle_t *handle) +{ + int error; + + *handle = -1; + error = ioctl(filedes, PPS_IOC_CREATE, 0); + if (error < 0) + return (-1); + *handle = filedes; + return (0); +} + +static __inline int +time_pps_destroy(pps_handle_t handle) +{ + return (ioctl(handle, PPS_IOC_DESTROY, 0)); +} + +static __inline int +time_pps_setparams(pps_handle_t handle, const pps_params_t *ppsparams) +{ + return (ioctl(handle, PPS_IOC_SETPARAMS, ppsparams)); +} + +static __inline int +time_pps_getparams(pps_handle_t handle, pps_params_t *ppsparams) +{ + return (ioctl(handle, PPS_IOC_GETPARAMS, ppsparams)); +} + +static __inline int +time_pps_getcap(pps_handle_t handle, int *mode) +{ + return (ioctl(handle, PPS_IOC_GETCAP, mode)); +} + 
+static __inline int
+time_pps_fetch(pps_handle_t handle, pps_info_t *ppsinfobuf)
+{
+	return (ioctl(handle, PPS_IOC_FETCH, ppsinfobuf));
+}
+
+static __inline int
+time_pps_wait(pps_handle_t handle, const struct timespec *timeout,
+	pps_info_t *ppsinfobuf)
+{
+	int error;
+	struct pps_wait_args arg;
+
+	arg.timeout = *timeout;
+	error = ioctl(handle, PPS_IOC_WAIT, &arg);
+	*ppsinfobuf = arg.pps_info_buf;
+	return (error);
+}
+
+#endif /* !KERNEL */

#endif /* _SYS_TIMEPPS_H_ */
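For orientation, a minimal sketch of how a userland program might drive the API above: open a PPS-capable device node, enable assert capture, and poll timestamps. The device path, error handling, and polling loop are illustrative only, not part of this commit.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/timepps.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pps_handle_t handle;
	pps_params_t params;
	pps_info_t info;
	int fd;

	fd = open("/dev/lppps0", O_RDONLY);	/* example device name */
	if (fd < 0 || time_pps_create(fd, &handle) < 0)
		return (1);
	time_pps_getparams(handle, &params);
	params.mode |= PPS_CAPTUREASSERT;	/* timestamp assert edges */
	time_pps_setparams(handle, &params);
	for (;;) {
		sleep(1);
		if (time_pps_fetch(handle, &info) < 0)
			break;
		printf("seq %u at %ld.%09ld\n", info.assert_sequence,
		    (long)info.assert_timestamp.tv_sec,
		    info.assert_timestamp.tv_nsec);
	}
	time_pps_destroy(handle);
	close(fd);
	return (0);
}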
Index: head/sys/sys/timetc.h
===================================================================
--- head/sys/sys/timetc.h	(revision 44665)
+++ head/sys/sys/timetc.h	(revision 44666)
@@ -1,300 +1,299 @@
/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)time.h	8.5 (Berkeley) 5/4/95
- * $Id: time.h,v 1.35 1998/12/15 17:38:32 des Exp $
+ * $Id: time.h,v 1.36 1999/03/08 12:36:14 phk Exp $
 */

#ifndef _SYS_TIME_H_
#define	_SYS_TIME_H_

#include

/*
 * Structure returned by gettimeofday(2) system call,
 * and used in other calls.
 */
struct timeval {
	long	tv_sec;		/* seconds */
	long	tv_usec;	/* and microseconds */
};

#ifndef _TIMESPEC_DECLARED
#define _TIMESPEC_DECLARED
struct timespec {
	time_t	tv_sec;		/* seconds */
	long	tv_nsec;	/* and nanoseconds */
};
#endif

#define TIMEVAL_TO_TIMESPEC(tv, ts)					\
	do {								\
		(ts)->tv_sec = (tv)->tv_sec;				\
		(ts)->tv_nsec = (tv)->tv_usec * 1000;			\
	} while (0)
#define TIMESPEC_TO_TIMEVAL(tv, ts)					\
	do {								\
		(tv)->tv_sec = (ts)->tv_sec;				\
		(tv)->tv_usec = (ts)->tv_nsec / 1000;			\
	} while (0)

struct timezone {
	int	tz_minuteswest;	/* minutes west of Greenwich */
	int	tz_dsttime;	/* type of dst correction */
};
#define	DST_NONE	0	/* not on dst */
#define	DST_USA		1	/* USA style dst */
#define	DST_AUST	2	/* Australian style dst */
#define	DST_WET		3	/* Western European dst */
#define	DST_MET		4	/* Middle European dst */
#define	DST_EET		5	/* Eastern European dst */
#define	DST_CAN		6	/* Canada */

/*
 * Structure used to interface to the machine dependent hardware support
 * for timekeeping.
 *
 * A timecounter is a (hard or soft) binary counter which has two properties:
 *    * it runs at a fixed, known frequency.
 *    * it must not roll over in less than (1 + delta)/HZ seconds.  "delta"
 *      is expected to be less than 20 msec, but no hard data has been
 *      collected on this.  16 bit at 5 MHz (31 msec) is known to work.
 *
 * get_timecount() reads the counter.
 *
 * counter_mask removes unimplemented bits from the count value.
 *
 * frequency is the counter frequency in hz.
 *
 * name is a short mnemonic name for this counter.
 *
 * cost is a measure of how long it takes to read the counter.
 *
 * adjustment [PPM << 16] which means that the smallest unit of correction
 *     you can apply amounts to 481.5 usec/year.
 *
 * scale_micro [2^32 * usec/tick].
 * scale_nano_i [ns/tick].
 * scale_nano_f [(ns/2^32)/tick].
 *
 * offset_count is the contents of the counter which corresponds to the
 *     rest of the offset_* values.
 *
 * offset_sec [s].
 * offset_micro [usec].
 * offset_nano [ns/2^32] is misnamed, the real unit is .23283064365...
 *     attoseconds (10E-18) and before you ask: yes, they are in fact
 *     called attoseconds, it comes from "atten" for 18 in Danish/Swedish.
 *
 * Each timecounter must supply an array of three timecounters; this is
 * needed to guarantee atomicity in the code.  Index zero is used to
 * transport modifications, for instance done with sysctl, into the
 * timecounter being used in a safe way.  Such changes may be adopted
 * with a delay of up to 1/HZ.  Index one & two are used alternately for
 * the actual timekeeping.
 *
 * `other' points to the opposite "work" timecounter, i.e., in index one it
 * points to index two and vice versa.
 *
 * `tweak' points to index zero.
 *
 */

struct timecounter;
typedef unsigned timecounter_get_t __P((struct timecounter *));
typedef void timecounter_pps_t __P((struct timecounter *));

struct timecounter {
	/* These fields must be initialized by the driver. */
	timecounter_get_t	*tc_get_timecount;
	timecounter_pps_t	*tc_poll_pps;
	unsigned		tc_counter_mask;
	u_int32_t		tc_frequency;
	char			*tc_name;
	void			*tc_priv;
	/* These fields will be managed by the generic code. */
	int64_t			tc_adjustment;
	u_int32_t		tc_scale_micro;
	u_int32_t		tc_scale_nano_i;
	u_int32_t		tc_scale_nano_f;
	unsigned		tc_offset_count;
	u_int32_t		tc_offset_sec;
	u_int32_t		tc_offset_micro;
	u_int64_t		tc_offset_nano;
	struct timeval		tc_microtime;
	struct timespec		tc_nanotime;
	struct timecounter	*tc_other;
	struct timecounter	*tc_tweak;
};
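To make the driver contract above concrete, a hypothetical registration of a 16-bit, 5 MHz counter, following the comment's requirement that a driver supply an array of three timecounters with only the first entry's driver-owned fields filled in. All example_* names and the register access are invented for illustration.

static volatile u_int16_t *example_reg;	/* mapped by attach code (not shown) */

static unsigned
example_get_timecount(struct timecounter *tc)
{
	return (*example_reg);		/* only 16 bits are implemented */
}

static struct timecounter example_timecounter[3] = {
	{
		example_get_timecount,	/* tc_get_timecount */
		0,			/* tc_poll_pps: none */
		0xffff,			/* tc_counter_mask: 16 valid bits */
		5000000,		/* tc_frequency [Hz] */
		"example"		/* tc_name */
	}
};

static void
example_tc_init(void)
{
	init_timecounter(example_timecounter);
}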
#ifdef KERNEL

/* Operations on timespecs */
#define	timespecclear(tvp)	((tvp)->tv_sec = (tvp)->tv_nsec = 0)
#define	timespecisset(tvp)	((tvp)->tv_sec || (tvp)->tv_nsec)
#define	timespeccmp(tvp, uvp, cmp)					\
	(((tvp)->tv_sec == (uvp)->tv_sec) ?				\
	    ((tvp)->tv_nsec cmp (uvp)->tv_nsec) :			\
	    ((tvp)->tv_sec cmp (uvp)->tv_sec))
#define timespecadd(vvp, uvp)						\
	do {								\
		(vvp)->tv_sec += (uvp)->tv_sec;				\
		(vvp)->tv_nsec += (uvp)->tv_nsec;			\
		if ((vvp)->tv_nsec >= 1000000000) {			\
			(vvp)->tv_sec++;				\
			(vvp)->tv_nsec -= 1000000000;			\
		}							\
	} while (0)
#define timespecsub(vvp, uvp)						\
	do {								\
		(vvp)->tv_sec -= (uvp)->tv_sec;				\
		(vvp)->tv_nsec -= (uvp)->tv_nsec;			\
		if ((vvp)->tv_nsec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_nsec += 1000000000;			\
		}							\
	} while (0)

/* Operations on timevals. */
#define	timevalclear(tvp)	(tvp)->tv_sec = (tvp)->tv_usec = 0
#define	timevalisset(tvp)	((tvp)->tv_sec || (tvp)->tv_usec)
#define	timevalcmp(tvp, uvp, cmp)					\
	(((tvp)->tv_sec == (uvp)->tv_sec) ?				\
	    ((tvp)->tv_usec cmp (uvp)->tv_usec) :			\
	    ((tvp)->tv_sec cmp (uvp)->tv_sec))
/* timevaladd and timevalsub are not inlined */

#endif /* KERNEL */

#ifndef KERNEL			/* NetBSD/OpenBSD compatible interfaces */
#define	timerclear(tvp)		(tvp)->tv_sec = (tvp)->tv_usec = 0
#define	timerisset(tvp)		((tvp)->tv_sec || (tvp)->tv_usec)
#define	timercmp(tvp, uvp, cmp)						\
	(((tvp)->tv_sec == (uvp)->tv_sec) ?				\
	    ((tvp)->tv_usec cmp (uvp)->tv_usec) :			\
	    ((tvp)->tv_sec cmp (uvp)->tv_sec))
#define timeradd(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec;	\
		if ((vvp)->tv_usec >= 1000000) {			\
			(vvp)->tv_sec++;				\
			(vvp)->tv_usec -= 1000000;			\
		}							\
	} while (0)
#define timersub(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
		if ((vvp)->tv_usec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_usec += 1000000;			\
		}							\
	} while (0)
#endif
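A small illustration of how the in-place timespec macros compose; the helper function is hypothetical, not part of the header. timespecadd() modifies its first argument and renormalizes tv_nsec, so applying an offset and then comparing against a deadline looks like this:

static int
example_deadline_passed(struct timespec now, struct timespec *offset,
    struct timespec *deadline)
{
	timespecadd(&now, offset);		/* now += offset, normalized */
	return (timespeccmp(&now, deadline, >=));
}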
/*
 * Names of the interval timers, and structure
 * defining a timer setting.
 */
#define	ITIMER_REAL	0
#define	ITIMER_VIRTUAL	1
#define	ITIMER_PROF	2

struct itimerval {
	struct	timeval it_interval;	/* timer interval */
	struct	timeval it_value;	/* current value */
};

/*
 * Getkerninfo clock information structure
 */
struct clockinfo {
	int	hz;		/* clock frequency */
	int	tick;		/* micro-seconds per hz tick */
	int	tickadj;	/* clock skew rate for adjtime() */
	int	stathz;		/* statistics clock frequency */
	int	profhz;		/* profiling clock frequency */
};

/* CLOCK_REALTIME and TIMER_ABSTIME are supposed to be in time.h */
#ifndef CLOCK_REALTIME
#define CLOCK_REALTIME	0
#endif
#define CLOCK_VIRTUAL	1
#define CLOCK_PROF	2

#define TIMER_RELTIME	0x0	/* relative timer */
#ifndef TIMER_ABSTIME
#define TIMER_ABSTIME	0x1	/* absolute timer */
#endif

#ifdef KERNEL
extern struct timecounter *timecounter;
extern time_t	time_second;

void	getmicrouptime __P((struct timeval *tv));
void	getmicrotime __P((struct timeval *tv));
void	getnanouptime __P((struct timespec *tv));
void	getnanotime __P((struct timespec *tv));
void	init_timecounter __P((struct timecounter *tc));
int	itimerdecr __P((struct itimerval *itp, int usec));
int	itimerfix __P((struct timeval *tv));
void	microuptime __P((struct timeval *tv));
void	microtime __P((struct timeval *tv));
void	nanouptime __P((struct timespec *ts));
void	nanotime __P((struct timespec *ts));
void	set_timecounter __P((struct timespec *ts));
-void	timecounter_timespec __P((unsigned count, struct timespec *ts));
void	timevaladd __P((struct timeval *, struct timeval *));
void	timevalsub __P((struct timeval *, struct timeval *));
int	tvtohz __P((struct timeval *));
#else /* !KERNEL */
#include
#include

__BEGIN_DECLS
int	adjtime __P((const struct timeval *, struct timeval *));
int	getitimer __P((int, struct itimerval *));
int	gettimeofday __P((struct timeval *, struct timezone *));
int	setitimer __P((int, const struct itimerval *, struct itimerval *));
int	settimeofday __P((const struct timeval *, const struct timezone *));
int	utimes __P((const char *, const struct timeval *));
__END_DECLS
#endif /* !KERNEL */

#endif /* !_SYS_TIME_H_ */
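Note the pairing of precise and cheap interfaces in the declarations above: nanotime() runs the full timecounter arithmetic on every call, while getnanotime() returns a value the clock code keeps cached, which is much cheaper when 1/HZ granularity is acceptable. A hedged sketch of the intended usage pattern (the function is illustrative):

static void
example_timestamps(struct timespec *precise, struct timespec *cheap)
{
	nanotime(precise);	/* full timecounter resolution */
	getnanotime(cheap);	/* cached value, at most ~1/HZ stale */
}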
Index: head/sys/sys/timex.h
===================================================================
--- head/sys/sys/timex.h	(revision 44665)
+++ head/sys/sys/timex.h	(revision 44666)
@@ -1,229 +1,228 @@
/***********************************************************************
 *                                                                     *
 * Copyright (c) David L. Mills 1993-1998                              *
 *                                                                     *
 * Permission to use, copy, modify, and distribute this software and  *
 * its documentation for any purpose and without fee is hereby        *
 * granted, provided that the above copyright notice appears in all   *
 * copies and that both the copyright notice and this permission      *
 * notice appear in supporting documentation, and that the name       *
 * University of Delaware not be used in advertising or publicity     *
 * pertaining to distribution of the software without specific,       *
 * written prior permission.  The University of Delaware makes no     *
 * representations about the suitability this software for any        *
 * purpose.  It is provided "as is" without express or implied        *
 * warranty.                                                          *
 *                                                                     *
 **********************************************************************/

/*
 * Modification history timex.h
 *
 * 17 Nov 98	David L. Mills
 *	Revised for nanosecond kernel and user interface.
 *
 * 26 Sep 94	David L. Mills
 *	Added defines for hybrid phase/frequency-lock loop.
 *
 * 19 Mar 94	David L. Mills
 *	Moved defines from kernel routines to header file and added new
 *	defines for PPS phase-lock loop.
 *
 * 20 Feb 94	David L. Mills
 *	Revised status codes and structures for external clock and PPS
 *	signal discipline.
 *
 * 28 Nov 93	David L. Mills
 *	Adjusted parameters to improve stability and increase poll
 *	interval.
 *
 * 17 Sep 93	David L. Mills
 *	Created file
 */
/*
 * This header file defines the Network Time Protocol (NTP) interfaces
 * for user and daemon application programs.  These are implemented using
 * defined syscalls and data structures and require specific kernel
 * support.
 *
 * The original precision time kernels developed from 1993 have an
 * ultimate resolution of one microsecond; however, the most recent
 * kernels have an ultimate resolution of one nanosecond.  In these
 * kernels, an ntp_adjtime() syscall can be used to determine which
 * resolution is in use and to select either one at any time.  The
 * resolution selected affects the scaling of certain fields in the
 * ntp_gettime() and ntp_adjtime() syscalls, as described below.
 *
 * NAME
 *	ntp_gettime - NTP user application interface
 *
 * SYNOPSIS
 *	#include
- *	#include
 *
- *	int syscall(SYS_ntp_gettime, tptr);
- *	int SYS_ntp_gettime;
- *	struct ntptimeval *tptr;
+ *	int ntp_gettime(struct ntptimeval *ntv);
 *
 * DESCRIPTION
- *	The time returned by ntp_gettime() is in a timeval structure,
+ *	The time returned by ntp_gettime() is in a timespec structure,
 *	but may be in either microsecond (seconds and microseconds) or
 *	nanosecond (seconds and nanoseconds) format.  The particular
 *	format in use is determined by the STA_NANO bit of the status
 *	word returned by the ntp_adjtime() syscall.
 *
 * NAME
 *	ntp_adjtime - NTP daemon application interface
 *
 * SYNOPSIS
 *	#include
 *	#include
 *
 *	int syscall(SYS_ntp_adjtime, tptr);
+ *	int SYS_ntp_adjtime;
 *	struct timex *tptr;
 *
 * DESCRIPTION
 *	Certain fields of the timex structure are interpreted in either
 *	microseconds or nanoseconds according to the state of the
 *	STA_NANO bit in the status word.  See the description below for
 *	further information.
 */
#ifndef _SYS_TIMEX_H_
-#define _SYS_TIMEX_H_ 1
+#define _SYS_TIMEX_H_

#ifndef MSDOS			/* Microsoft specific */
#include
#endif /* MSDOS */

/*
 * The following defines establish the performance envelope of the
 * kernel discipline loop.  Phase or frequency errors greater than
 * MAXPHASE or MAXFREQ are clamped to these maxima.  For update intervals
 * less than MINSEC, the loop always operates in PLL mode; while, for
 * update intervals greater than MAXSEC, the loop always operates in FLL
 * mode.  Between these two limits the operating mode is selected by the
 * STA_FLL bit in the status word.
 */
#define MAXPHASE	500000000L	/* max phase error (ns) */
#define MAXFREQ		500000L		/* max freq error (ns/s) */
#define MINSEC		256		/* min FLL update interval (s) */
#define MAXSEC		1600		/* max PLL update interval (s) */
#define NANOSECOND	1000000000L	/* nanoseconds in one second */
#define SCALE_PPM	(65536 / 1000)	/* crude ns/s to scaled PPM */
#define MAXTC		10		/* max time constant in PLL mode */
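The SCALE_PPM constant deserves a worked example: 65536/1000 truncates to 65, an integer shortcut for converting a frequency error in ns/s (i.e. parts per billion) into the scaled-PPM units used below, where 1 PPM equals 1 << 16. A hypothetical helper, not part of the header:

/*
 * 500000 ns/s is 500 PPM.  Exact: 500 << 16 = 32768000.
 * Crude:  500000 * (65536 / 1000) = 500000 * 65 = 32500000,
 * about 0.8% low, hence the "crude" in the comment above.
 */
static long
example_nsps_to_scaled_ppm(long nsps)
{
	return (nsps * SCALE_PPM);
}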
/*
 * The following defines and structures define the user interface for
 * the ntp_gettime() and ntp_adjtime() syscalls.
 *
 * Control mode codes (timex.modes and nanotimex.modes)
 */
#define MOD_OFFSET	0x0001	/* set time offset */
#define MOD_FREQUENCY	0x0002	/* set frequency offset */
#define MOD_MAXERROR	0x0004	/* set maximum time error */
#define MOD_ESTERROR	0x0008	/* set estimated time error */
#define MOD_STATUS	0x0010	/* set clock status bits */
#define MOD_TIMECONST	0x0020	/* set PLL time constant */
#define MOD_PLL		0x0400	/* select default PLL mode */
#define MOD_FLL		0x0800	/* select default FLL mode */
#define MOD_MICRO	0x1000	/* select microsecond resolution */
#define MOD_NANO	0x2000	/* select nanosecond resolution */
#define MOD_CLKB	0x4000	/* select clock B */
#define MOD_CLKA	0x8000	/* select clock A */

/*
 * Status codes (timex.status)
 */
#define STA_PLL		0x0001	/* enable PLL updates (rw) */
#define STA_PPSFREQ	0x0002	/* enable PPS freq discipline (rw) */
#define STA_PPSTIME	0x0004	/* enable PPS time discipline (rw) */
#define STA_FLL		0x0008	/* enable FLL mode (rw) */
#define STA_INS		0x0010	/* insert leap (rw) */
#define STA_DEL		0x0020	/* delete leap (rw) */
#define STA_UNSYNC	0x0040	/* clock unsynchronized (rw) */
#define STA_FREQHOLD	0x0080	/* hold frequency (rw) */
#define STA_PPSSIGNAL	0x0100	/* PPS signal present (ro) */
#define STA_PPSJITTER	0x0200	/* PPS signal jitter exceeded (ro) */
#define STA_PPSWANDER	0x0400	/* PPS signal wander exceeded (ro) */
#define STA_PPSERROR	0x0800	/* PPS signal calibration error (ro) */
#define STA_CLOCKERR	0x1000	/* clock hardware fault (ro) */
#define STA_NANO	0x2000	/* resolution (0 = us, 1 = ns) (ro) */
#define STA_MODE	0x4000	/* mode (0 = PLL, 1 = FLL) (ro) */
#define STA_CLK		0x8000	/* clock source (0 = A, 1 = B) (ro) */

#define STA_RONLY (STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | \
    STA_PPSERROR | STA_CLOCKERR | STA_NANO | STA_MODE | STA_CLK)

/*
 * Clock states (time_state)
 */
#define TIME_OK		0	/* no leap second warning */
#define TIME_INS	1	/* insert leap second warning */
#define TIME_DEL	2	/* delete leap second warning */
#define TIME_OOP	3	/* leap second in progress */
#define TIME_WAIT	4	/* leap second has occurred */
#define TIME_ERROR	5	/* error (see status word) */

/*
 * NTP user interface (ntp_gettime()) - used to read kernel clock values
 *
 * Note: The time member is in microseconds if STA_NANO is zero and
 * nanoseconds if not.
 */
struct ntptimeval {
-	struct timespec time;	/* current time (ns) (ro) */
+	struct timespec time;	/* current time (ns/us) (ro) */
	long maxerror;		/* maximum error (us) (ro) */
	long esterror;		/* estimated error (us) (ro) */
	int time_state;		/* time status */
};
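As the note above says, interpreting ntptimeval's time member requires the STA_NANO status bit, which userland can obtain with a read-only ntp_adjtime() call (modes == 0). An illustrative program, not part of the diff:

#include <sys/timex.h>
#include <stdio.h>

int
main(void)
{
	struct ntptimeval ntv;
	struct timex tx;
	int nano;

	tx.modes = 0;				/* read-only: no MOD_* bits */
	if (ntp_gettime(&ntv) < 0 || ntp_adjtime(&tx) < 0)
		return (1);
	nano = (tx.status & STA_NANO) != 0;	/* ns or us resolution? */
	printf("%ld.%0*ld state %d\n", (long)ntv.time.tv_sec,
	    nano ? 9 : 6, ntv.time.tv_nsec, ntv.time_state);
	return (0);
}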
/*
 * NTP daemon interface (ntp_adjtime()) - used to discipline CPU clock
 * oscillator and determine status.
 *
 * Note: The offset, precision and jitter members are in microseconds if
 * STA_NANO is zero and nanoseconds if not.
 */
struct timex {
	unsigned int modes;	/* clock mode bits (wo) */
	long	offset;		/* time offset (ns/us) (rw) */
	long	freq;		/* frequency offset (scaled PPM) (rw) */
	long	maxerror;	/* maximum error (us) (rw) */
	long	esterror;	/* estimated error (us) (rw) */
	int	status;		/* clock status bits (rw) */
	long	constant;	/* poll interval (log2 s) (rw) */
	long	precision;	/* clock precision (ns/us) (ro) */
	long	tolerance;	/* clock frequency tolerance (scaled
				 * PPM) (ro) */
	/*
	 * The following read-only structure members are implemented
	 * only if the PPS signal discipline is configured in the
	 * kernel.  They are included in all configurations to insure
	 * portability.
	 */
	long	ppsfreq;	/* PPS frequency (scaled PPM) (ro) */
	long	jitter;		/* PPS jitter (ns/us) (ro) */
	int	shift;		/* interval duration (s) (shift) (ro) */
	long	stabil;		/* PPS stability (scaled PPM) (ro) */
	long	jitcnt;		/* jitter limit exceeded (ro) */
	long	calcnt;		/* calibration intervals (ro) */
	long	errcnt;		/* calibration errors (ro) */
	long	stbcnt;		/* stability limit exceeded (ro) */
};

#ifdef __FreeBSD__

#ifdef KERNEL
-void ntp_update_second __P((struct timecounter *tc));
-#else
+struct timecounter;
+void	ntp_update_second __P((struct timecounter *tc));
+#else /* !KERNEL */
#include

__BEGIN_DECLS
-extern int ntp_gettime __P((struct ntptimeval *));
-extern int ntp_adjtime __P((struct timex *));
+int	ntp_adjtime __P((struct timex *));
+int	ntp_gettime __P((struct ntptimeval *));
__END_DECLS
+#endif /* KERNEL */

-#endif /* not KERNEL */
-
#endif /* __FreeBSD__ */
-#endif /* _SYS_TIMEX_H_ */
+
+#endif /* !_SYS_TIMEX_H_ */
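Finally, to tie the pieces of this commit together: the new struct pps_state in timepps.h lets an interrupt-source driver replace hand-rolled timestamping with three calls: pps_init() at attach time, pps_event() from the interrupt handler with a raw timecounter count, and pps_ioctl() from the ioctl entry point. A hedged sketch of that driver-side sequence; all example_* names are invented:

static struct pps_state example_pps;

static void
example_attach(void)
{
	example_pps.ppscap = PPS_CAPTUREASSERT;	/* advertise capabilities */
	pps_init(&example_pps);
}

static void
example_intr(void)
{
	struct timecounter *tc;
	unsigned count;

	/* Sample the counter as early as possible for best accuracy. */
	tc = timecounter;
	count = tc->tc_get_timecount(tc);
	pps_event(&example_pps, tc, count, PPS_CAPTUREASSERT);
}

static int
example_ioctl(u_long cmd, caddr_t data)
{
	return (pps_ioctl(cmd, data, &example_pps));
}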