Index: sys/kern/kern_synch.c
===================================================================
--- sys/kern/kern_synch.c
+++ sys/kern/kern_synch.c
@@ -213,13 +213,33 @@
 		lock_state = class->lc_unlock(lock);
 		sleepq_lock(ident);
 	}
-	if (sbt != 0 && catch)
-		rval = sleepq_timedwait_sig(ident, pri);
-	else if (sbt != 0)
-		rval = sleepq_timedwait(ident, pri);
-	else if (catch)
+	if (sbt != 0) {
+		if (priority & PRTCLK) {
+			thread_lock(td);
+			td->td_flags |= TDF_SLEEPRTC;
+			thread_unlock(td);
+		}
+		if (catch)
+			rval = sleepq_timedwait_sig(ident, pri);
+		else
+			rval = sleepq_timedwait(ident, pri);
+		if (priority & PRTCLK) {
+			thread_lock(td);
+			if (td->td_flags & TDF_SLEEPRTC) {
+				td->td_flags &= ~TDF_SLEEPRTC;
+			} else if (rval == 0) {
+				/*
+				 * The thread was awoken by an adjustment of
+				 * the real-time clock.  It should read the
+				 * RTC again and act on the new value.
+				 */
+				rval = ERELOOKUP;
+			}
+			thread_unlock(td);
+		}
+	} else if (catch) {
 		rval = sleepq_wait_sig(ident, pri);
-	else {
+	} else {
 		sleepq_wait(ident, pri);
 		rval = 0;
 	}
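How a msleep(9) caller is expected to consume the new flag: a minimal sketch,
not part of this diff.  PRTCLK tags the sleep so a step of the realtime clock
can end it early; ERELOOKUP tells the caller to rebase its timeout on the new
RTC value and sleep again.  sleep_until_rtc() and rtc_ticks_until() are
hypothetical names, the latter standing in for whatever deadline arithmetic
the caller does.

/*
 * Hypothetical consumer: sleep until a deadline on the realtime clock.
 * rtc_ticks_until() is a made-up helper converting the remaining real
 * time to ticks.
 */
static int
sleep_until_rtc(void *chan, struct mtx *m, struct timespec *deadline)
{
	int error;

	for (;;) {
		error = msleep(chan, m, PWAIT | PCATCH | PRTCLK, "rtcslp",
		    rtc_ticks_until(deadline));
		if (error != ERELOOKUP)
			return (error);	/* 0, EWOULDBLOCK, EINTR, ... */
		/* The RTC was stepped: recompute the timeout and resleep. */
	}
}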
Index: sys/kern/kern_tc.c
===================================================================
--- sys/kern/kern_tc.c
+++ sys/kern/kern_tc.c
@@ -28,10 +28,13 @@
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/limits.h>
+#include <sys/proc.h>
 #include <sys/sbuf.h>
+#include <sys/sleepqueue.h>
 #include <sys/sysctl.h>
 #include <sys/syslog.h>
 #include <sys/systm.h>
+#include <sys/taskqueue.h>
 #include <sys/timeffc.h>
 #include <sys/timepps.h>
 #include <sys/timetc.h>
@@ -1261,6 +1264,50 @@
 	return (timehands->th_counter->tc_frequency);
 }
 
+static struct timeout_task rtc_sleepers_awake_task;
+
+static bool
+sleeping_on_rtc(struct thread *td)
+{
+
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	if (td->td_flags & TDF_SLEEPRTC) {
+		td->td_flags &= ~TDF_SLEEPRTC;
+		return (true);
+	}
+
+	return (false);
+}
+
+static void
+rtc_sleepers_awake(void *context __unused, int pending __unused)
+{
+	sleepq_remove_matching(sleeping_on_rtc);
+#if 0
+	struct proc *p;
+	struct thread *td;
+
+	sx_slock(&allproc_lock);
+	FOREACH_PROC_IN_SYSTEM(p) {
+		PROC_LOCK(p);
+		FOREACH_THREAD_IN_PROC(p, td) {
+			if (td->td_flags & TDF_SLEEPRTC) {
+				thread_lock(td);
+				if ((td->td_flags & TDF_SLEEPRTC) != 0 &&
+				    TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
+					td->td_flags &= ~TDF_SLEEPRTC;
+					/* cannot use wakeup(td->td_wchan); */
+					/* look up sleepq and remove td */
+				}
+				thread_unlock(td);
+			}
+		}
+		PROC_UNLOCK(p);
+	}
+	sx_sunlock(&allproc_lock);
+#endif
+}
+
 static struct mtx tc_setclock_mtx;
 MTX_SYSINIT(tc_setclock_init, &tc_setclock_mtx, "tcsetc", MTX_SPIN);
 
@@ -1284,6 +1331,8 @@
 	/* XXX fiddle all the little crinkly bits around the fiords... */
 	tc_windup(&bt);
 	mtx_unlock_spin(&tc_setclock_mtx);
+	taskqueue_enqueue_timeout(taskqueue_thread, &rtc_sleepers_awake_task,
+	    hz / 100);
 	if (timestepwarnings) {
 		nanotime(&taft);
 		log(LOG_INFO,
@@ -1965,6 +2014,9 @@
 	mtx_lock_spin(&tc_setclock_mtx);
 	tc_windup(NULL);
 	mtx_unlock_spin(&tc_setclock_mtx);
+
+	TIMEOUT_TASK_INIT(taskqueue_thread, &rtc_sleepers_awake_task, 0,
+	    rtc_sleepers_awake, NULL);
 }
 SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
 
Index: sys/kern/kern_umtx.c
===================================================================
--- sys/kern/kern_umtx.c
+++ sys/kern/kern_umtx.c
@@ -829,7 +829,18 @@
 umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
 {
 	struct umtxq_chain *uc;
-	int error, timo;
+	int error, timo, priority;
+
+	priority = PCATCH | PDROP;
+	if (abstime != NULL) {
+		switch (abstime->clockid) {
+		case CLOCK_REALTIME:
+		case CLOCK_REALTIME_PRECISE:
+		case CLOCK_REALTIME_FAST:
+			priority |= PRTCLK;
+			break;
+		}
+	}
 
 	uc = umtxq_getchain(&uq->uq_key);
 	UMTXQ_LOCKED_ASSERT(uc);
@@ -842,8 +853,8 @@
 			return (ETIMEDOUT);
 		} else
 			timo = 0;
-		error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
-		if (error != EWOULDBLOCK) {
+		error = msleep(uq, &uc->uc_lock, priority, wmesg, timo);
+		if (error != EWOULDBLOCK && error != ERELOOKUP) {
 			umtxq_lock(&uq->uq_key);
 			break;
 		}
Index: sys/kern/subr_sleepqueue.c
===================================================================
--- sys/kern/subr_sleepqueue.c
+++ sys/kern/subr_sleepqueue.c
@@ -26,7 +26,7 @@
 
 /*
  * Implementation of sleep queues used to hold queue of threads blocked on
- * a wait channel.  Sleep queues different from turnstiles in that wait
+ * a wait channel.  Sleep queues are different from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
@@ -36,7 +36,7 @@
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
- * embed it's queue head just as locks do not embed their turnstile queue
+ * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
@@ -1052,6 +1052,41 @@
 	return (sleepq_resume_thread(sq, td, 0));
 }
 
+void
+sleepq_remove_matching(bool (*matches)(struct thread *))
+{
+	struct sleepqueue_chain *sc;
+	struct sleepqueue *sq, *sq_next;
+	struct thread *td, *td_next;
+	int i, wakeup_swapper;
+
+	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
+		if (LIST_EMPTY(&sc->sc_queues)) {
+			continue;
+		}
+		wakeup_swapper = 0;
+		mtx_lock_spin(&sc->sc_lock);
+		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq_next) {
+			for (i = 0; i < NR_SLEEPQS; ++i) {
+				TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[i],
+				    td_slpq, td_next) {
+					thread_lock(td);
+					if (matches(td)) {
+						wakeup_swapper |=
+						    sleepq_resume_thread(sq,
+						    td, 0);
+					}
+					thread_unlock(td);
+				}
+			}
+		}
+		mtx_unlock_spin(&sc->sc_lock);
+		if (wakeup_swapper) {
+			kick_proc0();
+		}
+	}
+}
+
 /*
  * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
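A note on the matches() contract, since it is only implicit above: the
predicate runs with the sleepqueue chain spin lock and the target thread's
lock held, so it must not sleep and should only touch state protected by the
thread lock.  sleeping_on_rtc() in kern_tc.c is the one user this diff adds;
the fragment below is a hypothetical second user, sketched only to show the
shape of a predicate (target_proc, belongs_to_target() and wake_target_proc()
are made up).

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>

/* Hypothetical: wake every sleeping thread belonging to target_proc. */
static struct proc *target_proc;

static bool
belongs_to_target(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	return (td->td_proc == target_proc);
}

static void
wake_target_proc(struct proc *p)
{

	target_proc = p;
	sleepq_remove_matching(belongs_to_target);
}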
Index: sys/sys/param.h
===================================================================
--- sys/sys/param.h
+++ sys/sys/param.h
@@ -215,6 +215,7 @@
 
 #define	PRIMASK	0x0ff
 #define	PCATCH	0x100		/* OR'd with pri for tsleep to check signals */
 #define	PDROP	0x200		/* OR'd with pri to stop re-entry of interlock mutex */
+#define	PRTCLK	0x400		/* OR'd with pri when sleep is based on the RT clock */
 
 #define	NZERO	0		/* default "nice" */
Index: sys/sys/proc.h
===================================================================
--- sys/sys/proc.h
+++ sys/sys/proc.h
@@ -390,7 +390,7 @@
 #define	TDF_ALLPROCSUSP	0x00000200 /* suspended by SINGLE_ALLPROC */
 #define	TDF_BOUNDARY	0x00000400 /* Thread suspended at user boundary */
 #define	TDF_ASTPENDING	0x00000800 /* Thread has some asynchronous events. */
-#define	TDF_UNUSED12	0x00001000 /* --available-- */
+#define	TDF_SLEEPRTC	0x00001000 /* Sleep is based on the real-time clock */
 #define	TDF_SBDRY	0x00002000 /* Stop only on usermode boundary. */
 #define	TDF_UPIBLOCKED	0x00004000 /* Thread blocked on user PI mutex. */
 #define	TDF_NEEDSUSPCHK	0x00008000 /* Thread may need to suspend. */
Index: sys/sys/sleepqueue.h
===================================================================
--- sys/sys/sleepqueue.h
+++ sys/sys/sleepqueue.h
@@ -95,6 +95,7 @@
 struct sleepqueue *sleepq_lookup(void *wchan);
 void	sleepq_release(void *wchan);
 void	sleepq_remove(struct thread *td, void *wchan);
+void	sleepq_remove_matching(bool (*matches)(struct thread *));
 int	sleepq_signal(void *wchan, int flags, int pri, int queue);
 void	sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
 	    int flags);
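For reference, the user-visible behavior the kern_umtx.c change addresses can
be exercised from userland with a default condition variable, which waits on
CLOCK_REALTIME through umtxq_sleep().  A minimal demonstration, with error
handling omitted:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

int
main(void)
{
	struct timespec deadline;
	int error;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 3600;		/* one hour from now */

	pthread_mutex_lock(&mtx);
	do {
		/*
		 * Stepping the clock forward past the deadline (e.g. with
		 * date(1) or by ntpd) previously left this wait oversleeping
		 * by the size of the step; with this diff, the kernel sleep
		 * returns ERELOOKUP and the timeout is re-evaluated against
		 * the new RTC.
		 */
		error = pthread_cond_timedwait(&cv, &mtx, &deadline);
	} while (error == 0);			/* 0 == spurious wakeup */
	pthread_mutex_unlock(&mtx);

	printf("pthread_cond_timedwait: %d\n", error);
	return (0);
}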