diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c
--- a/sys/compat/linuxkpi/common/src/linux_compat.c
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c
@@ -2068,20 +2068,16 @@
 void
 linux_complete_common(struct completion *c, int all)
 {
-	int wakeup_swapper;
-
 	sleepq_lock(c);
 	if (all) {
 		c->done = UINT_MAX;
-		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
+		sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
 	} else {
 		if (c->done != UINT_MAX)
 			c->done++;
-		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
+		sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
 	}
 	sleepq_release(c);
-	if (wakeup_swapper)
-		kick_proc0();
 }
 
 /*
diff --git a/sys/compat/linuxkpi/common/src/linux_schedule.c b/sys/compat/linuxkpi/common/src/linux_schedule.c
--- a/sys/compat/linuxkpi/common/src/linux_schedule.c
+++ b/sys/compat/linuxkpi/common/src/linux_schedule.c
@@ -98,18 +98,16 @@
 static int
 wake_up_task(struct task_struct *task, unsigned int state)
 {
-	int ret, wakeup_swapper;
+	int ret;
 
-	ret = wakeup_swapper = 0;
+	ret = 0;
 	sleepq_lock(task);
 	if ((atomic_read(&task->state) & state) != 0) {
 		set_task_state(task, TASK_WAKING);
-		wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
+		sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
 		ret = 1;
 	}
 	sleepq_release(task);
-	if (wakeup_swapper)
-		kick_proc0();
 	return (ret);
 }
 
@@ -330,13 +328,9 @@
 static void
 wake_up_sleepers(void *wchan)
 {
-	int wakeup_swapper;
-
 	sleepq_lock(wchan);
-	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
+	sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
 	sleepq_release(wchan);
-	if (wakeup_swapper)
-		kick_proc0();
 }
 
 #define	bit_to_wchan(word, bit) ((void *)(((uintptr_t)(word) << 6) | (bit)))
diff --git a/sys/dev/qat/qat_common/adf_aer.c b/sys/dev/qat/qat_common/adf_aer.c
--- a/sys/dev/qat/qat_common/adf_aer.c
+++ b/sys/dev/qat/qat_common/adf_aer.c
@@ -28,17 +28,13 @@
 void
 linux_complete_common(struct completion *c, int all)
 {
-	int wakeup_swapper;
-
 	sleepq_lock(c);
 	c->done++;
 	if (all)
-		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
+		sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
 	else
-		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
+		sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
 	sleepq_release(c);
-	if (wakeup_swapper)
-		kick_proc0();
 }
 
 /* reset dev data */
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -427,16 +427,13 @@
 }
 
 /*
- * Signal a condition variable, wakes up one waiting thread.  Will also wakeup
- * the swapper if the process is not in memory, so that it can bring the
- * sleeping process in.  Note that this may also result in additional threads
- * being made runnable.  Should be called with the same mutex as was passed to
- * cv_wait held.
+ * Signal a condition variable, wakes up one waiting thread.  Note that this may
+ * also result in additional threads being made runnable.  Should be called with
+ * the same mutex as was passed to cv_wait held.
  */
 void
 cv_signal(struct cv *cvp)
 {
-
 	if (cvp->cv_waiters == 0)
 		return;
 	sleepq_lock(cvp);
@@ -450,8 +447,7 @@
 	} else {
 		if (cvp->cv_waiters < CV_WAITERS_BOUND)
 			cvp->cv_waiters--;
-		if (sleepq_signal(cvp, SLEEPQ_CONDVAR | SLEEPQ_DROP, 0, 0))
-			kick_proc0();
+		sleepq_signal(cvp, SLEEPQ_CONDVAR | SLEEPQ_DROP, 0, 0);
 	}
 }
 
@@ -462,23 +458,18 @@
 void
 cv_broadcastpri(struct cv *cvp, int pri)
 {
-	int wakeup_swapper;
-
 	if (cvp->cv_waiters == 0)
 		return;
 	/*
 	 * XXX sleepq_broadcast pri argument changed from -1 meaning
 	 * no pri to 0 meaning no pri.
 	 */
-	wakeup_swapper = 0;
 	if (pri == -1)
 		pri = 0;
 	sleepq_lock(cvp);
 	if (cvp->cv_waiters > 0) {
 		cvp->cv_waiters = 0;
-		wakeup_swapper = sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
+		sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
 	}
 	sleepq_release(cvp);
-	if (wakeup_swapper)
-		kick_proc0();
 }
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -186,7 +186,7 @@
     uintptr_t *xp);
 
 static void
-lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
+lockmgr_exit(u_int flags, struct lock_object *ilk)
 {
 	struct lock_class *class;
 
@@ -194,9 +194,6 @@
 		class = LOCK_CLASS(ilk);
 		class->lc_unlock(ilk);
 	}
-
-	if (__predict_false(wakeup_swapper))
-		kick_proc0();
 }
 
 static void
@@ -310,14 +307,13 @@
 	return (error);
 }
 
-static __inline int
+static __inline void
 wakeupshlk(struct lock *lk, const char *file, int line)
 {
 	uintptr_t v, x, orig_x;
 	u_int realexslp;
-	int queue, wakeup_swapper;
+	int queue;
 
-	wakeup_swapper = 0;
 	for (;;) {
 		x = lockmgr_read_value(lk);
 		if (lockmgr_sunlock_try(lk, &x))
@@ -361,9 +357,8 @@
 			LOCK_LOG2(lk,
 			    "%s: %p waking up threads on the exclusive queue",
 			    __func__, lk);
-			wakeup_swapper =
-			    sleepq_broadcast(&lk->lock_object,
-			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
+			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
+			    SQ_EXCLUSIVE_QUEUE);
 			queue = SQ_SHARED_QUEUE;
 		}
 	} else {
@@ -390,14 +385,12 @@
 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
 		    "exclusive");
-		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
-		    0, queue);
+		sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
 		sleepq_release(&lk->lock_object);
 		break;
 	}
 
 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
-	return (wakeup_swapper);
 }
 
 static void
@@ -730,7 +723,7 @@
 	}
 
 out:
-	lockmgr_exit(flags, ilk, 0);
+	lockmgr_exit(flags, ilk);
 	return (error);
 }
 
@@ -968,7 +961,7 @@
 	}
 
 out:
-	lockmgr_exit(flags, ilk, 0);
+	lockmgr_exit(flags, ilk);
 	return (error);
 }
 
@@ -1028,7 +1021,7 @@
 		error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
 	flags &= ~LK_INTERLOCK;
 out:
-	lockmgr_exit(flags, ilk, 0);
+	lockmgr_exit(flags, ilk);
 	return (error);
 }
 
@@ -1100,17 +1093,10 @@
 static __noinline int
 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags,
     struct lock_object *ilk, const char *file, int line)
-
 {
-	int wakeup_swapper = 0;
-
-	if (SCHEDULER_STOPPED())
-		goto out;
-
-	wakeup_swapper = wakeupshlk(lk, file, line);
-
-out:
-	lockmgr_exit(flags, ilk, wakeup_swapper);
+	if (!SCHEDULER_STOPPED())
+		wakeupshlk(lk, file, line);
+	lockmgr_exit(flags, ilk);
 	return (0);
 }
 
@@ -1119,7 +1105,6 @@
     const char *file, int line)
 {
 	uintptr_t tid, v;
-	int wakeup_swapper = 0;
 	u_int realexslp;
 	int queue;
 
@@ -1188,8 +1173,8 @@
 			LOCK_LOG2(lk,
 			    "%s: %p waking up threads on the exclusive queue",
 			    __func__, lk);
-			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
-			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
+			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
+			    SQ_EXCLUSIVE_QUEUE);
 			queue = SQ_SHARED_QUEUE;
 		}
 	} else {
@@ -1207,11 +1192,11 @@
 	    __func__, lk, queue == SQ_SHARED_QUEUE ?
"shared" : "exclusive"); atomic_store_rel_ptr(&lk->lk_lock, v); - wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue); + sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue); sleepq_release(&lk->lock_object); out: - lockmgr_exit(flags, ilk, wakeup_swapper); + lockmgr_exit(flags, ilk); return (0); } @@ -1309,7 +1294,7 @@ const char *iwmesg; uintptr_t tid, v, x; u_int op, realexslp; - int error, ipri, itimo, queue, wakeup_swapper; + int error, ipri, itimo, queue; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; @@ -1361,7 +1346,6 @@ } } - wakeup_swapper = 0; switch (op) { case LK_SHARED: return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa)); @@ -1519,8 +1503,7 @@ LOCK_LOG2(lk, "%s: %p waking up threads on the exclusive queue", __func__, lk); - wakeup_swapper = - sleepq_broadcast( + sleepq_broadcast( &lk->lock_object, SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE); @@ -1536,8 +1519,8 @@ "%s: %p waking up all threads on the %s queue", __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" : "exclusive"); - wakeup_swapper |= sleepq_broadcast( - &lk->lock_object, SLEEPQ_LK, 0, queue); + sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, + queue); /* * If shared waiters have been woken up we need @@ -1604,8 +1587,6 @@ if (flags & LK_INTERLOCK) class->lc_unlock(ilk); - if (wakeup_swapper) - kick_proc0(); return (error); } diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c --- a/sys/kern/kern_sx.c +++ b/sys/kern/kern_sx.c @@ -474,7 +474,6 @@ sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) { uintptr_t x; - int wakeup_swapper; if (SCHEDULER_STOPPED()) return; @@ -516,18 +515,14 @@ * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single * shared lock. If there are any shared waiters, wake them up. */ - wakeup_swapper = 0; x = sx->sx_lock; atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | (x & SX_LOCK_EXCLUSIVE_WAITERS)); if (x & SX_LOCK_SHARED_WAITERS) - wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, - 0, SQ_SHARED_QUEUE); + sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, + SQ_SHARED_QUEUE); sleepq_release(&sx->lock_object); - if (wakeup_swapper) - kick_proc0(); - out: curthread->td_sx_slocks++; LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line); @@ -920,7 +915,7 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF) { uintptr_t tid, setx; - int queue, wakeup_swapper; + int queue; if (SCHEDULER_STOPPED()) return; @@ -977,11 +972,8 @@ __func__, sx, queue == SQ_SHARED_QUEUE ? 
"shared" : "exclusive"); - wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, - queue); + sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue); sleepq_release(&sx->lock_object); - if (wakeup_swapper) - kick_proc0(); } static __always_inline bool @@ -1333,7 +1325,6 @@ _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x LOCK_FILE_LINE_ARG_DEF) { - int wakeup_swapper = 0; uintptr_t setx, queue; if (SCHEDULER_STOPPED()) @@ -1366,14 +1357,11 @@ if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p waking up all thread on" "exclusive queue", __func__, sx); - wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, - 0, queue); + sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue); td->td_sx_slocks--; break; } sleepq_release(&sx->lock_object); - if (wakeup_swapper) - kick_proc0(); out_lockstat: LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER); } diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -344,16 +344,9 @@ void wakeup(const void *ident) { - int wakeup_swapper; - sleepq_lock(ident); - wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0); + sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0); sleepq_release(ident); - if (wakeup_swapper) { - KASSERT(ident != &proc0, - ("wakeup and wakeup_swapper and proc0")); - kick_proc0(); - } } /* @@ -364,24 +357,15 @@ void wakeup_one(const void *ident) { - int wakeup_swapper; - sleepq_lock(ident); - wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_DROP, 0, 0); - if (wakeup_swapper) - kick_proc0(); + sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_DROP, 0, 0); } void wakeup_any(const void *ident) { - int wakeup_swapper; - sleepq_lock(ident); - wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR | - SLEEPQ_DROP, 0, 0); - if (wakeup_swapper) - kick_proc0(); + sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR | SLEEPQ_DROP, 0, 0); } /* diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c --- a/sys/kern/subr_sleepqueue.c +++ b/sys/kern/subr_sleepqueue.c @@ -920,7 +920,7 @@ /* * Find thread sleeping on a wait channel and resume it. */ -int +void sleepq_signal(const void *wchan, int flags, int pri, int queue) { struct sleepqueue_chain *sc; @@ -935,7 +935,7 @@ if (sq == NULL) { if (flags & SLEEPQ_DROP) sleepq_release(wchan); - return (0); + return; } KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), ("%s: mismatch between sleep/wakeup and cv_*", __func__)); @@ -971,7 +971,6 @@ MPASS(besttd != NULL); sleepq_resume_thread(sq, besttd, pri, (flags & SLEEPQ_DROP) ? 0 : SRQ_HOLD); - return (0); } static bool @@ -984,7 +983,7 @@ /* * Resume all threads sleeping on a specified wait channel. */ -int +void sleepq_broadcast(const void *wchan, int flags, int pri, int queue) { struct sleepqueue *sq; @@ -993,18 +992,18 @@ KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); MPASS((queue >= 0) && (queue < NR_SLEEPQS)); sq = sleepq_lookup(wchan); - if (sq == NULL) - return (0); - KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), - ("%s: mismatch between sleep/wakeup and cv_*", __func__)); + if (sq != NULL) { + KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), + ("%s: mismatch between sleep/wakeup and cv_*", __func__)); - return (sleepq_remove_matching(sq, queue, match_any, pri)); + sleepq_remove_matching(sq, queue, match_any, pri); + } } /* * Resume threads on the sleep queue that match the given predicate. 
  */
-int
+void
 sleepq_remove_matching(struct sleepqueue *sq, int queue,
     bool (*matches)(struct thread *), int pri)
 {
@@ -1020,8 +1019,6 @@
 		if (matches(td))
 			sleepq_resume_thread(sq, td, pri, SRQ_HOLD);
 	}
-
-	return (0);
 }
 
 /*
@@ -1113,7 +1110,7 @@
  *
  * Requires thread lock on entry, releases on return.
  */
-int
+void
 sleepq_abort(struct thread *td, int intrval)
 {
 	struct sleepqueue *sq;
@@ -1131,7 +1128,7 @@
 	 */
 	if (td->td_flags & TDF_TIMEOUT) {
 		thread_unlock(td);
-		return (0);
+		return;
 	}
 
 	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
@@ -1145,7 +1142,7 @@
 	 */
 	if (!TD_IS_SLEEPING(td)) {
 		thread_unlock(td);
-		return (0);
+		return;
 	}
 	wchan = td->td_wchan;
 	MPASS(wchan != NULL);
@@ -1154,7 +1151,6 @@
 
 	/* Thread is asleep on sleep queue sq, so wake it up. */
 	sleepq_resume_thread(sq, td, 0, 0);
-	return (0);
 }
 
 void
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -1161,7 +1161,6 @@
 void	kern_proc_vmmap_resident(struct vm_map *map, struct vm_map_entry *entry,
 	    int *resident_count, bool *super);
 void	kern_yield(int);
-void	kick_proc0(void);
 void	killjobc(void);
 int	leavepgrp(struct proc *p);
 int	maybe_preempt(struct thread *td);
diff --git a/sys/sys/sleepqueue.h b/sys/sys/sleepqueue.h
--- a/sys/sys/sleepqueue.h
+++ b/sys/sys/sleepqueue.h
@@ -86,21 +86,21 @@
 #define	SLEEPQ_DROP	0x400		/* Return without lock held. */
 
 void	init_sleepqueues(void);
-int	sleepq_abort(struct thread *td, int intrval);
+void	sleepq_abort(struct thread *td, int intrval);
 void	sleepq_add(const void *wchan, struct lock_object *lock,
 	    const char *wmesg, int flags, int queue);
 struct sleepqueue *sleepq_alloc(void);
-int	sleepq_broadcast(const void *wchan, int flags, int pri, int queue);
+void	sleepq_broadcast(const void *wchan, int flags, int pri, int queue);
 void	sleepq_chains_remove_matching(bool (*matches)(struct thread *));
 void	sleepq_free(struct sleepqueue *sq);
 void	sleepq_lock(const void *wchan);
 struct sleepqueue *sleepq_lookup(const void *wchan);
 void	sleepq_release(const void *wchan);
 void	sleepq_remove(struct thread *td, const void *wchan);
-int	sleepq_remove_matching(struct sleepqueue *sq, int queue,
+void	sleepq_remove_matching(struct sleepqueue *sq, int queue,
 	    bool (*matches)(struct thread *), int pri);
 void	sleepq_remove_nested(struct thread *td);
-int	sleepq_signal(const void *wchan, int flags, int pri, int queue);
+void	sleepq_signal(const void *wchan, int flags, int pri, int queue);
 void	sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt,
 	    sbintime_t pr, int flags);
 #define	sleepq_set_timeout(wchan, timo)					\
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -836,11 +836,3 @@
 
 	vmspace_exitfree(p);		/* and clean-out the vmspace */
 }
-
-/*
- * This used to kick the thread which faults in threads.
- */
-void
-kick_proc0(void)
-{
-}
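
Usage note (not part of the patch): with sleepq_signal() and sleepq_broadcast() now returning void, a caller no longer collects a wakeup_swapper result or calls kick_proc0() after dropping the sleepqueue chain lock. A minimal sketch of the resulting call pattern, using a hypothetical helper name; the body simply mirrors the new wakeup() in kern_synch.c above:

	/* Illustrative sketch only; not a function added by this change. */
	static void
	example_wakeup_all(const void *wchan)
	{
		sleepq_lock(wchan);				/* lock the sleepqueue chain */
		sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);	/* resume all sleepers; no return value */
		sleepq_release(wchan);				/* drop the chain lock */
	}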