Index: head/sys/kern/kern_umtx.c
===================================================================
--- head/sys/kern/kern_umtx.c
+++ head/sys/kern/kern_umtx.c
@@ -4411,20 +4411,20 @@
 	struct thread *td;
 
 	KASSERT(p == curproc, ("need curproc"));
-	PROC_LOCK(p);
 	KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
 	    (p->p_flag & P_STOPPED_SINGLE) != 0,
 	    ("curproc must be single-threaded"));
+	/*
+	 * There is no need to lock the list as only this thread can be
+	 * running.
+	 */
 	FOREACH_THREAD_IN_PROC(p, td) {
 		KASSERT(td == curthread || ((td->td_flags & TDF_BOUNDARY) != 0 &&
 		    TD_IS_SUSPENDED(td)),
 		    ("running thread %p %p", p, td));
-		PROC_UNLOCK(p);
 		umtx_thread_cleanup(td);
-		PROC_LOCK(p);
 		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
 	}
-	PROC_UNLOCK(p);
 }
 
 /*
@@ -4541,17 +4541,21 @@
 	 */
 	uq = td->td_umtxq;
 	if (uq != NULL) {
-		mtx_lock(&umtx_lock);
-		uq->uq_inherited_pri = PRI_MAX;
-		while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
-			pi->pi_owner = NULL;
-			TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
+		if (uq->uq_inherited_pri != PRI_MAX ||
+		    !TAILQ_EMPTY(&uq->uq_pi_contested)) {
+			mtx_lock(&umtx_lock);
+			uq->uq_inherited_pri = PRI_MAX;
+			while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
+				pi->pi_owner = NULL;
+				TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
+			}
+			mtx_unlock(&umtx_lock);
 		}
-		mtx_unlock(&umtx_lock);
-		thread_lock(td);
-		sched_lend_user_prio(td, PRI_MAX);
-		thread_unlock(td);
+		sched_lend_user_prio_cond(td, PRI_MAX);
 	}
+
+	if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
+		return;
 
 	/*
 	 * Handle terminated robust mutexes. Must be done after
Index: head/sys/kern/sched_4bsd.c
===================================================================
--- head/sys/kern/sched_4bsd.c
+++ head/sys/kern/sched_4bsd.c
@@ -930,6 +930,27 @@
 	td->td_flags |= TDF_NEEDRESCHED;
 }
 
+/*
+ * Like the above but first check if there is anything to do.
+ */
+void
+sched_lend_user_prio_cond(struct thread *td, u_char prio)
+{
+
+	if (td->td_lend_user_pri != prio)
+		goto lend;
+	if (td->td_user_pri != min(prio, td->td_base_user_pri))
+		goto lend;
+	if (td->td_priority >= td->td_user_pri)
+		goto lend;
+	return;
+
+lend:
+	thread_lock(td);
+	sched_lend_user_prio(td, prio);
+	thread_unlock(td);
+}
+
 void
 sched_sleep(struct thread *td, int pri)
 {
Index: head/sys/kern/sched_ule.c
===================================================================
--- head/sys/kern/sched_ule.c
+++ head/sys/kern/sched_ule.c
@@ -1861,6 +1861,27 @@
 	td->td_flags |= TDF_NEEDRESCHED;
 }
 
+/*
+ * Like the above but first check if there is anything to do.
+ */
+void
+sched_lend_user_prio_cond(struct thread *td, u_char prio)
+{
+
+	if (td->td_lend_user_pri != prio)
+		goto lend;
+	if (td->td_user_pri != min(prio, td->td_base_user_pri))
+		goto lend;
+	if (td->td_priority >= td->td_user_pri)
+		goto lend;
+	return;
+
+lend:
+	thread_lock(td);
+	sched_lend_user_prio(td, prio);
+	thread_unlock(td);
+}
+
 #ifdef SMP
 /*
  * This tdq is about to idle.  Try to steal a thread from another CPU before
Index: head/sys/sys/sched.h
===================================================================
--- head/sys/sys/sched.h
+++ head/sys/sys/sched.h
@@ -96,6 +96,7 @@
 void	sched_fork_thread(struct thread *td, struct thread *child);
 void	sched_lend_prio(struct thread *td, u_char prio);
 void	sched_lend_user_prio(struct thread *td, u_char pri);
+void	sched_lend_user_prio_cond(struct thread *td, u_char pri);
 fixpt_t	sched_pctcpu(struct thread *td);
 void	sched_prio(struct thread *td, u_char prio);
 void	sched_sleep(struct thread *td, int prio);