Index: head/sys/kern/sched_4bsd.c =================================================================== --- head/sys/kern/sched_4bsd.c +++ head/sys/kern/sched_4bsd.c @@ -816,7 +816,12 @@ static void sched_priority(struct thread *td, u_char prio) { - + struct thread *newtd; + struct runq *rq; + u_char orig_pri; +#ifdef SMP + struct thread *cputd; +#endif KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change", "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED, @@ -832,10 +837,43 @@ THREAD_LOCK_ASSERT(td, MA_OWNED); if (td->td_priority == prio) return; + orig_pri = td->td_priority; td->td_priority = prio; if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) { sched_rem(td); sched_add(td, SRQ_BORING); + } else if (orig_pri < prio && TD_IS_RUNNING(td)) { + /* + * If we have decreased the priority of a running thread, we + * have to check if it should be preempted. + */ + rq = &runq; + newtd = runq_choose(&runq); +#ifdef SMP + cputd = runq_choose(&runq_pcpu[td->td_oncpu]); + if (newtd == NULL || + (cputd != NULL && cputd->td_priority < td->td_priority)) + newtd = cputd; +#endif + + if (newtd != NULL && newtd->td_priority < prio +#ifndef FULL_PREEMPTION + && (newtd->td_priority <= PRI_MAX_ITHD || + prio >= PRI_MIN_IDLE) +#endif + ) { + if (td == curthread) + /* + * Don't reschedule the thread here as it may + * be losing priority because it has released a + * mutex, and in that case we need it to finish + * releasing the lock before it gets preempted. 
+ */ + td->td_owepreempt = 1; + else + kick_other_cpu(newtd->td_priority, + td->td_oncpu); + } } } Index: head/sys/kern/sched_ule.c =================================================================== --- head/sys/kern/sched_ule.c +++ head/sys/kern/sched_ule.c @@ -319,7 +319,7 @@ #ifdef SMP static int tdq_move(struct tdq *, struct tdq *); static int tdq_idled(struct tdq *); -static void tdq_notify(struct tdq *, struct thread *); +static void tdq_notify(struct tdq *, int); static struct thread *tdq_steal(struct tdq *, int); static struct thread *runq_steal(struct runq *, int); static int sched_pickcpu(struct thread *, int); @@ -1040,16 +1040,14 @@ * Notify a remote cpu of new work. Sends an IPI if criteria are met. */ static void -tdq_notify(struct tdq *tdq, struct thread *td) +tdq_notify(struct tdq *tdq, int pri) { struct thread *ctd; - int pri; int cpu; if (tdq->tdq_ipipending) return; - cpu = td_get_sched(td)->ts_cpu; - pri = td->td_priority; + cpu = TDQ_ID(tdq); ctd = pcpu_find(cpu)->pc_curthread; if (!sched_shouldpreempt(pri, ctd->td_priority, 1)) return; @@ -1675,6 +1673,22 @@ ts->ts_ltick = t; } +static void +sched_check_preempt(struct tdq *tdq, struct thread *td) +{ + + KASSERT(TD_IS_RUNNING(td), ("thread is not running")); + TDQ_LOCK_ASSERT(tdq, MA_OWNED); + KASSERT(tdq == TDQ_CPU(td_get_sched(td)->ts_cpu), + ("tdq does not contain td")); + + if (tdq == TDQ_SELF()) { + if (sched_shouldpreempt(tdq->tdq_lowpri, td->td_priority, 0)) + td->td_owepreempt = 1; + } else + tdq_notify(tdq, tdq->tdq_lowpri); +} + /* * Adjust the priority of a thread. Move it to the appropriate run-queue * if necessary. 
This is the back-end for several priority related @@ -1726,6 +1740,9 @@ tdq->tdq_lowpri = prio; else if (tdq->tdq_lowpri == oldpri) tdq_setlowpri(tdq, td); + + if (oldpri < prio) + sched_check_preempt(tdq, td); return; } td->td_priority = prio; @@ -1854,7 +1871,7 @@ */ tdq_lock_pair(tdn, tdq); tdq_add(tdn, td, flags); - tdq_notify(tdn, td); + tdq_notify(tdn, td->td_priority); TDQ_UNLOCK(tdn); spinlock_exit(); #endif @@ -2429,7 +2446,7 @@ tdq = sched_setcpu(td, cpu, flags); tdq_add(tdq, td, flags); if (cpu != PCPU_GET(cpuid)) { - tdq_notify(tdq, td); + tdq_notify(tdq, td->td_priority); return; } #else