Index: sys/kern/sched_4bsd.c
===================================================================
--- sys/kern/sched_4bsd.c
+++ sys/kern/sched_4bsd.c
@@ -42,6 +42,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/counter.h>
 #include <sys/cpuset.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
@@ -262,6 +263,19 @@
     "allow threads to share a quantum");
 #endif
 
+SYSCTL_NODE(_kern_sched, OID_AUTO, ithread, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+    "Interrupt thread stats");
+
+static COUNTER_U64_DEFINE_EARLY(ithread_demotions);
+SYSCTL_COUNTER_U64(_kern_sched_ithread, OID_AUTO, demotions,
+    CTLFLAG_RD, &ithread_demotions,
+    "Count of interrupt thread priority demotions");
+
+static COUNTER_U64_DEFINE_EARLY(ithread_preemptions);
+SYSCTL_COUNTER_U64(_kern_sched_ithread, OID_AUTO, preemptions,
+    CTLFLAG_RD, &ithread_preemptions,
+    "Count of interrupt thread preemptions due to time-sharing");
+
 SDT_PROVIDER_DEFINE(sched);
 
 SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
@@ -739,7 +753,20 @@
 	 */
 	if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
 		ts->ts_slice = sched_slice;
-		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
+
+		/*
+		 * If an ithread uses a full quantum, demote its
+		 * priority and preempt it.
+		 */
+		if (PRI_BASE(td->td_pri_class) == PRI_ITHD) {
+			counter_u64_add(ithread_preemptions, 1);
+			td->td_owepreempt = 1;
+			if (td->td_base_pri + RQ_PPQ < PRI_MAX_ITHD) {
+				counter_u64_add(ithread_demotions, 1);
+				sched_prio(td, td->td_base_pri + RQ_PPQ);
+			}
+		} else
+			td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
 	}
 
 	stat = DPCPU_PTR(idlestat);
@@ -1134,6 +1161,15 @@
 	td->td_slptick = 0;
 	ts->ts_slptime = 0;
 	ts->ts_slice = sched_slice;
+
+	/*
+	 * When resuming an idle ithread, restore its base ithread
+	 * priority.
+	 */
+	if (PRI_BASE(td->td_pri_class) == PRI_ITHD &&
+	    td->td_base_pri != td->td_base_ithread_pri)
+		sched_prio(td, td->td_base_ithread_pri);
+
 	sched_add(td, srqflags);
 }
 
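
For reviewers, the policy in the last two hunks amounts to a small ladder: each time an ithread consumes a full quantum, its base priority drops by one run queue (RQ_PPQ) until a further drop would pass PRI_MAX_ITHD, and the next wakeup snaps it back to td_base_ithread_pri. Below is a minimal standalone sketch of that ladder, not kernel code; the constant values, the fake_ithread struct, and the two helper functions are illustrative assumptions (the kernel's real definitions live in <sys/priority.h> and <sys/runq.h>):

/*
 * Standalone sketch of the demote-on-quantum / restore-on-wakeup policy.
 * The constants below are illustrative assumptions, not the kernel's
 * actual values. Larger numeric priority means weaker priority.
 */
#include <stdio.h>

#define PRI_MAX_ITHD	47	/* assumed bottom of the ithread range */
#define RQ_PPQ		4	/* assumed priorities per run queue */

struct fake_ithread {
	int base_ithread_pri;	/* priority assigned when the ithread starts */
	int base_pri;		/* current base priority */
};

/* Mirrors the sched_clock() hunk: called after a full quantum. */
static void
quantum_expired(struct fake_ithread *td)
{
	/* Demote by one run queue, staying strictly inside the ithread range. */
	if (td->base_pri + RQ_PPQ < PRI_MAX_ITHD)
		td->base_pri += RQ_PPQ;
}

/* Mirrors the sched_wakeup() hunk: restore the base ithread priority. */
static void
wakeup_restore(struct fake_ithread *td)
{
	if (td->base_pri != td->base_ithread_pri)
		td->base_pri = td->base_ithread_pri;
}

int
main(void)
{
	struct fake_ithread td = { .base_ithread_pri = 32, .base_pri = 32 };

	for (int i = 0; i < 6; i++) {
		quantum_expired(&td);
		printf("after quantum %d: base_pri %d\n", i + 1, td.base_pri);
	}
	wakeup_restore(&td);
	printf("after wakeup: base_pri %d\n", td.base_pri);
	return (0);
}

Note the strict `<` in the guard: demotion stops while the new priority would still sit inside the ithread range, so a repeatedly expiring ithread is never pushed out of PRI_ITHD entirely, only preempted once per quantum.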
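
Because the two counters are exported read-only under kern.sched.ithread, the behavior can be observed from userland. A short example, assuming a FreeBSD system running a kernel with this patch applied (print_counter is a hypothetical helper, not part of the patch; the sysctl names come from the hunk above):

/*
 * Read the demotion/preemption counters exported by this patch.
 * Assumes a kernel built with this change.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdint.h>
#include <stdio.h>

static int
print_counter(const char *name)
{
	uint64_t val;
	size_t len = sizeof(val);

	if (sysctlbyname(name, &val, &len, NULL, 0) == -1) {
		perror(name);
		return (-1);
	}
	printf("%s: %ju\n", name, (uintmax_t)val);
	return (0);
}

int
main(void)
{
	if (print_counter("kern.sched.ithread.demotions") == -1 ||
	    print_counter("kern.sched.ithread.preemptions") == -1)
		return (1);
	return (0);
}

The same values should also be visible from the shell via sysctl(8), e.g. `sysctl kern.sched.ithread`.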