diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -95,6 +95,7 @@
 	int		ts_cpu;		/* CPU that we have affinity for. */
 	int		ts_rltick;	/* Real last tick, for affinity. */
 	int		ts_slice;	/* Ticks of slice remaining. */
+	int		ts_usedslice;	/* Ticks of slice used this run. */
 	u_int		ts_slptime;	/* Number of ticks we vol. slept */
 	u_int		ts_runtime;	/* Number of ticks we were running */
 	int		ts_ltick;	/* Last tick that we were running on */
@@ -212,11 +213,12 @@
 static int __read_mostly realstathz = 127;	/* reset during boot. */
 static int __read_mostly sched_slice = 10;	/* reset during boot. */
 static int __read_mostly sched_slice_min = 1;	/* reset during boot. */
+static bool __read_mostly sched_pick_short = true;
 #ifdef PREEMPTION
 #ifdef FULL_PREEMPTION
 static int __read_mostly preempt_thresh = PRI_MAX_IDLE;
 #else
-static int __read_mostly preempt_thresh = PRI_MIN_KERN;
+static int __read_mostly preempt_thresh = PRI_MAX_TIMESHARE + 1;
 #endif
 #else
 static int __read_mostly preempt_thresh = 0;
@@ -340,6 +342,7 @@
 static inline int sched_shouldpreempt(int, int, int);
 static void tdq_print(int cpu);
 static void runq_print(struct runq *rq);
+static inline int td_slice(struct thread *td, struct tdq *tdq);
 static int tdq_add(struct tdq *, struct thread *, int);
 #ifdef SMP
 static int tdq_move(struct tdq *, struct tdq *);
@@ -495,6 +498,12 @@
 	if (pri < PRI_MIN_BATCH) {
 		ts->ts_runq = &tdq->tdq_realtime;
 	} else if (pri <= PRI_MAX_BATCH) {
+		if (sched_pick_short && ts->ts_usedslice < td_slice(td, tdq)) {
+			ts->ts_runq = &tdq->tdq_realtime;
+			runq_add(ts->ts_runq, td, flags);
+			return;
+		}
+
 		ts->ts_runq = &tdq->tdq_timeshare;
 		KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH,
 		    ("Invalid priority %d on timeshare runq", pri));
@@ -1779,6 +1788,7 @@
 	ts0->ts_ltick = ticks;
 	ts0->ts_ftick = ticks;
 	ts0->ts_slice = 0;
+	ts0->ts_usedslice = 0;
 	ts0->ts_cpu = curcpu;	/* set valid CPU number */
 }
 
@@ -2298,6 +2308,7 @@
 		cpu_switch(td, newtd, mtx);
 		cpuid = td->td_oncpu = PCPU_GET(cpuid);
+		ts->ts_usedslice = 0;
 
 		SDT_PROBE0(sched, , , on__cpu);
 #ifdef HWPMC_HOOKS
 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
@@ -2632,6 +2643,7 @@
 	 * time slice (default is 100ms).
 	 */
 	ts->ts_slice += cnt;
+	ts->ts_usedslice += cnt;
 	if (ts->ts_slice >= td_slice(td, tdq)) {
 		ts->ts_slice = 0;
@@ -3333,6 +3345,8 @@
 SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW,
     &sched_idlespinthresh, 0,
     "Threshold before we will permit idle thread spinning");
+SYSCTL_BOOL(_kern_sched, OID_AUTO, pick_short, CTLFLAG_RW,
+    &sched_pick_short, 0, "Queue batch threads that have not used a full slice on the realtime runq");
 #ifdef SMP
 SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW,
     &affinity, 0, "Number of hz ticks to keep thread affinity for");
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -393,7 +393,7 @@
 struct thread0_storage {
 	struct thread	t0st_thread;
-	uint64_t	t0st_sched[10];
+	uint64_t	t0st_sched[12];
 };
 
 struct mtx *thread_lock_block(struct thread *);
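
For reference, a minimal userland sketch of exercising the new knob, assuming the patch above is applied. The OID name kern.sched.pick_short follows from the SYSCTL_BOOL declaration in the patch; the file name pick_short.c and the error-handling style are illustrative only.

/* pick_short.c: read and flip kern.sched.pick_short via sysctlbyname(3). */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	bool cur, next;
	size_t len = sizeof(cur);

	/* Read the current value of the boolean OID. */
	if (sysctlbyname("kern.sched.pick_short", &cur, &len, NULL, 0) == -1)
		err(1, "sysctlbyname(kern.sched.pick_short) read");
	printf("kern.sched.pick_short: %d\n", cur);

	/* Toggle it; the OID is CTLFLAG_RW, so writing needs privilege. */
	next = !cur;
	if (sysctlbyname("kern.sched.pick_short", NULL, NULL, &next,
	    sizeof(next)) == -1)
		err(1, "sysctlbyname(kern.sched.pick_short) write");
	printf("kern.sched.pick_short set to %d\n", next);
	return (0);
}

Build with cc -o pick_short pick_short.c and run as root for the write half; sysctl kern.sched.pick_short=0 from the shell does the same without any code.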