sched_ule.c
(first 199 lines elided)
 /*
  * tickincr:     Converts a stathz tick into a hz domain scaled by
  *               the shift factor.  Without the shift the error rate
  *               due to rounding would be unacceptably high.
  * realstathz:   stathz is sometimes 0 and run off of hz.
  * sched_slice:  Runtime of each thread before rescheduling.
  * preempt_thresh:  Priority threshold for preemption and remote IPIs.
+ * preempt_timeshare_delta:  Preempt if the difference in timeshare
+ *               priorities exceeds this threshold.
  */
 static int sched_interact = SCHED_INTERACT_THRESH;
 static int tickincr = 8 << SCHED_TICK_SHIFT;
 static int realstathz = 127;    /* reset during boot. */
 static int sched_slice = 10;    /* reset during boot. */
 static int sched_slice_min = 1; /* reset during boot. */
 #ifdef PREEMPTION
 #ifdef FULL_PREEMPTION
 static int preempt_thresh = PRI_MAX_IDLE;
+static int preempt_timeshare_delta = 0;
 #else
 static int preempt_thresh = PRI_MIN_KERN;
+static int preempt_timeshare_delta = (PRI_BATCH_RANGE - SCHED_PRI_NRESV) / 2;
 #endif
 #else
+static int preempt_timeshare_delta = INT_MAX;
 static int preempt_thresh = 0;
 #endif
 static int static_boost = PRI_MIN_BATCH;
 static int sched_idlespins = 10000;
 static int sched_idlespinthresh = -1;
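tickincr's static initializer of 8 << SCHED_TICK_SHIFT is only a placeholder until boot recomputes it from the real clock rates. A standalone sketch of that derivation, modeled loosely on ULE's sched_initticks(); the hz/realstathz values and SCHED_TICK_SHIFT = 10 below are illustrative assumptions, not taken from this diff:

#include <stdio.h>

#define SCHED_TICK_SHIFT 10     /* assumed; matches the shift used above */

int main(void)
{
    int hz = 1000, realstathz = 127;    /* hypothetical clock rates */

    /*
     * Scale hz into the shifted domain before dividing so the
     * rounding error stays small, and never let the increment
     * round all the way down to zero.
     */
    int tickincr = (hz << SCHED_TICK_SHIFT) / realstathz;
    if (tickincr == 0)
        tickincr = 1;
    printf("tickincr = %d (~%d hz ticks per stathz tick)\n",
        tickincr, tickincr >> SCHED_TICK_SHIFT);    /* 8063, ~7 */
    return 0;
}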
 /*
  * tdq - per processor runqs and statistics.  All fields are protected by the
(82 lines elided)
 /* Operations on per processor queues */
 static struct thread *tdq_choose(struct tdq *);
 static void tdq_setup(struct tdq *);
 static void tdq_load_add(struct tdq *, struct thread *);
 static void tdq_load_rem(struct tdq *, struct thread *);
 static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
 static __inline void tdq_runq_rem(struct tdq *, struct thread *);
-static inline int sched_shouldpreempt(int, int, int);
+static __inline void tdq_runq_elevate(struct tdq *, struct thread *);
+static inline int sched_shouldpreempt(struct tdq *, struct thread *,
+    struct thread *);
 void tdq_print(int cpu);
 static void runq_print(struct runq *rq);
 static void tdq_add(struct tdq *, struct thread *, int);
 #ifdef SMP
 static struct thread *tdq_move(struct tdq *, struct tdq *);
 static int tdq_idled(struct tdq *);
 static void tdq_notify(struct tdq *, struct thread *);
 static struct thread *tdq_steal(struct tdq *, int);
(85 lines elided; within tdq_print(int cpu))
printf("\trealtime runq:\n"); | printf("\trealtime runq:\n"); | ||||
runq_print(&tdq->tdq_realtime); | runq_print(&tdq->tdq_realtime); | ||||
printf("\ttimeshare runq:\n"); | printf("\ttimeshare runq:\n"); | ||||
runq_print(&tdq->tdq_timeshare); | runq_print(&tdq->tdq_timeshare); | ||||
printf("\tidle runq:\n"); | printf("\tidle runq:\n"); | ||||
runq_print(&tdq->tdq_idle); | runq_print(&tdq->tdq_idle); | ||||
} | } | ||||
+/*
+ * Evaluate whether we should preempt a thread or simply set NEEDRESCHED.
+ */
 static inline int
-sched_shouldpreempt(int pri, int cpri, int remote)
+sched_shouldpreempt(struct tdq *tdq, struct thread *td, struct thread *ctd)
 {
+    int pri, cpri;
+    int remote, timeshare;
+    u_char ridx;

     /*
      * If the new priority is not better than the current priority there is
      * nothing to do.
      */
+    pri = td->td_priority;
+    cpri = ctd->td_priority;
     if (pri >= cpri)
         return (0);
     /*
      * Always preempt idle.
      */
     if (cpri >= PRI_MIN_IDLE)
         return (1);
     /*
+     * If the threads are not both on the timeshare queue
+     * NEEDRESCHED is set unconditionally for the lower
+     * priority curthread.  We will also preempt in most
+     * cases which will harmlessly clear the bit.
+     *
+     * The index determines run-order more strongly than
+     * priority for timeshare threads.  We eliminate needless
+     * switches by filtering on run-queue order here.
+     */
+    timeshare = td_get_sched(td)->ts_runq == &tdq->tdq_timeshare &&
+        td_get_sched(ctd)->ts_runq == &tdq->tdq_timeshare;
+    ridx = tdq->tdq_ridx;
+    if (!timeshare ||
+        (u_char)(td->td_rqindex - ridx) <
+        (u_char)(ctd->td_rqindex - ridx)) {

jeff: This avoids excessive NEEDRESCHED if the thread won't run immediately.

+        if (ctd->td_lock == TDQ_LOCKPTR(tdq))

jeff: If curthread had been preempted after it has added itself to a sleep queue this will be false.

+            ctd->td_flags |= TDF_NEEDRESCHED;
+    }
+    /*
      * If preemption is disabled don't preempt others.
      */
     if (preempt_thresh == 0)
         return (0);
     /*
      * Preempt if we exceed the threshold.
      */
     if (pri <= preempt_thresh)
         return (1);
     /*
      * If we're interactive or better and there is non-interactive
-     * or worse running preempt only remote processors.
+     * or worse running preempt remote processors.  Local processors
+     * will honor this from NEEDRESCHED and avoid preemption.  A future
+     * enhancement could do the same for remote.
      */
+    remote = tdq != TDQ_SELF();
     if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
         return (1);
+    /*
+     * If the difference between the two timeshare threads priorities
+     * exceeds the delta threshold we elevate the new thread on the
+     * timeshare queue and request a resched.
+     */
+    if (timeshare && cpri - pri > preempt_timeshare_delta) {
+        tdq_runq_elevate(tdq, td);

jeff: Unfortunately this works when you are considering two threads that are more than delta apart, but not three threads.  Thread a is pri 0, thread b is +0.5 delta and thread c is 1.5 delta.  Thread c is running; a finishes running or finishes its slice.  If you reverse the order of a and b you get the same inversion.  This improves the situation, and given stochastic wake times will probably average out ok.  It doesn't yet feel as satisfying as it could be though.

+        if (ctd->td_lock == TDQ_LOCKPTR(tdq))
+            ctd->td_flags |= TDF_NEEDRESCHED;
+        return (remote);

jeff: Here what will happen is that we will send the IPI but not actually preempt for more than the IPI handler.  So if we hit while we're in kernel code it will not switch until ast().  If we hit while we're in user code it will switch in the ast() from IPI return.  In this way we will lower the latency for interactive tasks without increasing kernel contention from preemption.

+    }
     return (0);
 }
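A note on the (u_char)(rqindex - ridx) filter in sched_shouldpreempt() above: the timeshare runq is circular (RQ_NQS buckets, typically 64) and tdq_ridx is its rotating head, so subtracting the head in unsigned 8-bit arithmetic yields sort keys that preserve circular run order across wraparound. A minimal standalone illustration; the index values are invented:

#include <stdio.h>

/*
 * Indices at or past the head map to small key values, indices that
 * have wrapped around map to large ones, so a plain '<' on the keys
 * compares circular run order correctly.
 */
static unsigned char qkey(unsigned char idx, unsigned char ridx)
{
    return (unsigned char)(idx - ridx);
}

int main(void)
{
    unsigned char ridx = 60;    /* rotating head of the queue */
    unsigned char td_idx = 4;   /* new thread, wrapped past the end */
    unsigned char ctd_idx = 62; /* current thread, just ahead of head */

    /* qkey(td) = 200, qkey(ctd) = 2: td runs after ctd, so the
     * NEEDRESCHED filter correctly skips it. */
    printf("td runs %s ctd\n",
        qkey(td_idx, ridx) < qkey(ctd_idx, ridx) ? "before" : "after");
    return 0;
}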
 /*
  * Add a thread to the actual run-queue.  Keeps transferable counts up to
  * date with what is actually on the run-queue.  Selects the correct
  * queue position for timeshare threads.
  */
(38 lines elided; within tdq_runq_add(), inside "if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0)")
         pri = tdq->tdq_ridx;
         runq_add_pri(ts->ts_runq, td, pri, flags);
         return;
     } else
         ts->ts_runq = &tdq->tdq_idle;
     runq_add(ts->ts_runq, td, flags);
 }
+static void
+tdq_runq_elevate(struct tdq *tdq, struct thread *td)
+{
+    struct td_sched *ts;
+
+    TDQ_LOCK_ASSERT(tdq, MA_OWNED);
+    THREAD_LOCK_ASSERT(td, MA_OWNED);
+    ts = td_get_sched(td);
+    if (ts->ts_runq == &tdq->tdq_timeshare) {
+        runq_remove_idx(ts->ts_runq, td, NULL);
+        runq_add_pri(ts->ts_runq, td, tdq->tdq_ridx, SRQ_PREEMPTED);
+    }
+}
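jeff's three-thread concern above is easy to make concrete. A toy check of the pairwise delta test, assuming delta = 16 and priority offsets a = 0, b = 8, c = 24 (all values hypothetical):

#include <stdio.h>

int main(void)
{
    int delta = 16;
    int a = 0, b = 8, c = 24;   /* b = 0.5*delta, c = 1.5*delta above a */

    /* c running, a queued: the gap exceeds delta, so a is elevated. */
    printf("c vs a: %s\n", (c - a > delta) ? "elevate" : "queue normally");
    /* But each adjacent pair is within delta, so no elevation. */
    printf("c vs b: %s\n", (c - b > delta) ? "elevate" : "queue normally");
    printf("b vs a: %s\n", (b - a > delta) ? "elevate" : "queue normally");
    return 0;
}

c vs. a triggers elevation, but c vs. b and b vs. a each fall inside delta: if b is the one running (or a arrives later), the same three threads never elevate even though a and c remain more than delta apart overall — the inversion jeff describes.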
 /*
  * Remove a thread from a run-queue.  This typically happens when a thread
  * is selected to run.  Running threads are not on the queue and the
  * transferable count does not reflect them.
  */
 static __inline void
 tdq_runq_rem(struct tdq *tdq, struct thread *td)
 {
(557 lines elided)
 /*
  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
  */
 static void
 tdq_notify(struct tdq *tdq, struct thread *td)
 {
     struct thread *ctd;
-    int pri;
     int cpu;

     if (tdq->tdq_ipipending)
         return;
     cpu = td_get_sched(td)->ts_cpu;
-    pri = td->td_priority;
     ctd = pcpu_find(cpu)->pc_curthread;
-    if (!sched_shouldpreempt(pri, ctd->td_priority, 1))
+    if (!sched_shouldpreempt(tdq, td, ctd))
         return;

     /*
      * Make sure that our caller's earlier update to tdq_load is
      * globally visible before we read tdq_cpu_idle.  Idle thread
      * accesses both of them without locks, and the order is important.
      */
     atomic_thread_fence_seq_cst();
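The fence comment above describes a classic store/load handshake. A standalone C11 model of the idea — the kernel uses its own atomic(9) primitives and per-CPU fields, so the names and structure below are illustrative only. Each side publishes its store, issues a seq_cst fence, then reads the other side's flag; at least one side is then guaranteed to observe the other, so a wakeup cannot be lost:

#include <stdatomic.h>

static atomic_int load_cnt, cpu_idle;  /* stand-ins for the tdq fields */

/* Waker: publish new load, then check whether the cpu went idle. */
static int waker_sees_idle(void)
{
    atomic_store_explicit(&load_cnt, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&cpu_idle, memory_order_relaxed);
}

/* Idler: mark idle, then check whether load appeared meanwhile. */
static int idler_sees_load(void)
{
    atomic_store_explicit(&cpu_idle, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&load_cnt, memory_order_relaxed);
}

/*
 * Run concurrently on two cpus, "both functions return 0" is
 * impossible with the fences in place; without them, both stores can
 * sit in store buffers and the wakeup IPI is never sent.
 */
int main(void) { return waker_sees_idle() & idler_sees_load(); }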
(467 lines elided; within sched_priority(td))
      *
      * The nice value of the process has a linear effect on the calculated
      * score.  Negative nice values make it easier for a thread to be
      * considered interactive.
      */
     score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
     if (score < sched_interact) {
         pri = PRI_MIN_INTERACT;
-        pri += ((PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) /
-            sched_interact) * score;
+        pri += ((PRI_MAX_INTERACT - PRI_MIN_INTERACT) * score) /
+            sched_interact;

jeff: This didn't work for values of sched_interact that diverged greatly from MAX_INTERACT - MIN_INTERACT.  It just so happens it produces reasonable values with the defaults.
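To make jeff's point concrete: the old expression computes the integer step first, so it collapses to zero once sched_interact exceeds the priority range, while the new expression scales the score before dividing. A standalone check with a hypothetical range of 36 interactive priorities (all numbers illustrative):

#include <stdio.h>

int main(void)
{
    int range = 35;              /* PRI_MAX_INTERACT - PRI_MIN_INTERACT, assumed */
    int settings[] = { 30, 50 }; /* two sched_interact values to compare */

    for (int i = 0; i < 2; i++) {
        int si = settings[i];
        int score = si - 1;      /* worst score still considered interactive */
        int old_off = ((range + 1) / si) * score;  /* step computed first */
        int new_off = (range * score) / si;        /* score scaled first */
        printf("sched_interact=%2d: old +%2d, new +%2d\n", si, old_off, new_off);
    }
    /* Prints old +29 / new +33, then old +0 / new +34: the old form maps
     * every score to PRI_MIN_INTERACT once sched_interact > range + 1. */
    return 0;
}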
         KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT,
             ("sched_priority: invalid interactive priority %d score %d",
             pri, score));
     } else {
         pri = SCHED_PRI_MIN;
         if (td_get_sched(td)->ts_ticks)
             pri += min(SCHED_PRI_TICKS(td_get_sched(td)),
                 SCHED_PRI_RANGE - 1);
(588 lines elided; within sched_sleep(struct thread *td, int prio))
     td->td_slptick = ticks;
     if (TD_IS_SUSPENDED(td) || prio >= PSOCK)
         td->td_flags |= TDF_CANSWAP;
     if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
         return;
     if (static_boost == 1 && prio)
         sched_prio(td, prio);
-    else if (static_boost && td->td_priority > static_boost)
+    else if (static_boost > 1 && td->td_priority > static_boost)

jeff: With a static_boost of 1 and no prio set from the caller you would elevate to priority 1.

         sched_prio(td, static_boost);
 }
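The boundary jeff flags is easiest to see by tabulating the old and new conditions side by side. A toy comparison — priority numbers are made up, and static_boost == 1 means "use the caller's prio" while larger values name an explicit priority:

#include <stdio.h>

static const char *decide(int static_boost, int prio, int td_pri, int fixed)
{
    if (static_boost == 1 && prio)
        return "boost to caller-supplied prio";
    /* The old code tested 'static_boost != 0' here, so static_boost == 1
     * with prio == 0 fell through and boosted the thread to priority 1. */
    if ((fixed ? static_boost > 1 : static_boost != 0) &&
        td_pri > static_boost)
        return "boost to static_boost";
    return "leave priority alone";
}

int main(void)
{
    printf("old: %s\n", decide(1, 0, 140, 0)); /* boosts to 1 (the bug) */
    printf("new: %s\n", decide(1, 0, 140, 1)); /* leaves priority alone */
    return 0;
}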
 /*
  * Schedule a thread to resume execution and record how long it voluntarily
  * slept.  We also update the pctcpu, interactivity, and priority.
  */
 void

(140 lines elided; within sched_exit_thread(struct thread *td, struct thread *child))
     sched_priority(td);
     thread_unlock(td);
 }

 void
 sched_preempt(struct thread *td)
 {
     struct tdq *tdq;
+    struct thread *ntd;

     SDT_PROBE2(sched, , , surrender, td, td->td_proc);

     thread_lock(td);
     tdq = TDQ_SELF();
     TDQ_LOCK_ASSERT(tdq, MA_OWNED);
     tdq->tdq_ipipending = 0;
-    if (td->td_priority > tdq->tdq_lowpri) {
+    /*
+     * The state could've changed since the remote processor signaled
+     * or it may have simply signaled to trigger NEEDRESCHED.  We
+     * filter again here before preempting.
+     */
+    ntd = tdq_choose(tdq);
+    if (ntd != NULL && sched_shouldpreempt(tdq, ntd, td)) {

jeff: This is responsible for most of the reduction in preemption counts.  In this way the IPI will still notify the remote processor immediately of the scheduling event but we don't necessarily have to respond right away.

         int flags;

         flags = SW_INVOL | SW_PREEMPT;
         if (td->td_critnest > 1)
             td->td_owepreempt = 1;
         else if (TD_IS_IDLETHREAD(td))
             mi_switch(flags | SWT_REMOTEWAKEIDLE, NULL);
         else
(135 lines elided)
 /*
  * Set owepreempt if necessary.  Preemption never happens directly in ULE,
  * we always request it once we exit a critical section.
  */
 static inline void
 sched_setpreempt(struct thread *td)
 {
     struct thread *ctd;
-    int cpri;
-    int pri;

     THREAD_LOCK_ASSERT(curthread, MA_OWNED);
     ctd = curthread;
-    pri = td->td_priority;
-    cpri = ctd->td_priority;
-    if (pri < cpri)
-        ctd->td_flags |= TDF_NEEDRESCHED;
-    if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
+    if (panicstr != NULL || cold || TD_IS_INHIBITED(ctd))
         return;
-    if (!sched_shouldpreempt(pri, cpri, 0))
+    if (!sched_shouldpreempt(TDQ_SELF(), td, ctd))
         return;
     ctd->td_owepreempt = 1;
 }
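The comment above is the key contract: sched_setpreempt() only sets td_owepreempt, and the actual switch happens when the outermost critical section exits. A toy model of that deferral — this is not the kernel's critical_exit(), and the field names are simplified:

#include <stdio.h>

struct toy_thread {
    int critnest;       /* nesting depth of critical sections */
    int owepreempt;     /* preemption requested while nested */
};

static void toy_critical_exit(struct toy_thread *td)
{
    /* Only honor a deferred preemption at the outermost exit. */
    if (--td->critnest == 0 && td->owepreempt) {
        td->owepreempt = 0;
        printf("deferred preemption happens here\n");
    }
}

int main(void)
{
    struct toy_thread td = { .critnest = 1, .owepreempt = 0 };

    td.owepreempt = 1;      /* sched_setpreempt() fired meanwhile */
    toy_critical_exit(&td); /* the switch runs only now */
    return 0;
}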
 /*
  * Add a thread to a thread queue.  Select the appropriate runq and add the
  * thread to it.  This is the internal function called when the tdq is
  * predetermined.

(526 lines elided; resumes at SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW, ...))
"Quantum for timeshare threads in microseconds"); | "Quantum for timeshare threads in microseconds"); | ||||
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, | SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, | ||||
"Quantum for timeshare threads in stathz ticks"); | "Quantum for timeshare threads in stathz ticks"); | ||||
SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, | SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, | ||||
"Interactivity score threshold"); | "Interactivity score threshold"); | ||||
SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, | SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, | ||||
&preempt_thresh, 0, | &preempt_thresh, 0, | ||||
"Maximal (lowest) priority for preemption"); | "Maximal (lowest) priority for preemption"); | ||||
SYSCTL_INT(_kern_sched, OID_AUTO, preempt_timeshare_delta, CTLFLAG_RW, | |||||
&preempt_timeshare_delta, 0, | |||||
"Difference in timeshare priorities required for preemption"); | |||||
 SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 0,
-    "Assign static kernel priorities to sleeping threads");
+    "Elevate priorities of sleeping threads. "
+    "0 = disabled, 1 = kernel supplied value, >1 = specified priority.");
 SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins, 0,
     "Number of times idle thread will spin waiting for new work");
 SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW,
     &sched_idlespinthresh, 0,
     "Threshold before we will permit idle thread spinning");
 #ifdef SMP
 SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
     "Number of hz ticks to keep thread affinity for");
(21 lines elided)