Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/sched_ule.c
Show First 20 Lines • Show All 2,978 Lines • ▼ Show 20 Lines | #endif | ||||
switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; | switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; | ||||
if (switchcnt != oldswitchcnt) | if (switchcnt != oldswitchcnt) | ||||
continue; | continue; | ||||
tdq->tdq_switchcnt++; | tdq->tdq_switchcnt++; | ||||
oldswitchcnt++; | oldswitchcnt++; | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
markj: Maybe explain why: it's important to not enable interrupts until the CPU is running in a threaded context.

kevans: Sure, how about (with wrapping fixed):
```
/*
 * sched_throw_grab() chooses a thread from the queue to switch to
 * next.  It returns with the tdq lock dropped in a spinlock section to
 * keep interrupts disabled until the CPU is running in a proper threaded
 * context.
 */
```

markj: Seems ok to me.
* A CPU is entering for the first time or a thread is exiting. | * sched_throw_grab() chooses a thread from the queue to switch to | ||||
* next. It returns with the tdq lock dropped in a spinlock section to | |||||
* keep interrupts disabled until the CPU is running in a proper threaded | |||||
* context. | |||||
*/ | */ | ||||
static struct thread * | |||||
sched_throw_grab(struct tdq *tdq) | |||||
{ | |||||
struct thread *newtd; | |||||
newtd = choosethread(); | |||||
spinlock_enter(); | |||||
TDQ_UNLOCK(tdq); | |||||
KASSERT(curthread->td_md.md_spinlock_count == 1, | |||||
("invalid count %d", curthread->td_md.md_spinlock_count)); | |||||
return (newtd); | |||||
} | |||||
/* | |||||
* A CPU is entering for the first time. | |||||
*/ | |||||
void | void | ||||
sched_throw(struct thread *td) | sched_ap_entry(void) | ||||
{ | { | ||||
struct thread *newtd; | struct thread *newtd; | ||||
struct tdq *tdq; | struct tdq *tdq; | ||||
tdq = TDQ_SELF(); | tdq = TDQ_SELF(); | ||||
if (__predict_false(td == NULL)) { | |||||
/* This should have been setup in schedinit_ap(). */ | |||||
THREAD_LOCKPTR_ASSERT(curthread, TDQ_LOCKPTR(tdq)); | |||||
TDQ_LOCK(tdq); | TDQ_LOCK(tdq); | ||||
/* Correct spinlock nesting. */ | /* Correct spinlock nesting. */ | ||||
spinlock_exit(); | spinlock_exit(); | ||||
PCPU_SET(switchtime, cpu_ticks()); | PCPU_SET(switchtime, cpu_ticks()); | ||||
PCPU_SET(switchticks, ticks); | PCPU_SET(switchticks, ticks); | ||||
} else { | |||||
newtd = sched_throw_grab(tdq); | |||||
/* doesn't return */ | |||||
cpu_throw(NULL, newtd); | |||||
} | |||||
/* | |||||
* A thread is exiting. | |||||
*/ | |||||
void | |||||
sched_throw(struct thread *td) | |||||
{ | |||||
struct thread *newtd; | |||||
struct tdq *tdq; | |||||
tdq = TDQ_SELF(); | |||||
MPASS(td != NULL); | |||||
THREAD_LOCK_ASSERT(td, MA_OWNED); | THREAD_LOCK_ASSERT(td, MA_OWNED); | ||||
THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq)); | THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq)); | ||||
tdq_load_rem(tdq, td); | tdq_load_rem(tdq, td); | ||||
td->td_lastcpu = td->td_oncpu; | td->td_lastcpu = td->td_oncpu; | ||||
td->td_oncpu = NOCPU; | td->td_oncpu = NOCPU; | ||||
thread_lock_block(td); | thread_lock_block(td); | ||||
} | |||||
newtd = choosethread(); | newtd = sched_throw_grab(tdq); | ||||
spinlock_enter(); | |||||
TDQ_UNLOCK(tdq); | |||||
KASSERT(curthread->td_md.md_spinlock_count == 1, | |||||
("invalid count %d", curthread->td_md.md_spinlock_count)); | |||||
/* doesn't return */ | /* doesn't return */ | ||||
if (__predict_false(td == NULL)) | |||||
cpu_throw(td, newtd); /* doesn't return */ | |||||
else | |||||
cpu_switch(td, newtd, TDQ_LOCKPTR(tdq)); | cpu_switch(td, newtd, TDQ_LOCKPTR(tdq)); | ||||
} | } | ||||
/* | /* | ||||
* This is called from fork_exit(). Just acquire the correct locks and | * This is called from fork_exit(). Just acquire the correct locks and | ||||
* let fork do the rest of the work. | * let fork do the rest of the work. | ||||
*/ | */ | ||||
void | void | ||||
sched_fork_exit(struct thread *td) | sched_fork_exit(struct thread *td) | ||||
▲ Show 20 Lines • Show All 199 Lines • Show Last 20 Lines |
Maybe explain why: it's important to not enable interrupts until the CPU is running in a threaded context.