diff --git a/sys/arm/arm/mp_machdep.c b/sys/arm/arm/mp_machdep.c
--- a/sys/arm/arm/mp_machdep.c
+++ b/sys/arm/arm/mp_machdep.c
@@ -182,6 +182,7 @@
 	pc->pc_curthread = pc->pc_idlethread;
 	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
 	set_curthread(pc->pc_idlethread);
+	schedinit_ap();
 #ifdef VFP
 	vfp_init();
 #endif
diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
--- a/sys/arm64/arm64/mp_machdep.c
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -255,6 +255,7 @@
 	/* Initialize curthread */
 	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
 	pcpup->pc_curthread = pcpup->pc_idlethread;
+	schedinit_ap();
 
 	/* Initialize curpmap to match TTBR0's current setting. */
 	pmap0 = vmspace_pmap(&vmspace0);
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -678,6 +678,13 @@
 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN);
 }
 
+void
+schedinit_ap(void)
+{
+
+	/* Nothing needed. */
+}
+
 int
 sched_runnable(void)
 {
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1743,6 +1743,26 @@
 	ts0->ts_cpu = curcpu;	/* set valid CPU number */
 }
 
+/*
+ * schedinit_ap() is needed prior to calling sched_throw(NULL) to ensure that
+ * the pcpu requirements are met for any calls in the period between curthread
+ * initialization and sched_throw().  One can safely add threads to the queue
+ * before sched_throw(), for instance, as long as the thread lock is set up
+ * correctly.
+ *
+ * TDQ_SELF() relies on the below sched pcpu setting; it may be used only
+ * after schedinit_ap().
+ */
+void
+schedinit_ap(void)
+{
+
+#ifdef SMP
+	PCPU_SET(sched, DPCPU_PTR(tdq));
+#endif
+	PCPU_GET(idlethread)->td_lock = TDQ_LOCKPTR(TDQ_SELF());
+}
+
 /*
  * This is only somewhat accurate since given many processes of the same
  * priority they will switch when their slices run out, which will be
@@ -2973,19 +2993,14 @@
 	struct thread *newtd;
 	struct tdq *tdq;
 
+	tdq = TDQ_SELF();
 	if (__predict_false(td == NULL)) {
-#ifdef SMP
-		PCPU_SET(sched, DPCPU_PTR(tdq));
-#endif
-		/* Correct spinlock nesting and acquire the correct lock. */
-		tdq = TDQ_SELF();
 		TDQ_LOCK(tdq);
+		/* Correct spinlock nesting. */
 		spinlock_exit();
 		PCPU_SET(switchtime, cpu_ticks());
 		PCPU_SET(switchticks, ticks);
-		PCPU_GET(idlethread)->td_lock = TDQ_LOCKPTR(tdq);
 	} else {
-		tdq = TDQ_SELF();
 		THREAD_LOCK_ASSERT(td, MA_OWNED);
 		THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq));
 		tdq_load_rem(tdq, td);
diff --git a/sys/mips/mips/mp_machdep.c b/sys/mips/mips/mp_machdep.c
--- a/sys/mips/mips/mp_machdep.c
+++ b/sys/mips/mips/mp_machdep.c
@@ -311,6 +311,7 @@
 	/* Initialize curthread. */
 	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
 	PCPU_SET(curthread, PCPU_GET(idlethread));
+	schedinit_ap();
 
 	mtx_lock_spin(&ap_boot_mtx);
 
diff --git a/sys/powerpc/aim/mp_cpudep.c b/sys/powerpc/aim/mp_cpudep.c
--- a/sys/powerpc/aim/mp_cpudep.c
+++ b/sys/powerpc/aim/mp_cpudep.c
@@ -35,6 +35,7 @@
 #include <sys/bus.h>
 #include <sys/pcpu.h>
 #include <sys/proc.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 
 #include <machine/bus.h>
@@ -134,6 +135,7 @@
 #endif
 	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
 	sp = pcpup->pc_curpcb->pcb_sp;
+	schedinit_ap();
 
 	return (sp);
 }
diff --git a/sys/powerpc/booke/mp_cpudep.c b/sys/powerpc/booke/mp_cpudep.c
--- a/sys/powerpc/booke/mp_cpudep.c
+++ b/sys/powerpc/booke/mp_cpudep.c
@@ -35,6 +35,7 @@
 #include <sys/bus.h>
 #include <sys/pcpu.h>
 #include <sys/proc.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 
 #include <machine/bus.h>
@@ -85,6 +86,7 @@
 #endif
 	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
 	sp = pcpup->pc_curpcb->pcb_sp;
+	schedinit_ap();
 
 	/* XXX shouldn't the pcb_sp be checked/forced for alignment here?? */
 
diff --git a/sys/riscv/riscv/mp_machdep.c b/sys/riscv/riscv/mp_machdep.c
--- a/sys/riscv/riscv/mp_machdep.c
+++ b/sys/riscv/riscv/mp_machdep.c
@@ -248,6 +248,7 @@
 	/* Initialize curthread */
 	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
 	pcpup->pc_curthread = pcpup->pc_idlethread;
+	schedinit_ap();
 
 	/*
 	 * Identify current CPU. This is necessary to setup
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -226,6 +226,11 @@
  * Fixup scheduler state for proc0 and thread0
  */
 void	schedinit(void);
+
+/*
+ * Fixup scheduler state for secondary APs
+ */
+void	schedinit_ap(void);
 #endif /* _KERNEL */
 
 /* POSIX 1003.1b Process Scheduling */
diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c
--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c
@@ -1040,6 +1040,7 @@
 	/* Initialize curthread. */
 	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
 	PCPU_SET(curthread, PCPU_GET(idlethread));
+	schedinit_ap();
 
 	mtx_lock_spin(&ap_boot_mtx);
 
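For context, the per-architecture ordering this patch establishes is sketched below. This is an illustrative composite, not code from the tree: the function name start_ap_sketch() and the "other per-CPU init" placeholder are hypothetical, and only the relative ordering of the curthread assignment, schedinit_ap(), and sched_throw(NULL) is taken from the diff above.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>

/*
 * Illustrative sketch of AP bring-up; the real code lives in each
 * architecture's init_secondary()/cpudep_ap_bootstrap() path.
 */
static void
start_ap_sketch(void)
{
	/* 1. curthread must point at this CPU's idle thread first. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	/*
	 * 2. schedinit_ap() finishes the scheduler's per-CPU state.  For
	 *    ULE it points pc_sched at this CPU's tdq and gives the idle
	 *    thread its tdq lock, so TDQ_SELF() and thread-lock operations
	 *    are valid from here on; for 4BSD it is a no-op.
	 */
	schedinit_ap();

	/* 3. Remaining per-CPU initialization may now touch the scheduler. */

	/* 4. Enter the scheduler on the idle thread; does not return. */
	sched_throw(NULL);
}

Previously ULE deferred the pc_sched and idle-thread td_lock setup into the td == NULL branch of sched_throw(); hoisting it into schedinit_ap() lets architectures queue threads or take thread locks between curthread initialization and sched_throw(), and simplifies sched_throw() itself, as the sched_ule.c hunks above show.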