Index: sys/arm/arm/mp_machdep.c
===================================================================
--- sys/arm/arm/mp_machdep.c
+++ sys/arm/arm/mp_machdep.c
@@ -217,7 +217,7 @@
 	CTR0(KTR_SMP, "go into scheduler");
 
 	/* Enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 
 	panic("scheduler returned us to %s", __func__);
 	/* NOTREACHED */
Index: sys/arm64/arm64/mp_machdep.c
===================================================================
--- sys/arm64/arm64/mp_machdep.c
+++ sys/arm64/arm64/mp_machdep.c
@@ -293,7 +293,7 @@
 	MPASS(PCPU_GET(curpcb) == NULL);
 
 	/* Enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 
 	panic("scheduler returned us to init_secondary");
 	/* NOTREACHED */
Index: sys/kern/sched_4bsd.c
===================================================================
--- sys/kern/sched_4bsd.c
+++ sys/kern/sched_4bsd.c
@@ -1662,12 +1662,22 @@
 	}
 }
 
+static void
+sched_throw_tail(struct thread *td)
+{
+
+	mtx_assert(&sched_lock, MA_OWNED);
+	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
+	cpu_throw(td, choosethread());	/* doesn't return */
+}
+
 /*
- * A CPU is entering for the first time or a thread is exiting.
+ * A CPU is entering for the first time.
  */
 void
-sched_throw(struct thread *td)
+sched_ap_entry(void)
 {
+
 	/*
 	 * Correct spinlock nesting.  The idle thread context that we are
 	 * borrowing was created so that it would start out with a single
@@ -1677,20 +1687,29 @@
 	 * spinlock_exit() will simply adjust the counts without allowing
 	 * spin lock using code to interrupt us.
 	 */
-	if (td == NULL) {
-		mtx_lock_spin(&sched_lock);
-		spinlock_exit();
-		PCPU_SET(switchtime, cpu_ticks());
-		PCPU_SET(switchticks, ticks);
-	} else {
-		lock_profile_release_lock(&sched_lock.lock_object, true);
-		MPASS(td->td_lock == &sched_lock);
-		td->td_lastcpu = td->td_oncpu;
-		td->td_oncpu = NOCPU;
-	}
-	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
-	cpu_throw(td, choosethread());	/* doesn't return */
+	mtx_lock_spin(&sched_lock);
+	spinlock_exit();
+	PCPU_SET(switchtime, cpu_ticks());
+	PCPU_SET(switchticks, ticks);
+
+	sched_throw_tail(NULL);
+}
+
+/*
+ * A thread is exiting.
+ */
+void
+sched_throw(struct thread *td)
+{
+
+	MPASS(td != NULL);
+
+	lock_profile_release_lock(&sched_lock.lock_object, true);
+	MPASS(td->td_lock == &sched_lock);
+	td->td_lastcpu = td->td_oncpu;
+	td->td_oncpu = NOCPU;
+
+	sched_throw_tail(td);
 }
 
 void
Index: sys/kern/sched_ule.c
===================================================================
--- sys/kern/sched_ule.c
+++ sys/kern/sched_ule.c
@@ -2984,8 +2984,48 @@
 	}
 }
 
+/* Returns in a spinlock section. */
+static struct thread *
+sched_throw_grab(struct tdq *tdq)
+{
+	struct thread *newtd;
+
+	newtd = choosethread();
+	spinlock_enter();
+	TDQ_UNLOCK(tdq);
+	KASSERT(curthread->td_md.md_spinlock_count == 1,
+	    ("invalid count %d", curthread->td_md.md_spinlock_count));
+	return (newtd);
+}
+
+/*
+ * A CPU is entering for the first time.
+ */
+void
+sched_ap_entry(void)
+{
+	struct thread *newtd;
+	struct tdq *tdq;
+
+	tdq = TDQ_SELF();
+
+	/* This should have been setup in schedinit_ap(). */
+	THREAD_LOCKPTR_ASSERT(curthread, TDQ_LOCKPTR(tdq));
+
+	TDQ_LOCK(tdq);
+	/* Correct spinlock nesting. */
+	spinlock_exit();
+	PCPU_SET(switchtime, cpu_ticks());
+	PCPU_SET(switchticks, ticks);
+
+	newtd = sched_throw_grab(tdq);
+
+	/* doesn't return */
+	cpu_throw(NULL, newtd);
+}
+
 /*
- * A CPU is entering for the first time or a thread is exiting.
+ * A thread is exiting.
  */
 void
 sched_throw(struct thread *td)
@@ -2994,30 +3034,20 @@
 	struct tdq *tdq;
 
 	tdq = TDQ_SELF();
-	if (__predict_false(td == NULL)) {
-		TDQ_LOCK(tdq);
-		/* Correct spinlock nesting. */
-		spinlock_exit();
-		PCPU_SET(switchtime, cpu_ticks());
-		PCPU_SET(switchticks, ticks);
-	} else {
-		THREAD_LOCK_ASSERT(td, MA_OWNED);
-		THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq));
-		tdq_load_rem(tdq, td);
-		td->td_lastcpu = td->td_oncpu;
-		td->td_oncpu = NOCPU;
-		thread_lock_block(td);
-	}
-	newtd = choosethread();
-	spinlock_enter();
-	TDQ_UNLOCK(tdq);
-	KASSERT(curthread->td_md.md_spinlock_count == 1,
-	    ("invalid count %d", curthread->td_md.md_spinlock_count));
+
+	MPASS(td != NULL);
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
+	THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq));
+
+	tdq_load_rem(tdq, td);
+	td->td_lastcpu = td->td_oncpu;
+	td->td_oncpu = NOCPU;
+	thread_lock_block(td);
+
+	newtd = sched_throw_grab(tdq);
+
 	/* doesn't return */
-	if (__predict_false(td == NULL))
-		cpu_throw(td, newtd);		/* doesn't return */
-	else
-		cpu_switch(td, newtd, TDQ_LOCKPTR(tdq));
+	cpu_switch(td, newtd, TDQ_LOCKPTR(tdq));
 }
 
 /*
Index: sys/mips/mips/mp_machdep.c
===================================================================
--- sys/mips/mips/mp_machdep.c
+++ sys/mips/mips/mp_machdep.c
@@ -335,7 +335,7 @@
 	cpu_initclocks_ap();
 
 	/* enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 
 	panic("scheduler returned us to %s", __func__);
 	/* NOTREACHED */
Index: sys/powerpc/powerpc/mp_machdep.c
===================================================================
--- sys/powerpc/powerpc/mp_machdep.c
+++ sys/powerpc/powerpc/mp_machdep.c
@@ -112,7 +112,7 @@
 	cpu_initclocks_ap();
 
 	/* Announce ourselves awake, and enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 }
 
 void
Index: sys/riscv/riscv/mp_machdep.c
===================================================================
--- sys/riscv/riscv/mp_machdep.c
+++ sys/riscv/riscv/mp_machdep.c
@@ -291,7 +291,7 @@
 	MPASS(PCPU_GET(curpcb) == NULL);
 
 	/* Enter the scheduler */
-	sched_throw(NULL);
+	sched_ap_entry();
 
 	panic("scheduler returned us to init_secondary");
 	/* NOTREACHED */
Index: sys/sys/sched.h
===================================================================
--- sys/sys/sched.h
+++ sys/sys/sched.h
@@ -101,6 +101,7 @@
 void	sched_prio(struct thread *td, u_char prio);
 void	sched_sleep(struct thread *td, int prio);
 void	sched_switch(struct thread *td, int flags);
+void	sched_ap_entry(void);
 void	sched_throw(struct thread *td);
 void	sched_unlend_prio(struct thread *td, u_char prio);
 void	sched_user_prio(struct thread *td, u_char prio);
Index: sys/x86/x86/mp_x86.c
===================================================================
--- sys/x86/x86/mp_x86.c
+++ sys/x86/x86/mp_x86.c
@@ -1099,7 +1099,7 @@
 	 */
 	MPASS(PCPU_GET(curpcb) == NULL);
 
-	sched_throw(NULL);
+	sched_ap_entry();
 
 	panic("scheduler returned us to %s", __func__);
 	/* NOTREACHED */
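For reference, a minimal sketch of the calling convention after this split. This is illustrative C, not part of the patch; example_ap_startup() is a hypothetical caller standing in for the per-platform init_secondary()/AP startup routines changed above.

/*
 * The old NULL convention is gone; callers pick the entry point by
 * context instead of passing NULL:
 *
 *   sched_ap_entry()  - called once per AP from the MD startup path;
 *                       corrects the borrowed idle context's spinlock
 *                       nesting and never returns.
 *   sched_throw(td)   - called on thread exit with td != NULL and td's
 *                       thread lock held; switches away and never
 *                       returns.
 */
static void
example_ap_startup(void)		/* hypothetical MD caller */
{
	/* ... per-CPU setup done by the platform code ... */

	/* Enter the scheduler; formerly sched_throw(NULL). */
	sched_ap_entry();

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}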