diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -120,7 +120,7 @@
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
 	    "Waiting on \"%s\"", cvp->cv_description);
 
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
 		return;
 
 #ifdef KTRACE
@@ -184,7 +184,7 @@
 	    ("cv_wait_unlock cannot be used with Giant"));
 	class = LOCK_CLASS(lock);
 
-	if (SCHEDULER_STOPPED_TD(td)) {
+	if (SCHEDULER_STOPPED()) {
 		class->lc_unlock(lock);
 		return;
 	}
@@ -241,7 +241,7 @@
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
 	    "Waiting on \"%s\"", cvp->cv_description);
 
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
 		return (0);
 
 #ifdef KTRACE
@@ -309,7 +309,7 @@
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
 	    "Waiting on \"%s\"", cvp->cv_description);
 
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
		return (0);
 
 #ifdef KTRACE
@@ -379,7 +379,7 @@
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
 	    "Waiting on \"%s\"", cvp->cv_description);
 
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
 		return (0);
 
 #ifdef KTRACE
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -424,7 +424,7 @@
 	td = curthread;
 	tid = (uintptr_t)td;
 
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
 		return (1);
 
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
@@ -534,7 +534,7 @@
 	doing_lockprof = 1;
 #endif
 
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
 		return;
 
 	if (__predict_false(v == MTX_UNOWNED))
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -307,7 +307,7 @@
 	td = curthread;
 	tid = (uintptr_t)td;
 
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
 		return (1);
 
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
@@ -666,7 +666,7 @@
 
 	td = curthread;
 
-	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED_TD(td) ||
+	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
 	    !TD_IS_IDLETHREAD(td),
 	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
 	    td, rw->lock_object.lo_name, file, line));
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -225,6 +225,7 @@
  * to indicate that the kernel has already called panic.
  */
 const char *panicstr __read_mostly;
+bool scheduler_stopped __read_frequently;
 
 int dumping __read_mostly;		/* system is dumping */
 int rebooting __read_mostly;		/* system is rebooting */
@@ -926,7 +927,7 @@
 	 * Ensure that the scheduler is stopped while panicking, even if panic
 	 * has been entered from kdb.
 	 */
-	td->td_stopsched = 1;
+	scheduler_stopped = true;
 
 	bootopt = RB_AUTOBOOT;
 	newpanic = 0;
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -350,7 +350,7 @@
 	td = curthread;
 	tid = (uintptr_t)td;
 
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
 		return (1);
 
 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -158,7 +158,7 @@
 	else
 		class = NULL;
 
-	if (SCHEDULER_STOPPED_TD(td)) {
+	if (SCHEDULER_STOPPED()) {
 		if (lock != NULL && priority & PDROP)
 			class->lc_unlock(lock);
 		return (0);
@@ -247,7 +247,7 @@
 	KASSERT(ident != NULL, ("msleep_spin_sbt: NULL ident"));
 	KASSERT(TD_IS_RUNNING(td), ("msleep_spin_sbt: curthread not running"));
 
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
 		return (0);
 
 	sleepq_lock(ident);
@@ -511,7 +511,7 @@
 	 */
 	if (kdb_active)
 		kdb_switch();
-	if (SCHEDULER_STOPPED_TD(td))
+	if (SCHEDULER_STOPPED())
 		return;
 	if (flags & SW_VOL) {
 		td->td_ru.ru_nvcsw++;
diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c
--- a/sys/kern/subr_kdb.c
+++ b/sys/kern/subr_kdb.c
@@ -764,7 +764,7 @@
 		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 		stop_cpus_hard(other_cpus);
 #endif
-		curthread->td_stopsched = 1;
+		scheduler_stopped = true;
 		did_stop_cpus = 1;
 	} else
 		did_stop_cpus = 0;
@@ -801,7 +801,7 @@
 	kdb_active--;
 
 	if (did_stop_cpus) {
-		curthread->td_stopsched = 0;
+		scheduler_stopped = false;
 #ifdef SMP
 		CPU_AND(&other_cpus, &other_cpus, &stopped_cpus);
 		restart_cpus(other_cpus);
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -270,7 +270,7 @@
 	const char	*td_wmesg;	/* (t) Reason for sleep. */
 	volatile u_char td_owepreempt;	/* (k*) Preempt on last critical_exit */
 	u_char		td_tsqueue;	/* (t) Turnstile queue blocked on. */
-	u_char		td_stopsched;	/* (k) Scheduler stopped. */
+	u_char		_td_pad0[2];	/* Available. */
 	int		td_locks;	/* (k) Debug: count of non-spin locks */
 	int		td_rw_rlocks;	/* (k) Count of rwlock read locks. */
 	int		td_sx_slocks;	/* (k) Count of sx shared locks. */
@@ -429,7 +429,7 @@
 #define	TD_LOCKS_INC(td)	((td)->td_locks++)
 #define	TD_LOCKS_DEC(td) do {						\
-	KASSERT(SCHEDULER_STOPPED_TD(td) || (td)->td_locks > 0,	\
+	KASSERT(SCHEDULER_STOPPED() || (td)->td_locks > 0,		\
 	    ("Thread %p owns no locks", (td)));				\
 	(td)->td_locks--;						\
 } while (0)
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -99,17 +99,15 @@
 #include <sys/pcpu.h>		/* curthread */
 #include <sys/kpilite.h>
 
+extern bool scheduler_stopped;
+
 /*
  * If we have already panic'd and this is the thread that called
  * panic(), then don't block on any mutexes but silently succeed.
  * Otherwise, the kernel will deadlock since the scheduler isn't
  * going to run the thread that holds any lock we need.
  */
-#define	SCHEDULER_STOPPED_TD(td)  ({					\
-	MPASS((td) == curthread);					\
-	__predict_false((td)->td_stopsched);				\
-})
-#define	SCHEDULER_STOPPED() SCHEDULER_STOPPED_TD(curthread)
+#define	SCHEDULER_STOPPED() __predict_false(scheduler_stopped)
 
 extern int osreldate;
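
The pattern the diff implements can be sketched outside the kernel: a single global flag, set once when panic() or the kdb entry path stops the other CPUs, lets every lock and sleep path bail out early without dereferencing curthread. The sketch below is illustrative only and is not FreeBSD code; apart from scheduler_stopped and SCHEDULER_STOPPED(), which come from the diff, the names (toy_lock_acquire, the printf bodies) are invented, and __builtin_expect stands in for the kernel's __predict_false().

#include <stdbool.h>
#include <stdio.h>

/* Global flag, analogous to the new scheduler_stopped in kern_shutdown.c. */
static bool scheduler_stopped;

/* The kernel's __predict_false(exp) expands to __builtin_expect((exp), 0). */
#define	SCHEDULER_STOPPED()	__builtin_expect(scheduler_stopped, 0)

/* Hypothetical lock path: succeed silently once the scheduler is stopped. */
static int
toy_lock_acquire(const char *name)
{

	if (SCHEDULER_STOPPED())
		return (1);		/* pretend the lock was granted */
	printf("would block on %s\n", name);
	return (1);
}

int
main(void)
{

	toy_lock_acquire("example");	/* normal path */
	scheduler_stopped = true;	/* roughly what panic()/kdb now do */
	toy_lock_acquire("example");	/* returns immediately, no output */
	return (0);
}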