Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c
+++ sys/kern/kern_timeout.c
@@ -163,6 +163,7 @@
         sbintime_t cc_lastscan;
         void *cc_cookie;
         u_int cc_bucket;
+        u_int cc_inited;
         char cc_ktr_event_name[20];
 };
 
@@ -225,7 +226,6 @@
 cc_cce_cleanup(struct callout_cpu *cc, int direct)
 {
 
-        cc_exec_curr(cc, direct) = NULL;
         cc_exec_cancel(cc, direct) = false;
         cc_exec_waiting(cc, direct) = false;
 #ifdef SMP
@@ -266,6 +266,7 @@
          * XXX: Clip callout to result of previous function of maxusers
          * maximum 384.  This is still huge, but acceptable.
          */
+        memset(cc_cpu, 0, sizeof(cc_cpu));
         ncallout = imin(16 + maxproc + maxfiles, 18508);
         TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
 
@@ -307,6 +308,7 @@
 
         mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
         SLIST_INIT(&cc->cc_callfree);
+        cc->cc_inited = 1;
         cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
             M_CALLOUT, M_WAITOK);
         for (i = 0; i < callwheelsize; i++)
@@ -313,8 +315,10 @@
                 LIST_INIT(&cc->cc_callwheel[i]);
         TAILQ_INIT(&cc->cc_expireq);
         cc->cc_firstevent = SBT_MAX;
-        for (i = 0; i < 2; i++)
+        for (i = 0; i < 2; i++) {
+                cc_exec_curr(cc, i) = NULL;
                 cc_cce_cleanup(cc, i);
+        }
         snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
             "callwheel cpu %d", cpu);
         if (cc->cc_callout == NULL)     /* Only cpu0 handles timeout(9) */
@@ -347,6 +351,7 @@
          * may be willing to acquire the callout cpu lock.
          */
         c->c_cpu = CPUBLOCK;
+        c->l_cpu = new_cpu;
         spinlock_enter();
         CC_UNLOCK(cc);
         new_cc = CC_CPU(new_cpu);
@@ -572,6 +577,16 @@
         return (cc);
 }
 
+void
+callout_deactivate(struct callout *c)
+{
+        struct callout_cpu *cc;
+
+        cc = callout_lock(c);
+        c->c_flags &= ~CALLOUT_ACTIVE;
+        CC_UNLOCK(cc);
+}
+
 static void
 callout_cc_add(struct callout *c, struct callout_cpu *cc,
     sbintime_t sbt, sbintime_t precision, void (*func)(void *),
@@ -945,6 +960,17 @@
         int cancelled, direct;
 
         cancelled = 0;
+        if ((cpu >= CPUBLOCK) || (cpu < 0)) {
+                /*
+                 * Most likely CPUBLOCK was set in c->c_cpu and was
+                 * passed in via a macro; fix it up to be the l_cpu
+                 * we were last on.
+                 */
+                cpu = c->l_cpu;
+        } else if (cc_cpu[cpu].cc_inited == 0) {
+                /* Invalid CPU spec */
+                cpu = c->l_cpu;
+        }
         if (flags & C_ABSOLUTE) {
                 to_sbt = sbt;
         } else {
@@ -998,9 +1024,10 @@
          * wrong direct flag if we don't do it before we add.
          */
         if (flags & C_DIRECT_EXEC) {
-                c->c_flags |= CALLOUT_DIRECT;
+                direct = 1;
+        } else {
+                direct = 0;
         }
-        direct = (c->c_flags & CALLOUT_DIRECT) != 0;
         KASSERT(!direct || c->c_lock == NULL,
             ("%s: direct callout %p has lock", __func__, c));
         cc = callout_lock(c);
@@ -1145,7 +1172,11 @@
                 }
         } else
                 use_lock = 0;
-        direct = (c->c_flags & CALLOUT_DIRECT) != 0;
+        if (c->c_flags & CALLOUT_DIRECT) {
+                direct = 1;
+        } else {
+                direct = 0;
+        }
         sq_locked = 0;
         old_cc = NULL;
 again:
@@ -1281,6 +1312,10 @@
                             c, c->c_func, c->c_arg);
                         KASSERT(!cc_cce_migrating(cc, direct),
                             ("callout wrongly scheduled for migration"));
+                        if (callout_migrating(c)) {
+                                c->c_flags &= ~CALLOUT_DFRMIGRATION;
+                                cc_cce_cleanup(cc, direct);
+                        }
                         CC_UNLOCK(cc);
                         KASSERT(!sq_locked, ("sleepqueue chain locked"));
                         return (1);
@@ -1294,20 +1329,7 @@
                  * we return 0.
                  */
                 c->c_flags &= ~CALLOUT_DFRMIGRATION;
-#ifdef SMP
-                /*
-                 * We can't call cc_cce_cleanup here since
-                 * if we do it will remove .ce_curr and
-                 * its still running.  This will prevent a
-                 * reschedule of the callout when the
-                 * execution completes.
-                 */
-                cc_migration_cpu(cc, direct) = CPUBLOCK;
-                cc_migration_time(cc, direct) = 0;
-                cc_migration_prec(cc, direct) = 0;
-                cc_migration_func(cc, direct) = NULL;
-                cc_migration_arg(cc, direct) = NULL;
-#endif
+                cc_cce_cleanup(cc, direct);
                 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
                     c, c->c_func, c->c_arg);
                 CC_UNLOCK(cc);
@@ -1350,7 +1372,7 @@
                 c->c_lock = &Giant.lock_object;
                 c->c_flags = 0;
         }
-        c->c_cpu = timeout_cpu;
+        c->l_cpu = c->c_cpu = timeout_cpu;
 }
 
 void
@@ -1366,7 +1388,7 @@
         KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
             (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", __func__));
         c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
-        c->c_cpu = timeout_cpu;
+        c->l_cpu = c->c_cpu = timeout_cpu;
 }
 
 #ifdef APM_FIXUP_CALLTODO
Index: sys/sys/_callout.h
===================================================================
--- sys/sys/_callout.h
+++ sys/sys/_callout.h
@@ -59,6 +59,7 @@
         struct lock_object *c_lock;     /* lock to handle */
         int     c_flags;                /* state of this entry */
         volatile int c_cpu;             /* CPU we're scheduled on */
+        volatile int l_cpu;             /* CPU we last were on */
 };
 
 #endif
Index: sys/sys/callout.h
===================================================================
--- sys/sys/callout.h
+++ sys/sys/callout.h
@@ -65,7 +65,7 @@
 #ifdef _KERNEL
 #define callout_active(c)       ((c)->c_flags & CALLOUT_ACTIVE)
 #define callout_migrating(c)    ((c)->c_flags & CALLOUT_DFRMIGRATION)
-#define callout_deactivate(c)   ((c)->c_flags &= ~CALLOUT_ACTIVE)
+void    callout_deactivate(struct callout *c);
 #define callout_drain(c)        _callout_stop_safe(c, 1)
 void    callout_init(struct callout *, int);
 void    _callout_init_lock(struct callout *, struct lock_object *, int);
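
A note on the consumer-visible part of the change (this note and the sketch below are commentary, not part of the patch): callout_deactivate() goes from an unlocked read-modify-write macro on c_flags to a function that clears CALLOUT_ACTIVE while holding the per-CPU callout lock, and because callout_lock() spins while c_cpu == CPUBLOCK it also serializes against an in-flight migration. Call sites keep the same shape; the canonical callout(9) handler pattern still reads as below. The foo_* names are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct foo_softc {
        struct mtx      foo_mtx;        /* passed to callout_init_mtx() */
        struct callout  foo_callout;
};

/* Entered by softclock with foo_mtx held (callout_init_mtx case). */
static void
foo_timeout(void *arg)
{
        struct foo_softc *sc = arg;

        mtx_assert(&sc->foo_mtx, MA_OWNED);
        if (callout_pending(&sc->foo_callout))
                return;         /* Rescheduled while we were queued. */
        if (!callout_active(&sc->foo_callout))
                return;         /* callout_stop() won the race. */
        /* A macro before this patch; a locked function call after it. */
        callout_deactivate(&sc->foo_callout);
        /* ... do the periodic work, possibly callout_reset() ... */
}

The one difference a consumer might notice is cost: callout_deactivate() can now contend briefly for the callout CPU spin lock where the old macro had no locking at all.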