diff --git a/share/man/man9/callout.9 b/share/man/man9/callout.9
--- a/share/man/man9/callout.9
+++ b/share/man/man9/callout.9
@@ -29,7 +29,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd December 13, 2019
+.Dd September 1, 2021
 .Dt CALLOUT 9
 .Os
 .Sh NAME
@@ -241,9 +241,6 @@
 This ensures that stopping or rescheduling the callout will abort any
 previously scheduled invocation.
 .Pp
-Only regular mutexes may be used with
-.Fn callout_init_mtx ;
-spin mutexes are not supported.
 A sleepable read-mostly lock
 .Po
 one initialized with the
diff --git a/share/man/man9/taskqueue.9 b/share/man/man9/taskqueue.9
--- a/share/man/man9/taskqueue.9
+++ b/share/man/man9/taskqueue.9
@@ -28,7 +28,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd June 6, 2020
+.Dd September 1, 2021
 .Dt TASKQUEUE 9
 .Os
 .Sh NAME
@@ -237,9 +237,6 @@
 .Va flags ,
 as detailed in
 .Xr callout 9 .
-Only non-fast task queues can be used for
-.Va timeout_task
-scheduling.
 If the
 .Va ticks
 argument is negative, the already scheduled enqueueing is not re-scheduled.
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -215,7 +215,7 @@
 lock_spin(struct lock_object *lock, uintptr_t how)
 {
 
-	panic("spin locks can only use msleep_spin");
+	mtx_lock_spin((struct mtx *)lock);
 }
 
 uintptr_t
@@ -232,8 +232,12 @@
 uintptr_t
 unlock_spin(struct lock_object *lock)
 {
+	struct mtx *m;
 
-	panic("spin locks can only use msleep_spin");
+	m = (struct mtx *)lock;
+	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
+	mtx_unlock_spin(m);
+	return (0);
 }
 
 #ifdef KDTRACE_HOOKS
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -188,6 +188,8 @@
 	DROP_GIANT();
 	if (lock != NULL && lock != &Giant.lock_object &&
 	    !(class->lc_flags & LC_SLEEPABLE)) {
+		KASSERT(!(class->lc_flags & LC_SPINLOCK),
+		    ("spin locks can only use msleep_spin"));
 		WITNESS_SAVE(lock, lock_witness);
 		lock_state = class->lc_unlock(lock);
 	} else
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -919,8 +919,9 @@
 	} else {
 		direct = 0;
 	}
-	KASSERT(!direct || c->c_lock == NULL,
-	    ("%s: direct callout %p has lock", __func__, c));
+	KASSERT(!direct || c->c_lock == NULL ||
+	    (LOCK_CLASS(c->c_lock)->lc_flags & LC_SPINLOCK),
+	    ("%s: direct callout %p has non-spin lock", __func__, c));
 	cc = callout_lock(c);
 	/*
 	 * Don't allow migration if the user does not care.
@@ -1332,9 +1333,8 @@
 	    ("callout_init_lock: bad flags %d", flags));
 	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
 	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
-	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
-	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
-	    __func__));
+	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & LC_SLEEPABLE),
+	    ("%s: callout %p has sleepable lock", __func__, c));
 	c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
 	c->c_cpu = cc_default_cpu;
 }
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -309,7 +309,6 @@
 	TQ_LOCK(queue);
 	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
 	    ("Migrated queue"));
-	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
 	timeout_task->q = queue;
 	res = timeout_task->t.ta_pending;
 	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
@@ -329,6 +328,8 @@
 		sbt = -sbt;	/* Ignore overflow. */
 	}
 	if (sbt > 0) {
+		if (queue->tq_spin)
+			flags |= C_DIRECT_EXEC;
		callout_reset_sbt(&timeout_task->c, sbt, pr,
 		    taskqueue_timeout_func, timeout_task, flags);
 	}
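The sketch below is not part of the patch; it is a hypothetical driver fragment (the foo_* names are invented) showing what the change permits: a callout protected by a spin mutex and scheduled as a direct (C_DIRECT_EXEC) callout. Before this change, passing an MTX_SPIN mutex to callout_init_mtx() tripped the LC_SPINLOCK check in callout_init_lock(), and a direct callout could not have an associated lock at all.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>

static struct mtx	foo_spin_mtx;	/* hypothetical driver state */
static struct callout	foo_callout;

static void
foo_timer(void *arg)
{

	/* The callout code acquires foo_spin_mtx before calling us. */
	mtx_assert(&foo_spin_mtx, MA_OWNED);
	/* ... short, non-sleeping work ... */
}

static void
foo_init(void)
{

	mtx_init(&foo_spin_mtx, "foo spin", NULL, MTX_SPIN);
	/* Spin mutexes are now accepted here. */
	callout_init_mtx(&foo_callout, &foo_spin_mtx, 0);

	mtx_lock_spin(&foo_spin_mtx);
	/*
	 * C_DIRECT_EXEC runs the handler from the timer interrupt;
	 * with this change such a direct callout may be protected by
	 * a spin mutex.
	 */
	callout_reset_sbt(&foo_callout, SBT_1MS, 0, foo_timer, NULL,
	    C_DIRECT_EXEC);
	mtx_unlock_spin(&foo_spin_mtx);
}

On the taskqueue side, the subr_taskqueue.c hunks make taskqueue_enqueue_timeout() usable with fast (spin-lock) queues by forcing C_DIRECT_EXEC on their callouts, which is why the corresponding restriction is dropped from taskqueue.9.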