diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index 2ab7a0938e54..2f138637141d 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -1,954 +1,1001 @@ /*- * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ */ /* * Machine independent bits of mutex implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_adaptive_mutexes.h" #include "opt_ddb.h" #include "opt_global.h" #include "opt_hwpmc_hooks.h" #include "opt_kdtrace.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES) #define ADAPTIVE_MUTEXES #endif #ifdef HWPMC_HOOKS #include PMC_SOFT_DEFINE( , , lock, failed); #endif +/* + * Return the mutex address when the lock cookie address is provided. + * This functionality assumes that struct mtx* have a member named mtx_lock. + */ +#define mtxlock2mtx(c) (__containerof(c, struct mtx, mtx_lock)) + /* * Internal utility macros. */ #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) #define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED) #define mtx_owner(m) ((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK)) static void assert_mtx(const struct lock_object *lock, int what); #ifdef DDB static void db_show_mtx(const struct lock_object *lock); #endif static void lock_mtx(struct lock_object *lock, int how); static void lock_spin(struct lock_object *lock, int how); #ifdef KDTRACE_HOOKS static int owner_mtx(const struct lock_object *lock, struct thread **owner); #endif static int unlock_mtx(struct lock_object *lock); static int unlock_spin(struct lock_object *lock); /* * Lock classes for sleep and spin mutexes. 
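The mtxlock2mtx() macro introduced above is the classic container-of idiom: given a pointer to the mtx_lock member (the "lock cookie" that the reworked KPI passes around), it recovers a pointer to the enclosing struct mtx. A minimal stand-alone sketch of the same idiom; fake_mtx and containerof() are illustrative names, not kernel types:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Same shape as __containerof(): member address back to object address. */
	#define	containerof(p, type, member)				\
		((type *)((char *)(p) - offsetof(type, member)))

	struct fake_mtx {				/* illustrative layout only */
		const char		*name;		/* stands in for lock_object */
		volatile uintptr_t	mtx_lock;	/* the lock cookie */
	};

	int
	main(void)
	{
		struct fake_mtx m = { "demo", 4 };	/* 4 == MTX_UNOWNED */
		volatile uintptr_t *c = &m.mtx_lock;	/* what the new KPI passes */
		struct fake_mtx *mp;

		mp = containerof(c, struct fake_mtx, mtx_lock);	/* mtxlock2mtx() */
		printf("%s\n", mp->name);		/* prints "demo" */
		return (0);
	}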
*/ struct lock_class lock_class_mtx_sleep = { .lc_name = "sleep mutex", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_mtx, .lc_unlock = unlock_mtx, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; struct lock_class lock_class_mtx_spin = { .lc_name = "spin mutex", .lc_flags = LC_SPINLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_spin, .lc_unlock = unlock_spin, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; /* * System-wide mutexes */ struct mtx blocked_lock; struct mtx Giant; void assert_mtx(const struct lock_object *lock, int what) { mtx_assert((const struct mtx *)lock, what); } void lock_mtx(struct lock_object *lock, int how) { mtx_lock((struct mtx *)lock); } void lock_spin(struct lock_object *lock, int how) { panic("spin locks can only use msleep_spin"); } int unlock_mtx(struct lock_object *lock) { struct mtx *m; m = (struct mtx *)lock; mtx_assert(m, MA_OWNED | MA_NOTRECURSED); mtx_unlock(m); return (0); } int unlock_spin(struct lock_object *lock) { panic("spin locks can only use msleep_spin"); } #ifdef KDTRACE_HOOKS int owner_mtx(const struct lock_object *lock, struct thread **owner) { const struct mtx *m = (const struct mtx *)lock; *owner = mtx_owner(m); return (mtx_unowned(m) == 0); } #endif /* * Function versions of the inlined __mtx_* macros. These are used by * modules and can also be called from assembly language if needed. */ void -_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line) +__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { + struct mtx *m; if (SCHEDULER_STOPPED()) return; + + m = mtxlock2mtx(c); + KASSERT(!TD_IS_IDLETHREAD(curthread), ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); __mtx_lock(m, curthread, opts, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); curthread->td_locks++; } void -_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line) +__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { + struct mtx *m; if (SCHEDULER_STOPPED()) return; + + m = mtxlock2mtx(c); + KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); curthread->td_locks--; WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); if (m->mtx_recurse == 0) LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m); __mtx_unlock(m, curthread, opts, file, line); } void -_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line) +__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, + int line) { + struct mtx *m; if (SCHEDULER_STOPPED()) return; + + m = mtxlock2mtx(c); + KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock_spin() of destroyed mutex @ %s:%d", 
file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_lock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m)) KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0, ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); __mtx_lock_spin(m, curthread, opts, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); } void -_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line) +__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file, + int line) { + struct mtx *m; if (SCHEDULER_STOPPED()) return; + + m = mtxlock2mtx(c); + KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_unlock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); __mtx_unlock_spin(m); } /* * The important part of mtx_trylock{,_flags}() * Tries to acquire lock `m.' If this function is called on a mutex that * is already owned, it will recursively acquire the lock. */ int -mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line) +_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) { + struct mtx *m; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif int rval; if (SCHEDULER_STOPPED()) return (1); + m = mtxlock2mtx(c); + KASSERT(!TD_IS_IDLETHREAD(curthread), ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_trylock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) { m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); rval = 1; } else rval = _mtx_obtain_lock(m, (uintptr_t)curthread); LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line); if (rval) { WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); curthread->td_locks++; if (m->mtx_recurse == 0) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested, waittime, file, line); } return (rval); } /* - * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. + * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. * * We call this if the lock is either contested (i.e. we need to go to * sleep waiting for it), or if we need to recurse on it. 
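mtx_trylock() (a thin wrapper over the function above) returns non-zero only when the lock was obtained, or legitimately recursed on an already-owned recursable mutex, so the unlock must be conditional on that return value. A hedged usage sketch; foo_softc and its fields are hypothetical:

	static int
	foo_try_update(struct foo_softc *sc)
	{

		if (mtx_trylock(&sc->sc_mtx) == 0)
			return (EBUSY);		/* owned elsewhere; retry later */
		sc->sc_updates++;
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}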
*/ void -_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file, - int line) +__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts, + const char *file, int line) { + struct mtx *m; struct turnstile *ts; uintptr_t v; #ifdef ADAPTIVE_MUTEXES volatile struct thread *owner; #endif #ifdef KTR int cont_logged = 0; #endif #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS uint64_t spin_cnt = 0; uint64_t sleep_cnt = 0; int64_t sleep_time = 0; #endif if (SCHEDULER_STOPPED()) return; + m = mtxlock2mtx(c); + if (mtx_owned(m)) { KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0, ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); return; } #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR4(KTR_LOCK, "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", m->lock_object.lo_name, (void *)m->mtx_lock, file, line); while (!_mtx_obtain_lock(m, tid)) { #ifdef KDTRACE_HOOKS spin_cnt++; #endif #ifdef ADAPTIVE_MUTEXES /* * If the owner is running on another CPU, spin until the * owner stops running or the state of the lock changes. */ v = m->mtx_lock; if (v != MTX_UNOWNED) { owner = (struct thread *)(v & ~MTX_FLAGMASK); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&m->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, m, owner); while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) { cpu_spinwait(); #ifdef KDTRACE_HOOKS spin_cnt++; #endif } continue; } } #endif ts = turnstile_trywait(&m->lock_object); v = m->mtx_lock; /* * Check if the lock has been released while spinning for * the turnstile chain lock. */ if (v == MTX_UNOWNED) { turnstile_cancel(ts); continue; } #ifdef ADAPTIVE_MUTEXES /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the turnstile * chain lock. If so, drop the turnstile lock and try * again. */ owner = (struct thread *)(v & ~MTX_FLAGMASK); if (TD_IS_RUNNING(owner)) { turnstile_cancel(ts); continue; } #endif /* * If the mutex isn't already contested and a failure occurs * setting the contested bit, the mutex was either released * or the state of the MTX_RECURSED bit changed. */ if ((v & MTX_CONTESTED) == 0 && !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) { turnstile_cancel(ts); continue; } /* * We definitely must sleep for this lock. */ mtx_assert(m, MA_NOTOWNED); #ifdef KTR if (!cont_logged) { CTR6(KTR_CONTENTION, "contention: %p at %s:%d wants %s, taken by %s:%d", (void *)tid, file, line, m->lock_object.lo_name, WITNESS_FILE(&m->lock_object), WITNESS_LINE(&m->lock_object)); cont_logged = 1; } #endif /* * Block on the turnstile. 
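Stripped of logging and statistics, the contested path above has the following shape. The helpers owner_is_running() and try_set_contested() are paraphrases of the inline code, not real kernel functions:

	while (!_mtx_obtain_lock(m, tid)) {		/* retry the CAS */
		if (owner_is_running(m))		/* ADAPTIVE_MUTEXES: */
			continue;			/*  spin, don't sleep */
		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;
		if (v == MTX_UNOWNED || owner_is_running(m)) {
			turnstile_cancel(ts);		/* state changed: retry */
			continue;
		}
		if (!try_set_contested(m, v)) {		/* publish MTX_CONTESTED */
			turnstile_cancel(ts);
			continue;
		}
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
	}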
*/ #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(); #endif turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(); sleep_cnt++; #endif } #ifdef KTR if (cont_logged) { CTR4(KTR_CONTENTION, "contention end: %s acquired by %p at %s:%d", m->lock_object.lo_name, (void *)tid, file, line); } #endif LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested, waittime, file, line); #ifdef KDTRACE_HOOKS if (sleep_time) LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time); /* * Only record the loops spinning and not sleeping. */ if (spin_cnt > sleep_cnt) LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt)); #endif } static void _mtx_lock_spin_failed(struct mtx *m) { struct thread *td; td = mtx_owner(m); /* If the mutex is unlocked, try again. */ if (td == NULL) return; printf( "spin lock %p (%s) held by %p (tid %d) too long\n", m, m->lock_object.lo_name, td, td->td_tid); #ifdef WITNESS witness_display_spinlock(&m->lock_object, td, printf); #endif panic("spin lock held too long"); } #ifdef SMP /* - * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock. + * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock. * * This is only called if we need to actually spin for the lock. Recursion * is handled inline. */ void -_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file, - int line) +_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts, + const char *file, int line) { + struct mtx *m; int i = 0; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif if (SCHEDULER_STOPPED()) return; + m = mtxlock2mtx(c); + if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); while (!_mtx_obtain_lock(m, tid)) { /* Give interrupts a chance while we spin. 
*/ spinlock_exit(); while (m->mtx_lock != MTX_UNOWNED) { if (i++ < 10000000) { cpu_spinwait(); continue; } if (i < 60000000 || kdb_active || panicstr != NULL) DELAY(1); else _mtx_lock_spin_failed(m); cpu_spinwait(); } spinlock_enter(); } if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m, contested, waittime, (file), (line)); LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i); } #endif /* SMP */ void thread_lock_flags_(struct thread *td, int opts, const char *file, int line) { struct mtx *m; uintptr_t tid; int i; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS uint64_t spin_cnt = 0; #endif i = 0; tid = (uintptr_t)curthread; if (SCHEDULER_STOPPED()) return; for (;;) { retry: spinlock_enter(); m = td->td_lock; KASSERT(m->mtx_lock != MTX_DESTROYED, ("thread_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("thread_lock() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m)) KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0, ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); while (!_mtx_obtain_lock(m, tid)) { #ifdef KDTRACE_HOOKS spin_cnt++; #endif if (m->mtx_lock == tid) { m->mtx_recurse++; break; } #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); /* Give interrupts a chance while we spin. */ spinlock_exit(); while (m->mtx_lock != MTX_UNOWNED) { if (i++ < 10000000) cpu_spinwait(); else if (i < 60000000 || kdb_active || panicstr != NULL) DELAY(1); else _mtx_lock_spin_failed(m); cpu_spinwait(); if (m != td->td_lock) goto retry; } spinlock_enter(); } if (m == td->td_lock) break; __mtx_unlock_spin(m); /* does spinlock_exit() */ #ifdef KDTRACE_HOOKS spin_cnt++; #endif } if (m->mtx_recurse == 0) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m, contested, waittime, (file), (line)); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt); } struct mtx * thread_lock_block(struct thread *td) { struct mtx *lock; THREAD_LOCK_ASSERT(td, MA_OWNED); lock = td->td_lock; td->td_lock = &blocked_lock; mtx_unlock_spin(lock); return (lock); } void thread_lock_unblock(struct thread *td, struct mtx *new) { mtx_assert(new, MA_OWNED); MPASS(td->td_lock == &blocked_lock); atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new); } void thread_lock_set(struct thread *td, struct mtx *new) { struct mtx *lock; mtx_assert(new, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); lock = td->td_lock; td->td_lock = new; mtx_unlock_spin(lock); } /* - * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. + * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. * * We are only called here if the lock is recursed or contested (i.e. we * need to wake up a blocked thread). 
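thread_lock_block()/thread_lock_unblock() above implement the scheduler's lock handoff: while a thread migrates, td_lock points at blocked_lock, whose lock word (0xdeadc0de, set in mutex_init() below) never reads as MTX_UNOWNED, so concurrent thread_lock() callers keep spinning. A hedged sketch of a migration; newq and rq_lock are hypothetical:

	struct mtx *old;

	thread_lock(td);		/* acquire whatever td_lock points at */
	old = thread_lock_block(td);	/* td_lock = &blocked_lock, old is dropped */
	/* td is frozen: concurrent thread_lock(td) spins on blocked_lock. */
	mtx_lock_spin(&newq->rq_lock);	/* hypothetical destination lock */
	thread_lock_unblock(td, &newq->rq_lock);	/* release-store of td_lock */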
 */
void
-_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
+	struct mtx *m;
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

+	m = mtxlock2mtx(c);
+
	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
-_mtx_assert(const struct mtx *m, int what, const char *file, int line)
+__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
+	const struct mtx *m;

	if (panicstr != NULL || dumping)
		return;
+
+	m = mtxlock2mtx(c);
+
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

-	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
+	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
+	    margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
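A hedged example of the lifecycle the routine below and its mtx_destroy() counterpart implement, written the way a consumer would through the mtx_init()/mtx_destroy() macros; foo_softc is hypothetical:

	struct foo_softc *sc;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
	/* ... mtx_lock(&sc->sc_mtx) / mtx_unlock(&sc->sc_mtx) ... */
	mtx_destroy(&sc->sc_mtx);	/* lock word becomes MTX_DESTROYED */
	free(sc, M_DEVBUF);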
void
-mtx_init(struct mtx *m, const char *name, const char *type, int opts)
+_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
+	struct mtx *m;
	struct lock_class *class;
	int flags;

+	m = mtxlock2mtx(c);
+
	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_init(&m->lock_object, class, name, type, flags);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
-mtx_destroy(struct mtx *m)
+_mtx_destroy(volatile uintptr_t *c)
{
+	struct mtx *m;
+
+	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif
diff --git a/sys/sys/_mutex.h b/sys/sys/_mutex.h
index e61954e78082..8670485adcb9 100644
--- a/sys/sys/_mutex.h
+++ b/sys/sys/_mutex.h
@@ -1,42 +1,64 @@
/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS__MUTEX_H_
#define _SYS__MUTEX_H_

+#include
+
/*
 * Sleep/spin mutex.
+ *
+ * The layout of the first 2 members of struct mtx* is considered fixed.
+ * More specifically, it is assumed that there is a member called mtx_lock
+ * for every struct mtx* and that other locking primitive structures are
+ * not allowed to use such name for their members.
+ * If this needs to change, the bits in the mutex implementation might be
+ * modified appropriately.
 */
struct mtx {
	struct lock_object	lock_object;	/* Common lock properties. */
	volatile uintptr_t	mtx_lock;	/* Owner and flags. */
};

+/*
+ * Members of struct mtx_padalign must mirror members of struct mtx.
+ * mtx_padalign mutexes can use the mtx(9) KPI transparently, without
+ * modification.
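Because the cookie macros in sys/mutex.h reach into mtx_lock directly, a struct mtx_padalign works with the mtx(9) KPI only thanks to this member-for-member mirroring. A compile-time check one could add to enforce it (not part of this patch; CTASSERT is the kernel's static assert):

	CTASSERT(offsetof(struct mtx, lock_object) ==
	    offsetof(struct mtx_padalign, lock_object));
	CTASSERT(offsetof(struct mtx, mtx_lock) ==
	    offsetof(struct mtx_padalign, mtx_lock));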
+ * When using pad-aligned mutexes within structures, they should generally + * stay as the first member of the struct. This is because otherwise the + * compiler can generate ever more padding for the struct to keep a correct + * alignment for the mutex. + */ +struct mtx_padalign { + struct lock_object lock_object; /* Common lock properties. */ + volatile uintptr_t mtx_lock; /* Owner and flags. */ +} __aligned(CACHE_LINE_SIZE); + #endif /* !_SYS__MUTEX_H_ */ diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h index d39df0f78b3e..bd610fd488fc 100644 --- a/sys/sys/mutex.h +++ b/sys/sys/mutex.h @@ -1,434 +1,471 @@ /*- * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $ * $FreeBSD$ */ #ifndef _SYS_MUTEX_H_ #define _SYS_MUTEX_H_ #include #include #include #ifdef _KERNEL #include #include #include #include #include /* * Mutex types and options passed to mtx_init(). MTX_QUIET and MTX_DUPOK * can also be passed in. */ #define MTX_DEF 0x00000000 /* DEFAULT (sleep) lock */ #define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */ #define MTX_RECURSE 0x00000004 /* Option: lock allowed to recurse */ #define MTX_NOWITNESS 0x00000008 /* Don't do any witness checking. */ #define MTX_NOPROFILE 0x00000020 /* Don't profile this lock */ /* * Option flags passed to certain lock/unlock routines, through the use * of corresponding mtx_{lock,unlock}_flags() interface macros. */ #define MTX_QUIET LOP_QUIET /* Don't log a mutex event */ #define MTX_DUPOK LOP_DUPOK /* Don't log a duplicate acquire */ /* * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this, * with the exception of MTX_UNOWNED, applies to spin locks. */ #define MTX_RECURSED 0x00000001 /* lock recursed (for MTX_DEF only) */ #define MTX_CONTESTED 0x00000002 /* lock contested (for MTX_DEF only) */ #define MTX_UNOWNED 0x00000004 /* Cookie for free mutex */ #define MTX_FLAGMASK (MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED) /* * Value stored in mutex->mtx_lock to denote a destroyed mutex. 
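The owner and the flag bits above share one word: struct thread pointers are aligned well past 8 bytes, so the low bits of a valid tid are always zero and can carry MTX_RECURSED/MTX_CONTESTED/MTX_UNOWNED. A stand-alone sketch of the packing, with the constants copied from above and a fake thread address:

	#include <assert.h>
	#include <stdint.h>

	#define	MTX_RECURSED	0x00000001
	#define	MTX_CONTESTED	0x00000002
	#define	MTX_UNOWNED	0x00000004
	#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED)

	int
	main(void)
	{
		uintptr_t tid = 0xdeadbe00;	/* stand-in for an aligned curthread */
		uintptr_t lock = tid | MTX_CONTESTED;

		assert((lock & ~MTX_FLAGMASK) == tid);	/* mtx_owner() recovery */
		assert((lock & MTX_CONTESTED) != 0);	/* waiters present */
		/* MTX_DESTROYED == (MTX_CONTESTED | MTX_UNOWNED), defined below,
		   is a flag combination no live lock can ever hold. */
		return (0);
	}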
 */
#define	MTX_DESTROYED	(MTX_CONTESTED | MTX_UNOWNED)

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
-*	 and LOCK_LINE.  These functions should not be called directly by any
+*	 and LOCK_LINE, or for hiding the lock cookie crunching from the
+*	 consumers.  These functions should not be called directly by any
 *	 code using the API.  Their macros cover their functionality.
 *	 Functions with a `_' suffix are the entry points for the common
 *	 KPI covering both compat shims and the fast path case.  These can be
 *	 used by consumers willing to pass options, file and line
 *	 information, in an option-independent way.
 *
 * [See below for descriptions]
 *
 */
-void	mtx_init(struct mtx *m, const char *name, const char *type, int opts);
-void	mtx_destroy(struct mtx *m);
+void	_mtx_init(volatile uintptr_t *c, const char *name, const char *type,
+	    int opts);
+void	_mtx_destroy(volatile uintptr_t *c);
void	mtx_sysinit(void *arg);
-int	mtx_trylock_flags_(struct mtx *m, int opts, const char *file,
+int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	mutex_init(void);
-void	_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts,
+void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
	    const char *file, int line);
-void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
+void	__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
+	    int line);
#ifdef SMP
-void	_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts,
+void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
	    const char *file, int line);
#endif
-void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
-	    int line);
-void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
+void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
+	    int line);
+void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
+	    int line);
+void	__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
+void	__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
+	    const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
-void	_mtx_assert(const struct mtx *m, int what, const char *file, int line);
+void	__mtx_assert(const volatile uintptr_t *c, int what, const char *file,
+	    int line);
#endif
void	thread_lock_flags_(struct thread *, int, const char *, int);
#define	thread_lock(tdp)						\
	thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
#define	thread_lock_flags(tdp, opt)					\
	thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
#define	thread_unlock(tdp)						\
	mtx_unlock_spin((tdp)->td_lock)

+/*
+ * Top-level macros to provide the lock cookie once the actual mtx is passed.
+ * They will also prevent passing a malformed object to the mtx KPI by
+ * failing compilation.
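The compile-time check comes from the `&(m)->mtx_lock` expansion below: the macros only compile when the argument really has an mtx_lock member of the right type. A stand-alone illustration with simplified, non-kernel struct definitions:

	#include <stdint.h>

	struct mtx { volatile uintptr_t mtx_lock; };	/* illustrative only */
	struct sx { volatile uintptr_t sx_lock; };	/* a different primitive */

	void	_mtx_init(volatile uintptr_t *c, const char *n, const char *t, int o);
	#define	mtx_init(m, n, t, o)	_mtx_init(&(m)->mtx_lock, n, t, o)

	void
	example(struct mtx *m, struct sx *sx)
	{

		mtx_init(m, "ok", NULL, 0);	/* compiles: m->mtx_lock exists */
		/* mtx_init(sx, "bad", NULL, 0);   rejected: no sx->mtx_lock */
	}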
+ */ +#define mtx_init(m, n, t, o) \ + _mtx_init(&(m)->mtx_lock, n, t, o) +#define mtx_destroy(m) \ + _mtx_destroy(&(m)->mtx_lock) +#define mtx_trylock_flags_(m, o, f, l) \ + _mtx_trylock_flags_(&(m)->mtx_lock, o, f, l) +#define _mtx_lock_sleep(m, t, o, f, l) \ + __mtx_lock_sleep(&(m)->mtx_lock, t, o, f, l) +#define _mtx_unlock_sleep(m, o, f, l) \ + __mtx_unlock_sleep(&(m)->mtx_lock, o, f, l) +#ifdef SMP +#define _mtx_lock_spin(m, t, o, f, l) \ + _mtx_lock_spin_cookie(&(m)->mtx_lock, t, o, f, l) +#endif +#define _mtx_lock_flags(m, o, f, l) \ + __mtx_lock_flags(&(m)->mtx_lock, o, f, l) +#define _mtx_unlock_flags(m, o, f, l) \ + __mtx_unlock_flags(&(m)->mtx_lock, o, f, l) +#define _mtx_lock_spin_flags(m, o, f, l) \ + __mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l) +#define _mtx_unlock_spin_flags(m, o, f, l) \ + __mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l) +#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) +#define _mtx_assert(m, w, f, l) \ + __mtx_assert(&(m)->mtx_lock, w, f, l) +#endif + #define mtx_recurse lock_object.lo_data /* Very simple operations on mtx_lock. */ /* Try to obtain mtx_lock once. */ #define _mtx_obtain_lock(mp, tid) \ atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) /* Try to release mtx_lock if it is unrecursed and uncontested. */ #define _mtx_release_lock(mp, tid) \ atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED) /* Release mtx_lock quickly, assuming we own it. */ #define _mtx_release_lock_quick(mp) \ atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED) /* * Full lock operations that are suitable to be inlined in non-debug * kernels. If the lock cannot be acquired or released trivially then * the work is deferred to another function. */ /* Lock a normal mutex. */ #define __mtx_lock(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ if (!_mtx_obtain_lock((mp), _tid)) \ _mtx_lock_sleep((mp), _tid, (opts), (file), (line)); \ else \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, \ mp, 0, 0, (file), (line)); \ } while (0) /* * Lock a spin mutex. For spinlocks, we handle recursion inline (it * turns out that function calls can be significantly expensive on * some architectures). Since spin locks are not _too_ common, * inlining this code is not too big a deal. */ #ifdef SMP #define __mtx_lock_spin(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ spinlock_enter(); \ if (!_mtx_obtain_lock((mp), _tid)) { \ if ((mp)->mtx_lock == _tid) \ (mp)->mtx_recurse++; \ else \ _mtx_lock_spin((mp), _tid, (opts), (file), (line)); \ } else \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, \ mp, 0, 0, (file), (line)); \ } while (0) #else /* SMP */ #define __mtx_lock_spin(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ spinlock_enter(); \ if ((mp)->mtx_lock == _tid) \ (mp)->mtx_recurse++; \ else { \ KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \ (mp)->mtx_lock = _tid; \ } \ } while (0) #endif /* SMP */ /* Unlock a normal mutex. */ #define __mtx_unlock(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ if (!_mtx_release_lock((mp), _tid)) \ _mtx_unlock_sleep((mp), (opts), (file), (line)); \ } while (0) /* * Unlock a spin mutex. For spinlocks, we can handle everything * inline, as it's pretty simple and a function call would be too * expensive (at least on some architectures). Since spin locks are * not _too_ common, inlining this code is not too big a deal. 
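The macros above are the inline fast path: a single compare-and-set, with everything else pushed out of line. A stand-alone C11 analogue of the same structure; lock_slow() is a hypothetical out-of-line routine standing in for __mtx_lock_sleep():

	#include <stdatomic.h>
	#include <stdint.h>

	#define	LK_UNOWNED	((uintptr_t)4)		/* mirrors MTX_UNOWNED */

	extern void lock_slow(atomic_uintptr_t *lk, uintptr_t tid);

	/* Inline fast path in the shape of __mtx_lock(): one CAS, then punt. */
	static inline void
	lock_fast(atomic_uintptr_t *lk, uintptr_t tid)
	{
		uintptr_t expected = LK_UNOWNED;

		if (!atomic_compare_exchange_strong_explicit(lk, &expected, tid,
		    memory_order_acquire, memory_order_relaxed))
			lock_slow(lk, tid);	/* contested or recursed */
	}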
* * Since we always perform a spinlock_enter() when attempting to acquire a * spin lock, we need to always perform a matching spinlock_exit() when * releasing a spin lock. This includes the recursion cases. */ #ifdef SMP #define __mtx_unlock_spin(mp) do { \ if (mtx_recursed((mp))) \ (mp)->mtx_recurse--; \ else { \ LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \ mp); \ _mtx_release_lock_quick((mp)); \ } \ spinlock_exit(); \ } while (0) #else /* SMP */ #define __mtx_unlock_spin(mp) do { \ if (mtx_recursed((mp))) \ (mp)->mtx_recurse--; \ else { \ LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \ mp); \ (mp)->mtx_lock = MTX_UNOWNED; \ } \ spinlock_exit(); \ } while (0) #endif /* SMP */ /* * Exported lock manipulation interface. * * mtx_lock(m) locks MTX_DEF mutex `m' * * mtx_lock_spin(m) locks MTX_SPIN mutex `m' * * mtx_unlock(m) unlocks MTX_DEF mutex `m' * * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m' * * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m' * and passes option flags `opts' to the "hard" function, if required. * With these routines, it is possible to pass flags such as MTX_QUIET * to the appropriate lock manipulation routines. * * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if * it cannot. Rather, it returns 0 on failure and non-zero on success. * It does NOT handle recursion as we assume that if a caller is properly * using this part of the interface, he will know that the lock in question * is _not_ recursed. * * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts * relevant option flags `opts.' * * mtx_initialized(m) returns non-zero if the lock `m' has been initialized. * * mtx_owned(m) returns non-zero if the current thread owns the lock `m' * * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed. */ #define mtx_lock(m) mtx_lock_flags((m), 0) #define mtx_lock_spin(m) mtx_lock_spin_flags((m), 0) #define mtx_trylock(m) mtx_trylock_flags((m), 0) #define mtx_unlock(m) mtx_unlock_flags((m), 0) #define mtx_unlock_spin(m) mtx_unlock_spin_flags((m), 0) struct mtx_pool; struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts); void mtx_pool_destroy(struct mtx_pool **poolp); struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr); struct mtx *mtx_pool_alloc(struct mtx_pool *pool); #define mtx_pool_lock(pool, ptr) \ mtx_lock(mtx_pool_find((pool), (ptr))) #define mtx_pool_lock_spin(pool, ptr) \ mtx_lock_spin(mtx_pool_find((pool), (ptr))) #define mtx_pool_unlock(pool, ptr) \ mtx_unlock(mtx_pool_find((pool), (ptr))) #define mtx_pool_unlock_spin(pool, ptr) \ mtx_unlock_spin(mtx_pool_find((pool), (ptr))) /* * mtxpool_lockbuilder is a pool of sleep locks that is not witness * checked and should only be used for building higher level locks. * * mtxpool_sleep is a general purpose pool of sleep mutexes. 
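A hedged sketch of the pool KPI above: the pool hashes an arbitrary pointer to one of its pre-initialized mutexes, so small or short-lived objects need no embedded lock; obj and its refs field are hypothetical:

	mtx_pool_lock(mtxpool_sleep, obj);	/* hashes "obj" to a pool mutex */
	obj->refs++;				/* short critical section */
	mtx_pool_unlock(mtxpool_sleep, obj);	/* same pointer, same mutex */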
*/ extern struct mtx_pool *mtxpool_lockbuilder; extern struct mtx_pool *mtxpool_sleep; #ifndef LOCK_DEBUG #error LOCK_DEBUG not defined, include before #endif #if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE) #define mtx_lock_flags_(m, opts, file, line) \ _mtx_lock_flags((m), (opts), (file), (line)) #define mtx_unlock_flags_(m, opts, file, line) \ _mtx_unlock_flags((m), (opts), (file), (line)) #define mtx_lock_spin_flags_(m, opts, file, line) \ _mtx_lock_spin_flags((m), (opts), (file), (line)) #define mtx_unlock_spin_flags_(m, opts, file, line) \ _mtx_unlock_spin_flags((m), (opts), (file), (line)) #else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */ #define mtx_lock_flags_(m, opts, file, line) \ __mtx_lock((m), curthread, (opts), (file), (line)) #define mtx_unlock_flags_(m, opts, file, line) \ __mtx_unlock((m), curthread, (opts), (file), (line)) #define mtx_lock_spin_flags_(m, opts, file, line) \ __mtx_lock_spin((m), curthread, (opts), (file), (line)) #define mtx_unlock_spin_flags_(m, opts, file, line) \ __mtx_unlock_spin((m)) #endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */ #ifdef INVARIANTS #define mtx_assert_(m, what, file, line) \ _mtx_assert((m), (what), (file), (line)) #define GIANT_REQUIRED mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__) #else /* INVARIANTS */ #define mtx_assert_(m, what, file, line) (void)0 #define GIANT_REQUIRED #endif /* INVARIANTS */ #define mtx_lock_flags(m, opts) \ mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_unlock_flags(m, opts) \ mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_lock_spin_flags(m, opts) \ mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_unlock_spin_flags(m, opts) \ mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_trylock_flags(m, opts) \ mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_assert(m, what) \ mtx_assert_((m), (what), __FILE__, __LINE__) #define mtx_sleep(chan, mtx, pri, wmesg, timo) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (timo)) #define mtx_initialized(m) lock_initalized(&(m)->lock_object) #define mtx_owned(m) (((m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread) #define mtx_recursed(m) ((m)->mtx_recurse != 0) #define mtx_name(m) ((m)->lock_object.lo_name) /* * Global locks. */ extern struct mtx Giant; extern struct mtx blocked_lock; /* * Giant lock manipulation and clean exit macros. * Used to replace return with an exit Giant and return. * * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT() * The #ifndef is to allow lint-like tools to redefine DROP_GIANT. 
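Because DROP_GIANT() opens a brace that PICKUP_GIANT() closes, the two must appear as a lexically matched pair in the same block. A hedged sketch of the usual pattern around a sleep:

	DROP_GIANT();
	pause("fwait", hz);		/* sleep without holding Giant */
	PICKUP_GIANT();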
*/ #ifndef DROP_GIANT #define DROP_GIANT() \ do { \ int _giantcnt = 0; \ WITNESS_SAVE_DECL(Giant); \ \ if (mtx_owned(&Giant)) { \ WITNESS_SAVE(&Giant.lock_object, Giant); \ for (_giantcnt = 0; mtx_owned(&Giant) && \ !SCHEDULER_STOPPED(); _giantcnt++) \ mtx_unlock(&Giant); \ } #define PICKUP_GIANT() \ PARTIAL_PICKUP_GIANT(); \ } while (0) #define PARTIAL_PICKUP_GIANT() \ mtx_assert(&Giant, MA_NOTOWNED); \ if (_giantcnt > 0) { \ while (_giantcnt--) \ mtx_lock(&Giant); \ WITNESS_RESTORE(&Giant.lock_object, Giant); \ } #endif #define UGAR(rval) do { \ int _val = (rval); \ mtx_unlock(&Giant); \ return (_val); \ } while (0) struct mtx_args { - struct mtx *ma_mtx; + void *ma_mtx; const char *ma_desc; int ma_opts; }; #define MTX_SYSINIT(name, mtx, desc, opts) \ static struct mtx_args name##_args = { \ (mtx), \ (desc), \ (opts) \ }; \ SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ mtx_sysinit, &name##_args); \ SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ - mtx_destroy, (mtx)) + _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock)) /* * The INVARIANTS-enabled mtx_assert() functionality. * * The constants need to be defined for INVARIANT_SUPPORT infrastructure * support as _mtx_assert() itself uses them and the latter implies that * _mtx_assert() must build. */ #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define MA_OWNED LA_XLOCKED #define MA_NOTOWNED LA_UNLOCKED #define MA_RECURSED LA_RECURSED #define MA_NOTRECURSED LA_NOTRECURSED #endif /* * Common lock type names. */ #define MTX_NETWORK_LOCK "network driver" #endif /* _KERNEL */ #endif /* _SYS_MUTEX_H_ */
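Finally, a hedged example of the MTX_SYSINIT() hook defined above: a file-scope mutex initialized at SI_SUB_LOCK time and torn down again through the matching SYSUNINIT; the names are illustrative:

	static struct mtx foo_global_mtx;
	MTX_SYSINIT(foo_global, &foo_global_mtx, "foo global", MTX_DEF);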