Index: head/sys/kern/kern_mutex.c =================================================================== --- head/sys/kern/kern_mutex.c (revision 313274) +++ head/sys/kern/kern_mutex.c (revision 313275) @@ -1,1137 +1,1141 @@ /*- * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ */ /* * Machine independent bits of mutex implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_adaptive_mutexes.h" #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES) #define ADAPTIVE_MUTEXES #endif #ifdef HWPMC_HOOKS #include PMC_SOFT_DEFINE( , , lock, failed); #endif /* * Return the mutex address when the lock cookie address is provided. * This functionality assumes that struct mtx* have a member named mtx_lock. */ #define mtxlock2mtx(c) (__containerof(c, struct mtx, mtx_lock)) /* * Internal utility macros. */ #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) #define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED) static void assert_mtx(const struct lock_object *lock, int what); #ifdef DDB static void db_show_mtx(const struct lock_object *lock); #endif static void lock_mtx(struct lock_object *lock, uintptr_t how); static void lock_spin(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_mtx(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_mtx(struct lock_object *lock); static uintptr_t unlock_spin(struct lock_object *lock); /* * Lock classes for sleep and spin mutexes. 
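 *
 * Each class exports the generic operations (lc_lock, lc_unlock,
 * lc_assert, ...) that lock-type-independent kernel code applies to a
 * bare struct lock_object.  A minimal sketch of the lock-cookie scheme
 * used throughout this file, mirroring mtxlock2mtx() above; the name
 * example_cookie_lock is illustrative only, and c is &m->mtx_lock:
 *
 *	static void
 *	example_cookie_lock(volatile uintptr_t *c)
 *	{
 *		struct mtx *m;
 *
 *		m = __containerof(c, struct mtx, mtx_lock);
 *		mtx_lock(m);
 *	}
 *
 * The recovery works because mtx_lock is the reserved member of
 * struct mtx that all of the cookie-taking functions below expect.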
*/ struct lock_class lock_class_mtx_sleep = { .lc_name = "sleep mutex", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_mtx, .lc_unlock = unlock_mtx, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; struct lock_class lock_class_mtx_spin = { .lc_name = "spin mutex", .lc_flags = LC_SPINLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_spin, .lc_unlock = unlock_spin, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; #ifdef ADAPTIVE_MUTEXES static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging"); static struct lock_delay_config __read_mostly mtx_delay = { .initial = 1000, .step = 500, .min = 100, .max = 5000, }; SYSCTL_INT(_debug_mtx, OID_AUTO, delay_initial, CTLFLAG_RW, &mtx_delay.initial, 0, ""); SYSCTL_INT(_debug_mtx, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_delay.step, 0, ""); SYSCTL_INT(_debug_mtx, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_delay.min, 0, ""); SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max, 0, ""); static void mtx_delay_sysinit(void *dummy) { mtx_delay.initial = mp_ncpus * 25; mtx_delay.step = (mp_ncpus * 25) / 2; mtx_delay.min = mp_ncpus * 5; mtx_delay.max = mp_ncpus * 25 * 10; } LOCK_DELAY_SYSINIT(mtx_delay_sysinit); #endif static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL, "mtx spin debugging"); static struct lock_delay_config __read_mostly mtx_spin_delay = { .initial = 1000, .step = 500, .min = 100, .max = 5000, }; SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_initial, CTLFLAG_RW, &mtx_spin_delay.initial, 0, ""); SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_spin_delay.step, 0, ""); SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_spin_delay.min, 0, ""); SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_spin_delay.max, 0, ""); static void mtx_spin_delay_sysinit(void *dummy) { mtx_spin_delay.initial = mp_ncpus * 25; mtx_spin_delay.step = (mp_ncpus * 25) / 2; mtx_spin_delay.min = mp_ncpus * 5; mtx_spin_delay.max = mp_ncpus * 25 * 10; } LOCK_DELAY_SYSINIT(mtx_spin_delay_sysinit); /* * System-wide mutexes */ struct mtx blocked_lock; struct mtx Giant; void assert_mtx(const struct lock_object *lock, int what) { mtx_assert((const struct mtx *)lock, what); } void lock_mtx(struct lock_object *lock, uintptr_t how) { mtx_lock((struct mtx *)lock); } void lock_spin(struct lock_object *lock, uintptr_t how) { panic("spin locks can only use msleep_spin"); } uintptr_t unlock_mtx(struct lock_object *lock) { struct mtx *m; m = (struct mtx *)lock; mtx_assert(m, MA_OWNED | MA_NOTRECURSED); mtx_unlock(m); return (0); } uintptr_t unlock_spin(struct lock_object *lock) { panic("spin locks can only use msleep_spin"); } #ifdef KDTRACE_HOOKS int owner_mtx(const struct lock_object *lock, struct thread **owner) { const struct mtx *m; uintptr_t x; m = (const struct mtx *)lock; x = m->mtx_lock; *owner = (struct thread *)(x & ~MTX_FLAGMASK); return (x != MTX_UNOWNED); } #endif /* * Function versions of the inlined __mtx_* macros. These are used by * modules and can also be called from assembly language if needed. 
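 *
 * Consumers do not call these directly; mtx_lock(9) and friends expand
 * either to the inline fast paths or to these functions, depending on
 * LOCK_DEBUG.  A minimal sketch of typical usage from a module, with
 * foo_mtx and foo_count as illustrative names only:
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo counter", MTX_DEF);
 *
 *	static void
 *	foo_bump(void)
 *	{
 *		mtx_lock(&foo_mtx);
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *	}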
*/ void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; + uintptr_t tid, v; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); - __mtx_lock(m, curthread, opts, file, line); + tid = (uintptr_t)curthread; + v = MTX_UNOWNED; + if (!_mtx_obtain_lock_fetch(m, &v, tid)) + _mtx_lock_sleep(m, v, tid, opts, file, line); + else + LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, + m, 0, 0, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } void __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); - __mtx_unlock(m, curthread, opts, file, line); + __mtx_unlock_sleep(c, opts, file, line); TD_LOCKS_DEC(curthread); } void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_lock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m)) KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0, ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); opts &= ~MTX_RECURSE; WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); __mtx_lock_spin(m, curthread, opts, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); } int __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; if (SCHEDULER_STOPPED()) return (1); m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_trylock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); KASSERT((opts & MTX_RECURSE) == 0, ("mtx_trylock_spin: unsupp. 
opt MTX_RECURSE on mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); if (__mtx_trylock_spin(m, curthread, opts, file, line)) { LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); return (1); } LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line); return (0); } void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_unlock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); __mtx_unlock_spin(m); } /* * The important part of mtx_trylock{,_flags}() * Tries to acquire lock `m.' If this function is called on a mutex that * is already owned, it will recursively acquire the lock. */ int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif int rval; if (SCHEDULER_STOPPED()) return (1); m = mtxlock2mtx(c); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_trylock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0)) { m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); rval = 1; } else rval = _mtx_obtain_lock(m, (uintptr_t)curthread); opts &= ~MTX_RECURSE; LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line); if (rval) { WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); TD_LOCKS_INC(curthread); if (m->mtx_recurse == 0) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); } return (rval); } /* * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. * * We call this if the lock is either contested (i.e. we need to go to * sleep waiting for it), or if we need to recurse on it. 
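 *
 * The hand-off from the fast path can be sketched as follows (this is
 * the pattern __mtx_lock_flags() above uses; v is the caller's snapshot
 * of the lock word and tid is the current thread cast to uintptr_t):
 *
 *	v = MTX_UNOWNED;
 *	if (!_mtx_obtain_lock_fetch(m, &v, tid))
 *		_mtx_lock_sleep(m, v, tid, opts, file, line);
 *
 * Because the fcmpset updates v on failure, this function starts with
 * a current view of the owner and need not re-read mtx_lock first.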
*/ void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, const char *file, int line) { struct mtx *m; struct turnstile *ts; #ifdef ADAPTIVE_MUTEXES volatile struct thread *owner; #endif #ifdef KTR int cont_logged = 0; #endif #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif if (SCHEDULER_STOPPED()) return; #if defined(ADAPTIVE_MUTEXES) lock_delay_arg_init(&lda, &mtx_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif m = mtxlock2mtx(c); if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) { KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0, ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); opts &= ~MTX_RECURSE; m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); return; } opts &= ~MTX_RECURSE; #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR4(KTR_LOCK, "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", m->lock_object.lo_name, (void *)m->mtx_lock, file, line); #ifdef KDTRACE_HOOKS all_time -= lockstat_nsecs(&m->lock_object); #endif for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef ADAPTIVE_MUTEXES /* * If the owner is running on another CPU, spin until the * owner stops running or the state of the lock changes. */ owner = lv_mtx_owner(v); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&m->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, m, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "spinning", "lockname:\"%s\"", m->lock_object.lo_name); do { lock_delay(&lda); v = MTX_READ_VALUE(m); owner = lv_mtx_owner(v); } while (v != MTX_UNOWNED && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "running"); continue; } #endif ts = turnstile_trywait(&m->lock_object); v = MTX_READ_VALUE(m); /* * Check if the lock has been released while spinning for * the turnstile chain lock. */ if (v == MTX_UNOWNED) { turnstile_cancel(ts); continue; } #ifdef ADAPTIVE_MUTEXES /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the turnstile * chain lock. If so, drop the turnstile lock and try * again. */ owner = lv_mtx_owner(v); if (TD_IS_RUNNING(owner)) { turnstile_cancel(ts); continue; } #endif /* * If the mutex isn't already contested and a failure occurs * setting the contested bit, the mutex was either released * or the state of the MTX_RECURSED bit changed. */ if ((v & MTX_CONTESTED) == 0 && !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) { turnstile_cancel(ts); v = MTX_READ_VALUE(m); continue; } /* * We definitely must sleep for this lock. */ mtx_assert(m, MA_NOTOWNED); #ifdef KTR if (!cont_logged) { CTR6(KTR_CONTENTION, "contention: %p at %s:%d wants %s, taken by %s:%d", (void *)tid, file, line, m->lock_object.lo_name, WITNESS_FILE(&m->lock_object), WITNESS_LINE(&m->lock_object)); cont_logged = 1; } #endif /* * Block on the turnstile. 
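 * At this point the checks above guarantee that the turnstile chain
 * lock is held, the mutex still has an owner, that owner is not
 * running on another CPU (in the ADAPTIVE_MUTEXES case), and
 * MTX_CONTESTED is set, so the eventual unlocker must take the chain
 * lock and will see this thread queued; the wakeup cannot be missed.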
*/ #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&m->lock_object); #endif turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&m->lock_object); sleep_cnt++; #endif v = MTX_READ_VALUE(m); } #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&m->lock_object); #endif #ifdef KTR if (cont_logged) { CTR4(KTR_CONTENTION, "contention end: %s acquired by %p at %s:%d", m->lock_object.lo_name, (void *)tid, file, line); } #endif LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); #ifdef KDTRACE_HOOKS if (sleep_time) LOCKSTAT_RECORD1(adaptive__block, m, sleep_time); /* * Only record the loops spinning and not sleeping. */ if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time); #endif } static void _mtx_lock_spin_failed(struct mtx *m) { struct thread *td; td = mtx_owner(m); /* If the mutex is unlocked, try again. */ if (td == NULL) return; printf( "spin lock %p (%s) held by %p (tid %d) too long\n", m, m->lock_object.lo_name, td, td->td_tid); #ifdef WITNESS witness_display_spinlock(&m->lock_object, td, printf); #endif panic("spin lock held too long"); } #ifdef SMP /* * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock. * * This is only called if we need to actually spin for the lock. Recursion * is handled inline. */ void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, const char *file, int line) { struct mtx *m; struct lock_delay_arg lda; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif if (SCHEDULER_STOPPED()) return; lock_delay_arg_init(&lda, &mtx_spin_delay); m = mtxlock2mtx(c); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "spinning", "lockname:\"%s\"", m->lock_object.lo_name); #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); #ifdef KDTRACE_HOOKS spin_time -= lockstat_nsecs(&m->lock_object); #endif for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } /* Give interrupts a chance while we spin. */ spinlock_exit(); do { if (lda.spin_cnt < 10000000) { lock_delay(&lda); } else { lda.spin_cnt++; if (lda.spin_cnt < 60000000 || kdb_active || panicstr != NULL) DELAY(1); else _mtx_lock_spin_failed(m); cpu_spinwait(); } v = MTX_READ_VALUE(m); } while (v != MTX_UNOWNED); spinlock_enter(); } #ifdef KDTRACE_HOOKS spin_time += lockstat_nsecs(&m->lock_object); #endif if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "running"); #ifdef KDTRACE_HOOKS LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); if (spin_time != 0) LOCKSTAT_RECORD1(spin__spin, m, spin_time); #endif } #endif /* SMP */ void thread_lock_flags_(struct thread *td, int opts, const char *file, int line) { struct mtx *m; uintptr_t tid, v; struct lock_delay_arg lda; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif tid = (uintptr_t)curthread; if (SCHEDULER_STOPPED()) { /* * Ensure that spinlock sections are balanced even when the * scheduler is stopped, since we may otherwise inadvertently * re-enable interrupts while dumping core. 
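 * Put differently, every path through this function must leave
 * exactly one spinlock_enter() outstanding, because the matching
 * thread_unlock() ends in mtx_unlock_spin(), whose spinlock_exit()
 * would otherwise unbalance the section and re-enable interrupts
 * prematurely.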
*/ spinlock_enter(); return; } lock_delay_arg_init(&lda, &mtx_spin_delay); #ifdef KDTRACE_HOOKS spin_time -= lockstat_nsecs(&td->td_lock->lock_object); #endif for (;;) { retry: spinlock_enter(); m = td->td_lock; KASSERT(m->mtx_lock != MTX_DESTROYED, ("thread_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("thread_lock() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m)) KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0, ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); for (;;) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; if (v == MTX_UNOWNED) continue; if (v == tid) { m->mtx_recurse++; break; } #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); /* Give interrupts a chance while we spin. */ spinlock_exit(); do { if (lda.spin_cnt < 10000000) { lock_delay(&lda); } else { lda.spin_cnt++; if (lda.spin_cnt < 60000000 || kdb_active || panicstr != NULL) DELAY(1); else _mtx_lock_spin_failed(m); cpu_spinwait(); } if (m != td->td_lock) goto retry; v = MTX_READ_VALUE(m); } while (v != MTX_UNOWNED); spinlock_enter(); } if (m == td->td_lock) break; __mtx_unlock_spin(m); /* does spinlock_exit() */ } #ifdef KDTRACE_HOOKS spin_time += lockstat_nsecs(&m->lock_object); #endif if (m->mtx_recurse == 0) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); #ifdef KDTRACE_HOOKS if (spin_time != 0) LOCKSTAT_RECORD1(thread__spin, m, spin_time); #endif } struct mtx * thread_lock_block(struct thread *td) { struct mtx *lock; THREAD_LOCK_ASSERT(td, MA_OWNED); lock = td->td_lock; td->td_lock = &blocked_lock; mtx_unlock_spin(lock); return (lock); } void thread_lock_unblock(struct thread *td, struct mtx *new) { mtx_assert(new, MA_OWNED); MPASS(td->td_lock == &blocked_lock); atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new); } void thread_lock_set(struct thread *td, struct mtx *new) { struct mtx *lock; mtx_assert(new, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); lock = td->td_lock; td->td_lock = new; mtx_unlock_spin(lock); } /* * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. * * We are only called here if the lock is recursed or contested (i.e. we * need to wake up a blocked thread). */ void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; struct turnstile *ts; - uintptr_t v; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); - v = MTX_READ_VALUE(m); - if (v == (uintptr_t)curthread) { + if (!mtx_recursed(m)) { + LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m); if (_mtx_release_lock(m, (uintptr_t)curthread)) return; - } - - if (mtx_recursed(m)) { + } else { if (--(m->mtx_recurse) == 0) atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); return; } /* * We have to lock the chain before the turnstile so this turnstile * can be removed from the hash list if it is empty. 
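 * The hand-off below can be sketched as the following sequence:
 *
 *	turnstile_chain_lock()    - serialize against arriving waiters
 *	turnstile_broadcast()     - queue every waiter for wakeup
 *	_mtx_release_lock_quick() - store MTX_UNOWNED
 *	turnstile_unpend()        - make the queued waiters runnable
 *	turnstile_chain_unlock()  - let a new turnstile be installed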
*/ turnstile_chain_lock(&m->lock_object); ts = turnstile_lookup(&m->lock_object); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); MPASS(ts != NULL); turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE); _mtx_release_lock_quick(m); /* * This turnstile is now no longer associated with the mutex. We can * unlock the chain lock so a new turnstile may take its place. */ turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); turnstile_chain_unlock(&m->lock_object); } /* * All the unlocking of MTX_SPIN locks is done inline. * See the __mtx_unlock_spin() macro for the details. */ /* * The backing function for the INVARIANTS-enabled mtx_assert() */ #ifdef INVARIANT_SUPPORT void __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line) { const struct mtx *m; if (panicstr != NULL || dumping || SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); switch (what) { case MA_OWNED: case MA_OWNED | MA_RECURSED: case MA_OWNED | MA_NOTRECURSED: if (!mtx_owned(m)) panic("mutex %s not owned at %s:%d", m->lock_object.lo_name, file, line); if (mtx_recursed(m)) { if ((what & MA_NOTRECURSED) != 0) panic("mutex %s recursed at %s:%d", m->lock_object.lo_name, file, line); } else if ((what & MA_RECURSED) != 0) { panic("mutex %s unrecursed at %s:%d", m->lock_object.lo_name, file, line); } break; case MA_NOTOWNED: if (mtx_owned(m)) panic("mutex %s owned at %s:%d", m->lock_object.lo_name, file, line); break; default: panic("unknown mtx_assert at %s:%d", file, line); } } #endif /* * General init routine used by the MTX_SYSINIT() macro. */ void mtx_sysinit(void *arg) { struct mtx_args *margs = arg; mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts); } /* * Mutex initialization routine; initialize lock `m' of type contained in * `opts' with options contained in `opts' and name `name.' The optional * lock type `type' is used as a general lock category name for use with * witness. */ void _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts) { struct mtx *m; struct lock_class *class; int flags; m = mtxlock2mtx(c); MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0); ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p", __func__, name, &m->mtx_lock)); /* Determine lock class and lock flags. */ if (opts & MTX_SPIN) class = &lock_class_mtx_spin; else class = &lock_class_mtx_sleep; flags = 0; if (opts & MTX_QUIET) flags |= LO_QUIET; if (opts & MTX_RECURSE) flags |= LO_RECURSABLE; if ((opts & MTX_NOWITNESS) == 0) flags |= LO_WITNESS; if (opts & MTX_DUPOK) flags |= LO_DUPOK; if (opts & MTX_NOPROFILE) flags |= LO_NOPROFILE; if (opts & MTX_NEW) flags |= LO_NEW; /* Initialize mutex. */ lock_init(&m->lock_object, class, name, type, flags); m->mtx_lock = MTX_UNOWNED; m->mtx_recurse = 0; } /* * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be * passed in as a flag here because if the corresponding mtx_init() was * called with MTX_QUIET set, then it will already be set in the mutex's * flags. */ void _mtx_destroy(volatile uintptr_t *c) { struct mtx *m; m = mtxlock2mtx(c); if (!mtx_owned(m)) MPASS(mtx_unowned(m)); else { MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0); /* Perform the non-mtx related part of mtx_unlock_spin(). */ if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) spinlock_exit(); else TD_LOCKS_DEC(curthread); lock_profile_release_lock(&m->lock_object); /* Tell witness this isn't locked to make it happy.
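 * Destroying a held mutex is legal and common in teardown paths; a
 * minimal sketch of both call patterns, with foo_mtx an illustrative
 * name:
 *
 *	mtx_lock(&foo_mtx);
 *	(final teardown of the protected object)
 *	mtx_destroy(&foo_mtx);
 *
 * or simply mtx_destroy(&foo_mtx) while it is unowned, as the
 * assertions above allow.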
*/ WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__, __LINE__); } m->mtx_lock = MTX_DESTROYED; lock_destroy(&m->lock_object); } /* * Initialize the mutex code and system mutexes. This is called from the MD * startup code prior to mi_startup(). The per-CPU data space needs to be * set up before this is called. */ void mutex_init(void) { /* Set up turnstiles so that sleep mutexes work. */ init_turnstiles(); /* * Initialize mutexes. */ mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE); mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN); blocked_lock.mtx_lock = 0xdeadc0de; /* Always blocked. */ mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN); mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN); mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN); mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN); mtx_init(&devmtx, "cdev", NULL, MTX_DEF); mtx_lock(&Giant); } #ifdef DDB void db_show_mtx(const struct lock_object *lock) { struct thread *td; const struct mtx *m; m = (const struct mtx *)lock; db_printf(" flags: {"); if (LOCK_CLASS(lock) == &lock_class_mtx_spin) db_printf("SPIN"); else db_printf("DEF"); if (m->lock_object.lo_flags & LO_RECURSABLE) db_printf(", RECURSE"); if (m->lock_object.lo_flags & LO_DUPOK) db_printf(", DUPOK"); db_printf("}\n"); db_printf(" state: {"); if (mtx_unowned(m)) db_printf("UNOWNED"); else if (mtx_destroyed(m)) db_printf("DESTROYED"); else { db_printf("OWNED"); if (m->mtx_lock & MTX_CONTESTED) db_printf(", CONTESTED"); if (m->mtx_lock & MTX_RECURSED) db_printf(", RECURSED"); } db_printf("}\n"); if (!mtx_unowned(m) && !mtx_destroyed(m)) { td = mtx_owner(m); db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (mtx_recursed(m)) db_printf(" recursed: %d\n", m->mtx_recurse); } } #endif Index: head/sys/sys/lockstat.h =================================================================== --- head/sys/sys/lockstat.h (revision 313274) +++ head/sys/sys/lockstat.h (revision 313275) @@ -1,135 +1,143 @@ /*- * Copyright (c) 2008-2009 Stacey Son * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* * $FreeBSD$ */ /* * DTrace lockstat provider definitions */ #ifndef _SYS_LOCKSTAT_H #define _SYS_LOCKSTAT_H #ifdef _KERNEL #include #include #include SDT_PROVIDER_DECLARE(lockstat); SDT_PROBE_DECLARE(lockstat, , , adaptive__acquire); SDT_PROBE_DECLARE(lockstat, , , adaptive__release); SDT_PROBE_DECLARE(lockstat, , , adaptive__spin); SDT_PROBE_DECLARE(lockstat, , , adaptive__block); SDT_PROBE_DECLARE(lockstat, , , spin__acquire); SDT_PROBE_DECLARE(lockstat, , , spin__release); SDT_PROBE_DECLARE(lockstat, , , spin__spin); SDT_PROBE_DECLARE(lockstat, , , rw__acquire); SDT_PROBE_DECLARE(lockstat, , , rw__release); SDT_PROBE_DECLARE(lockstat, , , rw__block); SDT_PROBE_DECLARE(lockstat, , , rw__spin); SDT_PROBE_DECLARE(lockstat, , , rw__upgrade); SDT_PROBE_DECLARE(lockstat, , , rw__downgrade); SDT_PROBE_DECLARE(lockstat, , , sx__acquire); SDT_PROBE_DECLARE(lockstat, , , sx__release); SDT_PROBE_DECLARE(lockstat, , , sx__block); SDT_PROBE_DECLARE(lockstat, , , sx__spin); SDT_PROBE_DECLARE(lockstat, , , sx__upgrade); SDT_PROBE_DECLARE(lockstat, , , sx__downgrade); SDT_PROBE_DECLARE(lockstat, , , thread__spin); #define LOCKSTAT_WRITER 0 #define LOCKSTAT_READER 1 extern int lockstat_enabled; #ifdef KDTRACE_HOOKS #define LOCKSTAT_RECORD0(probe, lp) \ SDT_PROBE1(lockstat, , , probe, lp) #define LOCKSTAT_RECORD1(probe, lp, arg1) \ SDT_PROBE2(lockstat, , , probe, lp, arg1) #define LOCKSTAT_RECORD2(probe, lp, arg1, arg2) \ SDT_PROBE3(lockstat, , , probe, lp, arg1, arg2) #define LOCKSTAT_RECORD3(probe, lp, arg1, arg2, arg3) \ SDT_PROBE4(lockstat, , , probe, lp, arg1, arg2, arg3) #define LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, arg4) \ SDT_PROBE5(lockstat, , , probe, lp, arg1, arg2, arg3, arg4) #define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) do { \ lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l); \ LOCKSTAT_RECORD0(probe, lp); \ } while (0) #define LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(probe, lp, c, wt, f, l, a) do { \ lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l); \ LOCKSTAT_RECORD1(probe, lp, a); \ } while (0) #define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) do { \ lock_profile_release_lock(&(lp)->lock_object); \ LOCKSTAT_RECORD0(probe, lp); \ } while (0) #define LOCKSTAT_PROFILE_RELEASE_RWLOCK(probe, lp, a) do { \ lock_profile_release_lock(&(lp)->lock_object); \ LOCKSTAT_RECORD1(probe, lp, a); \ } while (0) +#ifndef LOCK_PROFILING +#define LOCKSTAT_PROFILE_ENABLED(probe) SDT_PROBE_ENABLED(lockstat, , , probe) +#endif + struct lock_object; uint64_t lockstat_nsecs(struct lock_object *); #else /* !KDTRACE_HOOKS */ #define LOCKSTAT_RECORD0(probe, lp) #define LOCKSTAT_RECORD1(probe, lp, arg1) #define LOCKSTAT_RECORD2(probe, lp, arg1, arg2) #define LOCKSTAT_RECORD3(probe, lp, arg1, arg2, arg3) #define LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, arg4) #define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) \ lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l) #define LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(probe, lp, c, wt, f, l, a) \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) #define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) \ lock_profile_release_lock(&(lp)->lock_object) #define LOCKSTAT_PROFILE_RELEASE_RWLOCK(probe, lp, a) \ LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) + +#ifndef LOCK_PROFILING +#define LOCKSTAT_PROFILE_ENABLED(probe) 0 +#endif #endif /* !KDTRACE_HOOKS */ #endif /* _KERNEL */ #endif /* _SYS_LOCKSTAT_H */ Index: head/sys/sys/mutex.h 
=================================================================== --- head/sys/sys/mutex.h (revision 313274) +++ head/sys/sys/mutex.h (revision 313275) @@ -1,520 +1,515 @@ /*- * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $ * $FreeBSD$ */ #ifndef _SYS_MUTEX_H_ #define _SYS_MUTEX_H_ #include #include #include #ifdef _KERNEL #include #include #include #include #include /* * Mutex types and options passed to mtx_init(). MTX_QUIET and MTX_DUPOK * can also be passed in. */ #define MTX_DEF 0x00000000 /* DEFAULT (sleep) lock */ #define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */ #define MTX_RECURSE 0x00000004 /* Option: lock allowed to recurse */ #define MTX_NOWITNESS 0x00000008 /* Don't do any witness checking. */ #define MTX_NOPROFILE 0x00000020 /* Don't profile this lock */ #define MTX_NEW 0x00000040 /* Don't check for double-init */ /* * Option flags passed to certain lock/unlock routines, through the use * of corresponding mtx_{lock,unlock}_flags() interface macros. */ #define MTX_QUIET LOP_QUIET /* Don't log a mutex event */ #define MTX_DUPOK LOP_DUPOK /* Don't log a duplicate acquire */ /* * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this, * with the exception of MTX_UNOWNED, applies to spin locks. */ #define MTX_RECURSED 0x00000001 /* lock recursed (for MTX_DEF only) */ #define MTX_CONTESTED 0x00000002 /* lock contested (for MTX_DEF only) */ #define MTX_UNOWNED 0x00000004 /* Cookie for free mutex */ #define MTX_FLAGMASK (MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED) /* * Value stored in mutex->mtx_lock to denote a destroyed mutex. */ #define MTX_DESTROYED (MTX_CONTESTED | MTX_UNOWNED) /* * Prototypes * * NOTE: Functions prepended with `_' (underscore) are exported to other parts * of the kernel via macros, thus allowing us to use the cpp LOCK_FILE * and LOCK_LINE or for hiding the lock cookie crunching to the * consumers. These functions should not be called directly by any * code using the API. Their macros cover their functionality. 
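 *
 * The layering for the plain lock operation can be sketched as:
 *
 *	mtx_lock(m)
 *	  -> mtx_lock_flags(m, 0)
 *	    -> mtx_lock_flags_(m, 0, LOCK_FILE, LOCK_LINE)
 *	      -> __mtx_lock() inline fast path (LOCK_DEBUG == 0)
 *	      -> _mtx_lock_flags() function    (LOCK_DEBUG > 0)
 *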
* Functions with a `_' suffix are the entry point for the common * KPI covering both the compat shims and the fast-path case. These can be * used by consumers willing to pass options, file and line * information in an option-independent way. * * [See below for descriptions] * */ void _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts); void _mtx_destroy(volatile uintptr_t *c); void mtx_sysinit(void *arg); int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line); void mutex_init(void); void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, const char *file, int line); void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line); #ifdef SMP void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, const char *file, int line); #endif void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line); void __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line); void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line); int __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line); void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line); #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) void __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line); #endif void thread_lock_flags_(struct thread *, int, const char *, int); #define thread_lock(tdp) \ thread_lock_flags_((tdp), 0, __FILE__, __LINE__) #define thread_lock_flags(tdp, opt) \ thread_lock_flags_((tdp), (opt), __FILE__, __LINE__) #define thread_unlock(tdp) \ mtx_unlock_spin((tdp)->td_lock) /* * Top-level macros to provide lock cookie once the actual mtx is passed. * They will also prevent passing a malformed object to the mtx KPI by * failing compilation as the mtx_lock reserved member will not be found. */ #define mtx_init(m, n, t, o) \ _mtx_init(&(m)->mtx_lock, n, t, o) #define mtx_destroy(m) \ _mtx_destroy(&(m)->mtx_lock) #define mtx_trylock_flags_(m, o, f, l) \ _mtx_trylock_flags_(&(m)->mtx_lock, o, f, l) #define _mtx_lock_sleep(m, v, t, o, f, l) \ __mtx_lock_sleep(&(m)->mtx_lock, v, t, o, f, l) #define _mtx_unlock_sleep(m, o, f, l) \ __mtx_unlock_sleep(&(m)->mtx_lock, o, f, l) #ifdef SMP #define _mtx_lock_spin(m, v, t, o, f, l) \ _mtx_lock_spin_cookie(&(m)->mtx_lock, v, t, o, f, l) #endif #define _mtx_lock_flags(m, o, f, l) \ __mtx_lock_flags(&(m)->mtx_lock, o, f, l) #define _mtx_unlock_flags(m, o, f, l) \ __mtx_unlock_flags(&(m)->mtx_lock, o, f, l) #define _mtx_lock_spin_flags(m, o, f, l) \ __mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l) #define _mtx_trylock_spin_flags(m, o, f, l) \ __mtx_trylock_spin_flags(&(m)->mtx_lock, o, f, l) #define _mtx_unlock_spin_flags(m, o, f, l) \ __mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l) #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define _mtx_assert(m, w, f, l) \ __mtx_assert(&(m)->mtx_lock, w, f, l) #endif #define mtx_recurse lock_object.lo_data /* Very simple operations on mtx_lock. */ /* Try to obtain mtx_lock once.
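 * _mtx_obtain_lock() below is a plain cmpset: it succeeds only if the
 * lock word still holds MTX_UNOWNED.  _mtx_obtain_lock_fetch() is the
 * fcmpset form, which on failure also writes the observed lock value
 * back through vp, so callers retry as sketched here:
 *
 *	v = MTX_UNOWNED;
 *	if (!_mtx_obtain_lock_fetch(mp, &v, tid))
 *		take the slow path, with v already holding the owner
 *
 * saving one explicit re-read of mtx_lock per failed attempt.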
*/ #define _mtx_obtain_lock(mp, tid) \ atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) -#define _mtx_obtain_lock_fetch(mp, vp, tid) ({ \ - *vp = MTX_UNOWNED; \ - atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, vp, (tid)); \ -}) +#define _mtx_obtain_lock_fetch(mp, vp, tid) \ + atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, vp, (tid)) /* Try to release mtx_lock if it is unrecursed and uncontested. */ #define _mtx_release_lock(mp, tid) \ atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED) /* Release mtx_lock quickly, assuming we own it. */ #define _mtx_release_lock_quick(mp) \ atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED) /* * Full lock operations that are suitable to be inlined in non-debug * kernels. If the lock cannot be acquired or released trivially then * the work is deferred to another function. */ /* Lock a normal mutex. */ #define __mtx_lock(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ - uintptr_t _v; \ + uintptr_t _v = MTX_UNOWNED; \ \ - if (!_mtx_obtain_lock_fetch((mp), &_v, _tid)) \ + if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\ + !_mtx_obtain_lock_fetch((mp), &_v, _tid))) \ _mtx_lock_sleep((mp), _v, _tid, (opts), (file), (line));\ - else \ - LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, \ - mp, 0, 0, file, line); \ } while (0) /* * Lock a spin mutex. For spinlocks, we handle recursion inline (it * turns out that function calls can be significantly expensive on * some architectures). Since spin locks are not _too_ common, * inlining this code is not too big a deal. */ #ifdef SMP #define __mtx_lock_spin(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ - uintptr_t _v; \ + uintptr_t _v = MTX_UNOWNED; \ \ spinlock_enter(); \ if (!_mtx_obtain_lock_fetch((mp), &_v, _tid)) { \ if (_v == _tid) \ (mp)->mtx_recurse++; \ else \ _mtx_lock_spin((mp), _v, _tid, (opts), (file), (line));\ } else \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, \ mp, 0, 0, file, line); \ } while (0) #define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \ uintptr_t _tid = (uintptr_t)(tid); \ int _ret; \ \ spinlock_enter(); \ if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\ spinlock_exit(); \ _ret = 0; \ } else { \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, \ mp, 0, 0, file, line); \ _ret = 1; \ } \ _ret; \ }) #else /* SMP */ #define __mtx_lock_spin(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ spinlock_enter(); \ if ((mp)->mtx_lock == _tid) \ (mp)->mtx_recurse++; \ else { \ KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \ (mp)->mtx_lock = _tid; \ } \ } while (0) #define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \ uintptr_t _tid = (uintptr_t)(tid); \ int _ret; \ \ spinlock_enter(); \ if ((mp)->mtx_lock != MTX_UNOWNED) { \ spinlock_exit(); \ _ret = 0; \ } else { \ (mp)->mtx_lock = _tid; \ _ret = 1; \ } \ _ret; \ }) #endif /* SMP */ /* Unlock a normal mutex. */ #define __mtx_unlock(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ - if ((mp)->mtx_recurse == 0) \ - LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, mp); \ - if (!_mtx_release_lock((mp), _tid)) \ + if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\ + !_mtx_release_lock((mp), _tid))) \ _mtx_unlock_sleep((mp), (opts), (file), (line)); \ } while (0) /* * Unlock a spin mutex. 
For spinlocks, we can handle everything * inline, as it's pretty simple and a function call would be too * expensive (at least on some architectures). Since spin locks are * not _too_ common, inlining this code is not too big a deal. * * Since we always perform a spinlock_enter() when attempting to acquire a * spin lock, we need to always perform a matching spinlock_exit() when * releasing a spin lock. This includes the recursion cases. */ #ifdef SMP #define __mtx_unlock_spin(mp) do { \ if (mtx_recursed((mp))) \ (mp)->mtx_recurse--; \ else { \ LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \ _mtx_release_lock_quick((mp)); \ } \ spinlock_exit(); \ } while (0) #else /* SMP */ #define __mtx_unlock_spin(mp) do { \ if (mtx_recursed((mp))) \ (mp)->mtx_recurse--; \ else { \ LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \ (mp)->mtx_lock = MTX_UNOWNED; \ } \ spinlock_exit(); \ } while (0) #endif /* SMP */ /* * Exported lock manipulation interface. * * mtx_lock(m) locks MTX_DEF mutex `m' * * mtx_lock_spin(m) locks MTX_SPIN mutex `m' * * mtx_unlock(m) unlocks MTX_DEF mutex `m' * * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m' * * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m' * and passes option flags `opts' to the "hard" function, if required. * With these routines, it is possible to pass flags such as MTX_QUIET * to the appropriate lock manipulation routines. * * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if * it cannot. Rather, it returns 0 on failure and non-zero on success. * It does NOT handle recursion as we assume that if a caller is properly * using this part of the interface, he will know that the lock in question * is _not_ recursed. * * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts * relevant option flags `opts.' * * mtx_trylock_spin(m) attempts to acquire MTX_SPIN mutex `m' but doesn't * spin if it cannot. Rather, it returns 0 on failure and non-zero on * success. It always returns failure for recursed lock attempts. * * mtx_initialized(m) returns non-zero if the lock `m' has been initialized. * * mtx_owned(m) returns non-zero if the current thread owns the lock `m' * * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed. */ #define mtx_lock(m) mtx_lock_flags((m), 0) #define mtx_lock_spin(m) mtx_lock_spin_flags((m), 0) #define mtx_trylock(m) mtx_trylock_flags((m), 0) #define mtx_trylock_spin(m) mtx_trylock_spin_flags((m), 0) #define mtx_unlock(m) mtx_unlock_flags((m), 0) #define mtx_unlock_spin(m) mtx_unlock_spin_flags((m), 0) struct mtx_pool; struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts); void mtx_pool_destroy(struct mtx_pool **poolp); struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr); struct mtx *mtx_pool_alloc(struct mtx_pool *pool); #define mtx_pool_lock(pool, ptr) \ mtx_lock(mtx_pool_find((pool), (ptr))) #define mtx_pool_lock_spin(pool, ptr) \ mtx_lock_spin(mtx_pool_find((pool), (ptr))) #define mtx_pool_unlock(pool, ptr) \ mtx_unlock(mtx_pool_find((pool), (ptr))) #define mtx_pool_unlock_spin(pool, ptr) \ mtx_unlock_spin(mtx_pool_find((pool), (ptr))) /* * mtxpool_sleep is a general purpose pool of sleep mutexes. 
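 *
 * Pool mutexes allow locking by address when embedding a mutex in the
 * object is impractical; a minimal sketch, with obj an illustrative
 * pointer to the data being protected:
 *
 *	mtx_pool_lock(mtxpool_sleep, obj);
 *	(access *obj)
 *	mtx_pool_unlock(mtxpool_sleep, obj);
 *
 * Both calls hash obj to the same pool member via mtx_pool_find().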
*/ extern struct mtx_pool *mtxpool_sleep; #ifndef LOCK_DEBUG #error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h> #endif #if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE) #define mtx_lock_flags_(m, opts, file, line) \ _mtx_lock_flags((m), (opts), (file), (line)) #define mtx_unlock_flags_(m, opts, file, line) \ _mtx_unlock_flags((m), (opts), (file), (line)) #define mtx_lock_spin_flags_(m, opts, file, line) \ _mtx_lock_spin_flags((m), (opts), (file), (line)) #define mtx_trylock_spin_flags_(m, opts, file, line) \ _mtx_trylock_spin_flags((m), (opts), (file), (line)) #define mtx_unlock_spin_flags_(m, opts, file, line) \ _mtx_unlock_spin_flags((m), (opts), (file), (line)) #else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */ #define mtx_lock_flags_(m, opts, file, line) \ __mtx_lock((m), curthread, (opts), (file), (line)) #define mtx_unlock_flags_(m, opts, file, line) \ __mtx_unlock((m), curthread, (opts), (file), (line)) #define mtx_lock_spin_flags_(m, opts, file, line) \ __mtx_lock_spin((m), curthread, (opts), (file), (line)) #define mtx_trylock_spin_flags_(m, opts, file, line) \ __mtx_trylock_spin((m), curthread, (opts), (file), (line)) #define mtx_unlock_spin_flags_(m, opts, file, line) \ __mtx_unlock_spin((m)) #endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */ #ifdef INVARIANTS #define mtx_assert_(m, what, file, line) \ _mtx_assert((m), (what), (file), (line)) #define GIANT_REQUIRED mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__) #else /* INVARIANTS */ #define mtx_assert_(m, what, file, line) (void)0 #define GIANT_REQUIRED #endif /* INVARIANTS */ #define mtx_lock_flags(m, opts) \ mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_unlock_flags(m, opts) \ mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_lock_spin_flags(m, opts) \ mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_unlock_spin_flags(m, opts) \ mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_trylock_flags(m, opts) \ mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_trylock_spin_flags(m, opts) \ mtx_trylock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_assert(m, what) \ mtx_assert_((m), (what), __FILE__, __LINE__) #define mtx_sleep(chan, mtx, pri, wmesg, timo) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), \ tick_sbt * (timo), 0, C_HARDCLOCK) #define MTX_READ_VALUE(m) ((m)->mtx_lock) #define mtx_initialized(m) lock_initialized(&(m)->lock_object) #define lv_mtx_owner(v) ((struct thread *)((v) & ~MTX_FLAGMASK)) #define mtx_owner(m) lv_mtx_owner(MTX_READ_VALUE(m)) #define mtx_owned(m) (mtx_owner(m) == curthread) #define mtx_recursed(m) ((m)->mtx_recurse != 0) #define mtx_name(m) ((m)->lock_object.lo_name) /* * Global locks. */ extern struct mtx Giant; extern struct mtx blocked_lock; /* * Giant lock manipulation and clean exit macros. * Used to replace return with an exit Giant and return. * * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT(). * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
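 *
 * A minimal sketch of the intended bracketing; the code between the
 * two macros may sleep without holding Giant:
 *
 *	DROP_GIANT();
 *	error = tsleep(chan, pri, "wmesg", timo);
 *	PICKUP_GIANT();
 *
 * DROP_GIANT() opens a brace scope that PICKUP_GIANT() closes, so the
 * pair must sit in the same function at the same nesting level.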
*/ #ifndef DROP_GIANT #define DROP_GIANT() \ do { \ int _giantcnt = 0; \ WITNESS_SAVE_DECL(Giant); \ \ if (mtx_owned(&Giant)) { \ WITNESS_SAVE(&Giant.lock_object, Giant); \ for (_giantcnt = 0; mtx_owned(&Giant) && \ !SCHEDULER_STOPPED(); _giantcnt++) \ mtx_unlock(&Giant); \ } #define PICKUP_GIANT() \ PARTIAL_PICKUP_GIANT(); \ } while (0) #define PARTIAL_PICKUP_GIANT() \ mtx_assert(&Giant, MA_NOTOWNED); \ if (_giantcnt > 0) { \ while (_giantcnt--) \ mtx_lock(&Giant); \ WITNESS_RESTORE(&Giant.lock_object, Giant); \ } #endif struct mtx_args { void *ma_mtx; const char *ma_desc; int ma_opts; }; #define MTX_SYSINIT(name, mtx, desc, opts) \ static struct mtx_args name##_args = { \ (mtx), \ (desc), \ (opts) \ }; \ SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ mtx_sysinit, &name##_args); \ SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock)) /* * The INVARIANTS-enabled mtx_assert() functionality. * * The constants need to be defined for INVARIANT_SUPPORT infrastructure * support as _mtx_assert() itself uses them and the latter implies that * _mtx_assert() must build. */ #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define MA_OWNED LA_XLOCKED #define MA_NOTOWNED LA_UNLOCKED #define MA_RECURSED LA_RECURSED #define MA_NOTRECURSED LA_NOTRECURSED #endif /* * Common lock type names. */ #define MTX_NETWORK_LOCK "network driver" #endif /* _KERNEL */ #endif /* _SYS_MUTEX_H_ */ Index: head/sys/sys/sdt.h =================================================================== --- head/sys/sys/sdt.h (revision 313274) +++ head/sys/sys/sdt.h (revision 313275) @@ -1,425 +1,428 @@ /*- * Copyright 2006-2008 John Birrell * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * * Statically Defined Tracing (SDT) definitions. 
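 *
 * A minimal sketch of kernel-side usage; the provider "example" and
 * its probe are illustrative only:
 *
 *	SDT_PROVIDER_DEFINE(example);
 *	SDT_PROBE_DEFINE1(example, , , alloc__fail, "size_t");
 *
 *	if (failed)
 *		SDT_PROBE1(example, , , alloc__fail, size);
 *
 * As SDT_PROBE() below shows, a disabled probe site costs only a
 * predicted-false test of the probe's id word.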
* */ #ifndef _SYS_SDT_H #define _SYS_SDT_H #ifndef _KERNEL #define _DTRACE_VERSION 1 #define DTRACE_PROBE(prov, name) { \ extern void __dtrace_##prov##___##name(void); \ __dtrace_##prov##___##name(); \ } #define DTRACE_PROBE1(prov, name, arg1) { \ extern void __dtrace_##prov##___##name(unsigned long); \ __dtrace_##prov##___##name((unsigned long)arg1); \ } #define DTRACE_PROBE2(prov, name, arg1, arg2) { \ extern void __dtrace_##prov##___##name(unsigned long, \ unsigned long); \ __dtrace_##prov##___##name((unsigned long)arg1, \ (unsigned long)arg2); \ } #define DTRACE_PROBE3(prov, name, arg1, arg2, arg3) { \ extern void __dtrace_##prov##___##name(unsigned long, \ unsigned long, unsigned long); \ __dtrace_##prov##___##name((unsigned long)arg1, \ (unsigned long)arg2, (unsigned long)arg3); \ } #define DTRACE_PROBE4(prov, name, arg1, arg2, arg3, arg4) { \ extern void __dtrace_##prov##___##name(unsigned long, \ unsigned long, unsigned long, unsigned long); \ __dtrace_##prov##___##name((unsigned long)arg1, \ (unsigned long)arg2, (unsigned long)arg3, \ (unsigned long)arg4); \ } #define DTRACE_PROBE5(prov, name, arg1, arg2, arg3, arg4, arg5) { \ extern void __dtrace_##prov##___##name(unsigned long, \ unsigned long, unsigned long, unsigned long, unsigned long);\ __dtrace_##prov##___##name((unsigned long)arg1, \ (unsigned long)arg2, (unsigned long)arg3, \ (unsigned long)arg4, (unsigned long)arg5); \ } #else /* _KERNEL */ #include #include #ifndef KDTRACE_HOOKS #define SDT_PROVIDER_DEFINE(prov) #define SDT_PROVIDER_DECLARE(prov) #define SDT_PROBE_DEFINE(prov, mod, func, name) #define SDT_PROBE_DECLARE(prov, mod, func, name) #define SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) #define SDT_PROBE_ARGTYPE(prov, mod, func, name, num, type, xtype) #define SDT_PROBE_DEFINE0(prov, mod, func, name) #define SDT_PROBE_DEFINE1(prov, mod, func, name, arg0) #define SDT_PROBE_DEFINE2(prov, mod, func, name, arg0, arg1) #define SDT_PROBE_DEFINE3(prov, mod, func, name, arg0, arg1, arg2) #define SDT_PROBE_DEFINE4(prov, mod, func, name, arg0, arg1, arg2, arg3) #define SDT_PROBE_DEFINE5(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) #define SDT_PROBE_DEFINE6(prov, mod, func, name, arg0, arg1, arg2, \ arg3, arg4, arg5) #define SDT_PROBE_DEFINE7(prov, mod, func, name, arg0, arg1, arg2, \ arg3, arg4, arg5, arg6) #define SDT_PROBE0(prov, mod, func, name) #define SDT_PROBE1(prov, mod, func, name, arg0) #define SDT_PROBE2(prov, mod, func, name, arg0, arg1) #define SDT_PROBE3(prov, mod, func, name, arg0, arg1, arg2) #define SDT_PROBE4(prov, mod, func, name, arg0, arg1, arg2, arg3) #define SDT_PROBE5(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) #define SDT_PROBE6(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4, arg5) #define SDT_PROBE7(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4, arg5, \ arg6) #define SDT_PROBE_DEFINE0_XLATE(prov, mod, func, name) #define SDT_PROBE_DEFINE1_XLATE(prov, mod, func, name, arg0, xarg0) #define SDT_PROBE_DEFINE2_XLATE(prov, mod, func, name, arg0, xarg0, \ arg1, xarg1) #define SDT_PROBE_DEFINE3_XLATE(prov, mod, func, name, arg0, xarg0, \ arg1, xarg1, arg2, xarg2) #define SDT_PROBE_DEFINE4_XLATE(prov, mod, func, name, arg0, xarg0, \ arg1, xarg1, arg2, xarg2, arg3, xarg3) #define SDT_PROBE_DEFINE5_XLATE(prov, mod, func, name, arg0, xarg0, \ arg1, xarg1, arg2, xarg2, arg3, xarg3, arg4, xarg4) #define SDT_PROBE_DEFINE6_XLATE(prov, mod, func, name, arg0, xarg0, \ arg1, xarg1, arg2, xarg2, arg3, xarg3, arg4, xarg4, arg5, xarg5) #define SDT_PROBE_DEFINE7_XLATE(prov, 
mod, func, name, arg0, xarg0, \ arg1, xarg1, arg2, xarg2, arg3, xarg3, arg4, xarg4, arg5, xarg5, arg6, \ xarg6) #define DTRACE_PROBE(name) #define DTRACE_PROBE1(name, type0, arg0) #define DTRACE_PROBE2(name, type0, arg0, type1, arg1) #define DTRACE_PROBE3(name, type0, arg0, type1, arg1, type2, arg2) #define DTRACE_PROBE4(name, type0, arg0, type1, arg1, type2, arg2, type3, arg3) #define DTRACE_PROBE5(name, type0, arg0, type1, arg1, type2, arg2, type3, arg3,\ type4, arg4) #else SET_DECLARE(sdt_providers_set, struct sdt_provider); SET_DECLARE(sdt_probes_set, struct sdt_probe); SET_DECLARE(sdt_argtypes_set, struct sdt_argtype); #define SDT_PROVIDER_DEFINE(prov) \ struct sdt_provider sdt_provider_##prov[1] = { \ { #prov, { NULL, NULL }, 0, 0 } \ }; \ DATA_SET(sdt_providers_set, sdt_provider_##prov); #define SDT_PROVIDER_DECLARE(prov) \ extern struct sdt_provider sdt_provider_##prov[1] #define SDT_PROBE_DEFINE(prov, mod, func, name) \ struct sdt_probe sdt_##prov##_##mod##_##func##_##name[1] = { \ { sizeof(struct sdt_probe), sdt_provider_##prov, \ { NULL, NULL }, { NULL, NULL }, #mod, #func, #name, 0, 0, \ NULL } \ }; \ DATA_SET(sdt_probes_set, sdt_##prov##_##mod##_##func##_##name); #define SDT_PROBE_DECLARE(prov, mod, func, name) \ extern struct sdt_probe sdt_##prov##_##mod##_##func##_##name[1] +#define SDT_PROBE_ENABLED(prov, mod, func, name) \ + __predict_false((sdt_##prov##_##mod##_##func##_##name->id)) + #define SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) do { \ if (__predict_false(sdt_##prov##_##mod##_##func##_##name->id)) \ (*sdt_probe_func)(sdt_##prov##_##mod##_##func##_##name->id, \ (uintptr_t) arg0, (uintptr_t) arg1, (uintptr_t) arg2, \ (uintptr_t) arg3, (uintptr_t) arg4); \ } while (0) #define SDT_PROBE_ARGTYPE(prov, mod, func, name, num, type, xtype) \ static struct sdt_argtype sdta_##prov##_##mod##_##func##_##name##num[1] \ = { { num, type, xtype, { NULL, NULL }, \ sdt_##prov##_##mod##_##func##_##name } \ }; \ DATA_SET(sdt_argtypes_set, sdta_##prov##_##mod##_##func##_##name##num); #define SDT_PROBE_DEFINE0(prov, mod, func, name) \ SDT_PROBE_DEFINE(prov, mod, func, name) #define SDT_PROBE_DEFINE1(prov, mod, func, name, arg0) \ SDT_PROBE_DEFINE(prov, mod, func, name); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, NULL) #define SDT_PROBE_DEFINE2(prov, mod, func, name, arg0, arg1) \ SDT_PROBE_DEFINE(prov, mod, func, name); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, NULL) #define SDT_PROBE_DEFINE3(prov, mod, func, name, arg0, arg1, arg2)\ SDT_PROBE_DEFINE(prov, mod, func, name); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, NULL) #define SDT_PROBE_DEFINE4(prov, mod, func, name, arg0, arg1, arg2, arg3) \ SDT_PROBE_DEFINE(prov, mod, func, name); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3, NULL) #define SDT_PROBE_DEFINE5(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) \ SDT_PROBE_DEFINE(prov, mod, func, name); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3, NULL); \ SDT_PROBE_ARGTYPE(prov, mod, func, name, 4, arg4, 
#define SDT_PROBE_DEFINE6(prov, mod, func, name, arg0, arg1, arg2, arg3,\
	arg4, arg5) \
	SDT_PROBE_DEFINE(prov, mod, func, name); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 4, arg4, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 5, arg5, NULL)

#define SDT_PROBE_DEFINE7(prov, mod, func, name, arg0, arg1, arg2, arg3,\
	arg4, arg5, arg6) \
	SDT_PROBE_DEFINE(prov, mod, func, name); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 4, arg4, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 5, arg5, NULL); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 6, arg6, NULL)

#define SDT_PROBE_DEFINE0_XLATE(prov, mod, func, name) \
	SDT_PROBE_DEFINE(prov, mod, func, name)

#define SDT_PROBE_DEFINE1_XLATE(prov, mod, func, name, arg0, xarg0) \
	SDT_PROBE_DEFINE(prov, mod, func, name); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, xarg0)

#define SDT_PROBE_DEFINE2_XLATE(prov, mod, func, name, arg0, xarg0, \
    arg1, xarg1) \
	SDT_PROBE_DEFINE(prov, mod, func, name); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, xarg0); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, xarg1)

#define SDT_PROBE_DEFINE3_XLATE(prov, mod, func, name, arg0, xarg0, \
    arg1, xarg1, arg2, xarg2) \
	SDT_PROBE_DEFINE(prov, mod, func, name); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, xarg0); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, xarg1); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, xarg2)

#define SDT_PROBE_DEFINE4_XLATE(prov, mod, func, name, arg0, xarg0, \
    arg1, xarg1, arg2, xarg2, arg3, xarg3) \
	SDT_PROBE_DEFINE(prov, mod, func, name); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, xarg0); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, xarg1); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, xarg2); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3, xarg3)

#define SDT_PROBE_DEFINE5_XLATE(prov, mod, func, name, arg0, xarg0, \
    arg1, xarg1, arg2, xarg2, arg3, xarg3, arg4, xarg4) \
	SDT_PROBE_DEFINE(prov, mod, func, name); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, xarg0); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, xarg1); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, xarg2); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3, xarg3); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 4, arg4, xarg4)

#define SDT_PROBE_DEFINE6_XLATE(prov, mod, func, name, arg0, xarg0, \
    arg1, xarg1, arg2, xarg2, arg3, xarg3, arg4, xarg4, arg5, xarg5) \
	SDT_PROBE_DEFINE(prov, mod, func, name); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, xarg0); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, xarg1); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, xarg2); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3, xarg3); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 4, arg4, xarg4); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 5, arg5, xarg5)

#define SDT_PROBE_DEFINE7_XLATE(prov, mod, func, name, arg0, xarg0, \
    arg1, xarg1, arg2, xarg2, arg3, xarg3, arg4, xarg4, arg5, xarg5, arg6, \
    xarg6) \
	SDT_PROBE_DEFINE(prov, mod, func, name); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0, xarg0); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1, xarg1); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2, xarg2); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3, xarg3); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 4, arg4, xarg4); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 5, arg5, xarg5); \
	SDT_PROBE_ARGTYPE(prov, mod, func, name, 6, arg6, xarg6)
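
/*
 * Illustrative sketch, not part of this header: the _XLATE variants record
 * a translated type (the xarg string) alongside the native kernel type, so
 * D scripts can consume a stable translated structure instead of the raw
 * kernel one.  The probe and type names below are hypothetical.
 *
 *	SDT_PROBE_DEFINE1_XLATE(example, , , pkt__in,
 *	    "struct mbuf *", "pktinfo_t *");
 */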
#define SDT_PROBE0(prov, mod, func, name) \
	SDT_PROBE(prov, mod, func, name, 0, 0, 0, 0, 0)
#define SDT_PROBE1(prov, mod, func, name, arg0) \
	SDT_PROBE(prov, mod, func, name, arg0, 0, 0, 0, 0)
#define SDT_PROBE2(prov, mod, func, name, arg0, arg1) \
	SDT_PROBE(prov, mod, func, name, arg0, arg1, 0, 0, 0)
#define SDT_PROBE3(prov, mod, func, name, arg0, arg1, arg2) \
	SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, 0, 0)
#define SDT_PROBE4(prov, mod, func, name, arg0, arg1, arg2, arg3) \
	SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, arg3, 0)
#define SDT_PROBE5(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) \
	SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4)
#define SDT_PROBE6(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4, arg5) \
	do { \
		if (sdt_##prov##_##mod##_##func##_##name->id) \
			(*(void (*)(uint32_t, uintptr_t, uintptr_t, uintptr_t, \
			    uintptr_t, uintptr_t, uintptr_t))sdt_probe_func)( \
			    sdt_##prov##_##mod##_##func##_##name->id, \
			    (uintptr_t)arg0, (uintptr_t)arg1, (uintptr_t)arg2, \
			    (uintptr_t)arg3, (uintptr_t)arg4, (uintptr_t)arg5);\
	} while (0)
#define SDT_PROBE7(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4, arg5, \
    arg6) \
	do { \
		if (sdt_##prov##_##mod##_##func##_##name->id) \
			(*(void (*)(uint32_t, uintptr_t, uintptr_t, uintptr_t, \
			    uintptr_t, uintptr_t, uintptr_t, uintptr_t)) \
			    sdt_probe_func)( \
			    sdt_##prov##_##mod##_##func##_##name->id, \
			    (uintptr_t)arg0, (uintptr_t)arg1, (uintptr_t)arg2, \
			    (uintptr_t)arg3, (uintptr_t)arg4, (uintptr_t)arg5, \
			    (uintptr_t)arg6); \
	} while (0)

#define DTRACE_PROBE_IMPL_START(name, arg0, arg1, arg2, arg3, arg4) do { \
	static SDT_PROBE_DEFINE(sdt, , , name); \
	SDT_PROBE(sdt, , , name, arg0, arg1, arg2, arg3, arg4);
#define DTRACE_PROBE_IMPL_END	} while (0)

#define DTRACE_PROBE(name) \
	DTRACE_PROBE_IMPL_START(name, 0, 0, 0, 0, 0) \
	DTRACE_PROBE_IMPL_END

#define DTRACE_PROBE1(name, type0, arg0) \
	DTRACE_PROBE_IMPL_START(name, arg0, 0, 0, 0, 0) \
	SDT_PROBE_ARGTYPE(sdt, , , name, 0, #type0, NULL); \
	DTRACE_PROBE_IMPL_END

#define DTRACE_PROBE2(name, type0, arg0, type1, arg1) \
	DTRACE_PROBE_IMPL_START(name, arg0, arg1, 0, 0, 0) \
	SDT_PROBE_ARGTYPE(sdt, , , name, 0, #type0, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 1, #type1, NULL); \
	DTRACE_PROBE_IMPL_END

#define DTRACE_PROBE3(name, type0, arg0, type1, arg1, type2, arg2) \
	DTRACE_PROBE_IMPL_START(name, arg0, arg1, arg2, 0, 0) \
	SDT_PROBE_ARGTYPE(sdt, , , name, 0, #type0, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 1, #type1, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 2, #type2, NULL); \
	DTRACE_PROBE_IMPL_END

#define DTRACE_PROBE4(name, type0, arg0, type1, arg1, type2, arg2, type3, arg3) \
	DTRACE_PROBE_IMPL_START(name, arg0, arg1, arg2, arg3, 0) \
	SDT_PROBE_ARGTYPE(sdt, , , name, 0, #type0, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 1, #type1, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 2, #type2, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 3, #type3, NULL); \
	DTRACE_PROBE_IMPL_END

#define DTRACE_PROBE5(name, type0, arg0, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
	DTRACE_PROBE_IMPL_START(name, arg0, arg1, arg2, arg3, arg4) \
	SDT_PROBE_ARGTYPE(sdt, , , name, 0, #type0, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 1, #type1, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 2, #type2, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 3, #type3, NULL); \
	SDT_PROBE_ARGTYPE(sdt, , , name, 4, #type4, NULL); \
	DTRACE_PROBE_IMPL_END
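
/*
 * Illustrative sketch, not part of this header: DTRACE_PROBEn() both
 * defines an anonymous probe under the "sdt" provider (via the static
 * SDT_PROBE_DEFINE inside DTRACE_PROBE_IMPL_START) and fires it at the
 * call site, so no file-scope probe definition is needed.  The probe and
 * variable names below are hypothetical.
 *
 *	DTRACE_PROBE2(io__done, int, error, size_t, resid);
 */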
#endif /* KDTRACE_HOOKS */

/*
 * This type definition must match that of dtrace_probe. It is defined this
 * way to avoid having to rely on CDDL code.
 */
typedef void (*sdt_probe_func_t)(uint32_t, uintptr_t arg0, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3, uintptr_t arg4);

/*
 * The 'sdt' provider will set it to dtrace_probe when it loads.
 */
extern sdt_probe_func_t sdt_probe_func;

struct sdt_probe;
struct sdt_provider;
struct linker_file;

struct sdt_argtype {
	int	ndx;			/* Argument index. */
	const char *type;		/* Argument type string. */
	const char *xtype;		/* Translated argument type. */
	TAILQ_ENTRY(sdt_argtype) argtype_entry; /* Argument type list entry. */
	struct sdt_probe *probe;	/* Ptr to the probe structure. */
};

struct sdt_probe {
	int	version;		/* Set to sizeof(struct sdt_probe). */
	struct sdt_provider *prov;	/* Ptr to the provider structure. */
	TAILQ_ENTRY(sdt_probe) probe_entry; /* SDT probe list entry. */
	TAILQ_HEAD(, sdt_argtype) argtype_list;
	const char *mod;
	const char *func;
	const char *name;
	id_t	id;			/* DTrace probe ID. */
	int	n_args;			/* Number of arguments. */
	struct linker_file *sdtp_lf;	/* Module in which we're defined. */
};

struct sdt_provider {
	char *name;			/* Provider name. */
	TAILQ_ENTRY(sdt_provider) prov_entry; /* SDT provider list entry. */
	uintptr_t id;			/* DTrace provider ID. */
	int	sdt_refs;		/* Number of module references. */
};

void sdt_probe_stub(uint32_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t,
    uintptr_t);

SDT_PROVIDER_DECLARE(sdt);

#endif /* _KERNEL */

#endif /* _SYS_SDT_H */
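
/*
 * Illustrative end-to-end sketch, not part of this header; all names here
 * are hypothetical.  A subsystem typically defines a provider and its
 * probes at file scope, then fires them from the code path being traced:
 *
 *	SDT_PROVIDER_DEFINE(example);
 *	SDT_PROBE_DEFINE2(example, , io, start, "int", "size_t");
 *
 *	void
 *	example_io_start(int fd, size_t len)
 *	{
 *		SDT_PROBE2(example, , io, start, fd, len);
 *	}
 *
 * With KDTRACE_HOOKS disabled, the macros above expand to nothing, so the
 * instrumentation has no cost in kernels built without DTrace support.
 */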