Index: head/sys/kern/subr_lock.c =================================================================== --- head/sys/kern/subr_lock.c (revision 261237) +++ head/sys/kern/subr_lock.c (revision 261238) @@ -1,649 +1,649 @@ /*- * Copyright (c) 2006 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This module holds the global variables and functions used to maintain * lock_object structures. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_mprof.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include CTASSERT(LOCK_CLASS_MAX == 15); struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = { &lock_class_mtx_spin, &lock_class_mtx_sleep, &lock_class_sx, &lock_class_rm, &lock_class_rm_sleepable, &lock_class_rw, &lock_class_lockmgr, }; void lock_init(struct lock_object *lock, struct lock_class *class, const char *name, const char *type, int flags) { int i; /* Check for double-init and zero object. */ - KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized", + KASSERT(!lock_initialized(lock), ("lock \"%s\" %p already initialized", name, lock)); /* Look up lock class to find its index. */ for (i = 0; i < LOCK_CLASS_MAX; i++) if (lock_classes[i] == class) { lock->lo_flags = i << LO_CLASSSHIFT; break; } KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class)); /* Initialize the lock object. */ lock->lo_name = name; lock->lo_flags |= flags | LO_INITIALIZED; LOCK_LOG_INIT(lock, 0); WITNESS_INIT(lock, (type != NULL) ? 
type : name); } void lock_destroy(struct lock_object *lock) { - KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock)); + KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock)); WITNESS_DESTROY(lock); LOCK_LOG_DESTROY(lock, 0); lock->lo_flags &= ~LO_INITIALIZED; } #ifdef DDB DB_SHOW_COMMAND(lock, db_show_lock) { struct lock_object *lock; struct lock_class *class; if (!have_addr) return; lock = (struct lock_object *)addr; if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) { db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock)); return; } class = LOCK_CLASS(lock); db_printf(" class: %s\n", class->lc_name); db_printf(" name: %s\n", lock->lo_name); class->lc_ddb_show(lock); } #endif #ifdef LOCK_PROFILING /* * One object per-thread for each lock the thread owns. Tracks individual * lock instances. */ struct lock_profile_object { LIST_ENTRY(lock_profile_object) lpo_link; struct lock_object *lpo_obj; const char *lpo_file; int lpo_line; uint16_t lpo_ref; uint16_t lpo_cnt; uint64_t lpo_acqtime; uint64_t lpo_waittime; u_int lpo_contest_locking; }; /* * One lock_prof for each (file, line, lock object) triple. */ struct lock_prof { SLIST_ENTRY(lock_prof) link; struct lock_class *class; const char *file; const char *name; int line; int ticks; uintmax_t cnt_wait_max; uintmax_t cnt_max; uintmax_t cnt_tot; uintmax_t cnt_wait; uintmax_t cnt_cur; uintmax_t cnt_contest_locking; }; SLIST_HEAD(lphead, lock_prof); #define LPROF_HASH_SIZE 4096 #define LPROF_HASH_MASK (LPROF_HASH_SIZE - 1) #define LPROF_CACHE_SIZE 4096 /* * Array of objects and profs for each type of object for each cpu. Spinlocks * are handled separately because a thread may be preempted and acquire a * spinlock while in the lock profiling code of a non-spinlock. In this way * we only need a critical section to protect the per-cpu lists. */ struct lock_prof_type { struct lphead lpt_lpalloc; struct lpohead lpt_lpoalloc; struct lphead lpt_hash[LPROF_HASH_SIZE]; struct lock_prof lpt_prof[LPROF_CACHE_SIZE]; struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE]; }; struct lock_prof_cpu { struct lock_prof_type lpc_types[2]; /* One for spin one for other. */ }; struct lock_prof_cpu *lp_cpu[MAXCPU]; volatile int lock_prof_enable = 0; static volatile int lock_prof_resetting; #define LPROF_SBUF_SIZE 256 static int lock_prof_rejected; static int lock_prof_skipspin; static int lock_prof_skipcount; #ifndef USE_CPU_NANOSECONDS uint64_t nanoseconds(void) { struct bintime bt; uint64_t ns; binuptime(&bt); /* From bintime2timespec */ ns = bt.sec * (uint64_t)1000000000; ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32; return (ns); } #endif static void lock_prof_init_type(struct lock_prof_type *type) { int i; SLIST_INIT(&type->lpt_lpalloc); LIST_INIT(&type->lpt_lpoalloc); for (i = 0; i < LPROF_CACHE_SIZE; i++) { SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i], link); LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i], lpo_link); } } static void lock_prof_init(void *arg) { int cpu; for (cpu = 0; cpu <= mp_maxid; cpu++) { lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF, M_WAITOK | M_ZERO); lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]); lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]); } } SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL); static void lock_prof_reset_wait(void) { /* * Spin relinquishing our cpu so that quiesce_all_cpus may * complete. 
*/ while (lock_prof_resetting) sched_relinquish(curthread); } static void lock_prof_reset(void) { struct lock_prof_cpu *lpc; int enabled, i, cpu; /* * We not only race with acquiring and releasing locks but also * thread exit. To be certain that threads exit without valid head * pointers they must see resetting set before enabled is cleared. * Otherwise a lock may not be removed from a per-thread list due * to disabled being set but not wait for reset() to remove it below. */ atomic_store_rel_int(&lock_prof_resetting, 1); enabled = lock_prof_enable; lock_prof_enable = 0; quiesce_all_cpus("profreset", 0); /* * Some objects may have migrated between CPUs. Clear all links * before we zero the structures. Some items may still be linked * into per-thread lists as well. */ for (cpu = 0; cpu <= mp_maxid; cpu++) { lpc = lp_cpu[cpu]; for (i = 0; i < LPROF_CACHE_SIZE; i++) { LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link); LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link); } } for (cpu = 0; cpu <= mp_maxid; cpu++) { lpc = lp_cpu[cpu]; bzero(lpc, sizeof(*lpc)); lock_prof_init_type(&lpc->lpc_types[0]); lock_prof_init_type(&lpc->lpc_types[1]); } atomic_store_rel_int(&lock_prof_resetting, 0); lock_prof_enable = enabled; } static void lock_prof_output(struct lock_prof *lp, struct sbuf *sb) { const char *p; for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3); sbuf_printf(sb, "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n", lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000, lp->cnt_wait / 1000, lp->cnt_cur, lp->cnt_cur == 0 ? (uintmax_t)0 : lp->cnt_tot / (lp->cnt_cur * 1000), lp->cnt_cur == 0 ? (uintmax_t)0 : lp->cnt_wait / (lp->cnt_cur * 1000), (uintmax_t)0, lp->cnt_contest_locking, p, lp->line, lp->class->lc_name, lp->name); } static void lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash, int spin, int t) { struct lock_prof_type *type; struct lock_prof *l; int cpu; dst->file = match->file; dst->line = match->line; dst->class = match->class; dst->name = match->name; for (cpu = 0; cpu <= mp_maxid; cpu++) { if (lp_cpu[cpu] == NULL) continue; type = &lp_cpu[cpu]->lpc_types[spin]; SLIST_FOREACH(l, &type->lpt_hash[hash], link) { if (l->ticks == t) continue; if (l->file != match->file || l->line != match->line || l->name != match->name) continue; l->ticks = t; if (l->cnt_max > dst->cnt_max) dst->cnt_max = l->cnt_max; if (l->cnt_wait_max > dst->cnt_wait_max) dst->cnt_wait_max = l->cnt_wait_max; dst->cnt_tot += l->cnt_tot; dst->cnt_wait += l->cnt_wait; dst->cnt_cur += l->cnt_cur; dst->cnt_contest_locking += l->cnt_contest_locking; } } } static void lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin, int t) { struct lock_prof *l; int i; for (i = 0; i < LPROF_HASH_SIZE; ++i) { SLIST_FOREACH(l, &type->lpt_hash[i], link) { struct lock_prof lp = {}; if (l->ticks == t) continue; lock_prof_sum(l, &lp, i, spin, t); lock_prof_output(&lp, sb); } } } static int dump_lock_prof_stats(SYSCTL_HANDLER_ARGS) { struct sbuf *sb; int error, cpu, t; int enabled; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req); sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n", "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name"); enabled = lock_prof_enable; lock_prof_enable = 0; quiesce_all_cpus("profstat", 0); t = ticks; for (cpu = 0; cpu <= mp_maxid; cpu++) { if (lp_cpu[cpu] == NULL) continue; 
lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t); lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t); } lock_prof_enable = enabled; error = sbuf_finish(sb); /* Output a trailing NUL. */ if (error == 0) error = SYSCTL_OUT(req, "", 1); sbuf_delete(sb); return (error); } static int enable_lock_prof(SYSCTL_HANDLER_ARGS) { int error, v; v = lock_prof_enable; error = sysctl_handle_int(oidp, &v, v, req); if (error) return (error); if (req->newptr == NULL) return (error); if (v == lock_prof_enable) return (0); if (v == 1) lock_prof_reset(); lock_prof_enable = !!v; return (0); } static int reset_lock_prof_stats(SYSCTL_HANDLER_ARGS) { int error, v; v = 0; error = sysctl_handle_int(oidp, &v, 0, req); if (error) return (error); if (req->newptr == NULL) return (error); if (v == 0) return (0); lock_prof_reset(); return (0); } static struct lock_prof * lock_profile_lookup(struct lock_object *lo, int spin, const char *file, int line) { const char *unknown = "(unknown)"; struct lock_prof_type *type; struct lock_prof *lp; struct lphead *head; const char *p; u_int hash; p = file; if (p == NULL || *p == '\0') p = unknown; hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line; hash &= LPROF_HASH_MASK; type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; head = &type->lpt_hash[hash]; SLIST_FOREACH(lp, head, link) { if (lp->line == line && lp->file == p && lp->name == lo->lo_name) return (lp); } lp = SLIST_FIRST(&type->lpt_lpalloc); if (lp == NULL) { lock_prof_rejected++; return (lp); } SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link); lp->file = p; lp->line = line; lp->class = LOCK_CLASS(lo); lp->name = lo->lo_name; SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link); return (lp); } static struct lock_profile_object * lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file, int line) { struct lock_profile_object *l; struct lock_prof_type *type; struct lpohead *head; head = &curthread->td_lprof[spin]; LIST_FOREACH(l, head, lpo_link) if (l->lpo_obj == lo && l->lpo_file == file && l->lpo_line == line) return (l); type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; l = LIST_FIRST(&type->lpt_lpoalloc); if (l == NULL) { lock_prof_rejected++; return (NULL); } LIST_REMOVE(l, lpo_link); l->lpo_obj = lo; l->lpo_file = file; l->lpo_line = line; l->lpo_cnt = 0; LIST_INSERT_HEAD(head, l, lpo_link); return (l); } void lock_profile_obtain_lock_success(struct lock_object *lo, int contested, uint64_t waittime, const char *file, int line) { static int lock_prof_count; struct lock_profile_object *l; int spin; if (SCHEDULER_STOPPED()) return; /* don't reset the timer when/if recursing */ if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE)) return; if (lock_prof_skipcount && (++lock_prof_count % lock_prof_skipcount) != 0) return; spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; if (spin && lock_prof_skipspin == 1) return; critical_enter(); /* Recheck enabled now that we're in a critical section. 
*/ if (lock_prof_enable == 0) goto out; l = lock_profile_object_lookup(lo, spin, file, line); if (l == NULL) goto out; l->lpo_cnt++; if (++l->lpo_ref > 1) goto out; l->lpo_contest_locking = contested; l->lpo_acqtime = nanoseconds(); if (waittime && (l->lpo_acqtime > waittime)) l->lpo_waittime = l->lpo_acqtime - waittime; else l->lpo_waittime = 0; out: critical_exit(); } void lock_profile_thread_exit(struct thread *td) { #ifdef INVARIANTS struct lock_profile_object *l; MPASS(curthread->td_critnest == 0); #endif /* * If lock profiling was disabled we have to wait for reset to * clear our pointers before we can exit safely. */ lock_prof_reset_wait(); #ifdef INVARIANTS LIST_FOREACH(l, &td->td_lprof[0], lpo_link) printf("thread still holds lock acquired at %s:%d\n", l->lpo_file, l->lpo_line); LIST_FOREACH(l, &td->td_lprof[1], lpo_link) printf("thread still holds lock acquired at %s:%d\n", l->lpo_file, l->lpo_line); #endif MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL); MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL); } void lock_profile_release_lock(struct lock_object *lo) { struct lock_profile_object *l; struct lock_prof_type *type; struct lock_prof *lp; uint64_t curtime, holdtime; struct lpohead *head; int spin; if (SCHEDULER_STOPPED()) return; if (lo->lo_flags & LO_NOPROFILE) return; spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; head = &curthread->td_lprof[spin]; if (LIST_FIRST(head) == NULL) return; critical_enter(); /* Recheck enabled now that we're in a critical section. */ if (lock_prof_enable == 0 && lock_prof_resetting == 1) goto out; /* * If lock profiling is not enabled we still want to remove the * lpo from our queue. */ LIST_FOREACH(l, head, lpo_link) if (l->lpo_obj == lo) break; if (l == NULL) goto out; if (--l->lpo_ref > 0) goto out; lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line); if (lp == NULL) goto release; curtime = nanoseconds(); if (curtime < l->lpo_acqtime) goto release; holdtime = curtime - l->lpo_acqtime; /* * Record if the lock has been held longer now than ever * before. 
*/ if (holdtime > lp->cnt_max) lp->cnt_max = holdtime; if (l->lpo_waittime > lp->cnt_wait_max) lp->cnt_wait_max = l->lpo_waittime; lp->cnt_tot += holdtime; lp->cnt_wait += l->lpo_waittime; lp->cnt_contest_locking += l->lpo_contest_locking; lp->cnt_cur += l->lpo_cnt; release: LIST_REMOVE(l, lpo_link); type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link); out: critical_exit(); } static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging"); static SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling"); SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW, &lock_prof_skipspin, 0, "Skip profiling on spinlocks."); SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW, &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions."); SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD, &lock_prof_rejected, 0, "Number of rejected profiling records"); SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics"); SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW, NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics"); SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW, NULL, 0, enable_lock_prof, "I", "Enable lock profiling"); #endif Index: head/sys/sys/lock.h =================================================================== --- head/sys/sys/lock.h (revision 261237) +++ head/sys/sys/lock.h (revision 261238) @@ -1,312 +1,312 @@ /*- * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp * $FreeBSD$ */ #ifndef _SYS_LOCK_H_ #define _SYS_LOCK_H_ #include #include #include struct lock_list_entry; struct thread; /* * Lock classes. Each lock has a class which describes characteristics * common to all types of locks of a given class. 
* * Spin locks in general must always protect against preemption, as it is * an error to perform any type of context switch while holding a spin lock. * Also, for an individual lock to be recursable, its class must allow * recursion and the lock itself must explicitly allow recursion. * * The 'lc_ddb_show' function pointer is used to dump class-specific * data for the 'show lock' DDB command. The 'lc_lock' and * 'lc_unlock' function pointers are used in sleep(9) and cv_wait(9) * to lock and unlock locks while blocking on a sleep queue. The * return value of 'lc_unlock' will be passed to 'lc_lock' on resume * to allow communication of state between the two routines. */ struct lock_class { const char *lc_name; u_int lc_flags; void (*lc_assert)(const struct lock_object *lock, int what); void (*lc_ddb_show)(const struct lock_object *lock); void (*lc_lock)(struct lock_object *lock, uintptr_t how); int (*lc_owner)(const struct lock_object *lock, struct thread **owner); uintptr_t (*lc_unlock)(struct lock_object *lock); }; #define LC_SLEEPLOCK 0x00000001 /* Sleep lock. */ #define LC_SPINLOCK 0x00000002 /* Spin lock. */ #define LC_SLEEPABLE 0x00000004 /* Sleeping allowed with this lock. */ #define LC_RECURSABLE 0x00000008 /* Locks of this type may recurse. */ #define LC_UPGRADABLE 0x00000010 /* Upgrades and downgrades permitted. */ #define LO_CLASSFLAGS 0x0000ffff /* Class specific flags. */ #define LO_INITIALIZED 0x00010000 /* Lock has been initialized. */ #define LO_WITNESS 0x00020000 /* Should witness monitor this lock. */ #define LO_QUIET 0x00040000 /* Don't log locking operations. */ #define LO_RECURSABLE 0x00080000 /* Lock may recurse. */ #define LO_SLEEPABLE 0x00100000 /* Lock may be held while sleeping. */ #define LO_UPGRADABLE 0x00200000 /* Lock may be upgraded/downgraded. */ #define LO_DUPOK 0x00400000 /* Don't check for duplicate acquires */ #define LO_IS_VNODE 0x00800000 /* Tell WITNESS about a VNODE lock */ #define LO_CLASSMASK 0x0f000000 /* Class index bitmask. */ #define LO_NOPROFILE 0x10000000 /* Don't profile this lock */ /* * Lock classes are statically assigned an index into the gobal lock_classes * array. Debugging code looks up the lock class for a given lock object * by indexing the array. */ #define LO_CLASSSHIFT 24 #define LO_CLASSINDEX(lock) ((((lock)->lo_flags) & LO_CLASSMASK) >> LO_CLASSSHIFT) #define LOCK_CLASS(lock) (lock_classes[LO_CLASSINDEX((lock))]) #define LOCK_CLASS_MAX (LO_CLASSMASK >> LO_CLASSSHIFT) /* * Option flags passed to lock operations that witness also needs to know * about or that are generic across all locks. */ #define LOP_NEWORDER 0x00000001 /* Define a new lock order. */ #define LOP_QUIET 0x00000002 /* Don't log locking operations. */ #define LOP_TRYLOCK 0x00000004 /* Don't check lock order. */ #define LOP_EXCLUSIVE 0x00000008 /* Exclusive lock. */ #define LOP_DUPOK 0x00000010 /* Don't check for duplicate acquires */ /* Flags passed to witness_assert. */ #define LA_MASKASSERT 0x000000ff /* Mask for witness defined asserts. */ #define LA_UNLOCKED 0x00000000 /* Lock is unlocked. */ #define LA_LOCKED 0x00000001 /* Lock is at least share locked. */ #define LA_SLOCKED 0x00000002 /* Lock is exactly share locked. */ #define LA_XLOCKED 0x00000004 /* Lock is exclusively locked. */ #define LA_RECURSED 0x00000008 /* Lock is recursed. */ #define LA_NOTRECURSED 0x00000010 /* Lock is not recursed. */ #ifdef _KERNEL /* * If any of WITNESS, INVARIANTS, or KTR_LOCK KTR tracing has been enabled, * then turn on LOCK_DEBUG. 
When this option is on, extra debugging * facilities such as tracking the file and line number of lock operations * are enabled. Also, mutex locking operations are not inlined to avoid * bloat from all the extra debugging code. We also have to turn on all the * calling conventions for this debugging code in modules so that modules can * work with both debug and non-debug kernels. */ #if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || defined(INVARIANT_SUPPORT) || defined(LOCK_PROFILING) || (defined(KTR) && (KTR_COMPILE & KTR_LOCK)) #define LOCK_DEBUG 1 #else #define LOCK_DEBUG 0 #endif /* * In the LOCK_DEBUG case, use the filename and line numbers for debugging * operations. Otherwise, use default values to avoid the unneeded bloat. */ #if LOCK_DEBUG > 0 #define LOCK_FILE __FILE__ #define LOCK_LINE __LINE__ #else #define LOCK_FILE NULL #define LOCK_LINE 0 #endif /* * Macros for KTR_LOCK tracing. * * opname - name of this operation (LOCK/UNLOCK/SLOCK, etc.) * lo - struct lock_object * for this lock * flags - flags passed to the lock operation * recurse - this locks recursion level (or 0 if class is not recursable) * result - result of a try lock operation * file - file name * line - line number */ #define LOCK_LOG_TEST(lo, flags) \ (((flags) & LOP_QUIET) == 0 && ((lo)->lo_flags & LO_QUIET) == 0) #define LOCK_LOG_LOCK(opname, lo, flags, recurse, file, line) do { \ if (LOCK_LOG_TEST((lo), (flags))) \ CTR6(KTR_LOCK, opname " (%s) %s %p r = %d at %s:%d", \ LOCK_CLASS(lo)->lc_name, (lo)->lo_name, \ (lo), (u_int)(recurse), (file), (line)); \ } while (0) #define LOCK_LOG_TRY(opname, lo, flags, result, file, line) do { \ if (LOCK_LOG_TEST((lo), (flags))) \ CTR6(KTR_LOCK, "TRY_" opname " (%s) %s %p result=%d at %s:%d",\ LOCK_CLASS(lo)->lc_name, (lo)->lo_name, \ (lo), (u_int)(result), (file), (line)); \ } while (0) #define LOCK_LOG_INIT(lo, flags) do { \ if (LOCK_LOG_TEST((lo), (flags))) \ CTR4(KTR_LOCK, "%s: %p (%s) %s", __func__, (lo), \ LOCK_CLASS(lo)->lc_name, (lo)->lo_name); \ } while (0) #define LOCK_LOG_DESTROY(lo, flags) LOCK_LOG_INIT(lo, flags) -#define lock_initalized(lo) ((lo)->lo_flags & LO_INITIALIZED) +#define lock_initialized(lo) ((lo)->lo_flags & LO_INITIALIZED) /* * Helpful macros for quickly coming up with assertions with informative * panic messages. 
*/ #define MPASS(ex) MPASS4(ex, #ex, __FILE__, __LINE__) #define MPASS2(ex, what) MPASS4(ex, what, __FILE__, __LINE__) #define MPASS3(ex, file, line) MPASS4(ex, #ex, file, line) #define MPASS4(ex, what, file, line) \ KASSERT((ex), ("Assertion %s failed at %s:%d", what, file, line)) extern struct lock_class lock_class_mtx_sleep; extern struct lock_class lock_class_mtx_spin; extern struct lock_class lock_class_sx; extern struct lock_class lock_class_rw; extern struct lock_class lock_class_rm; extern struct lock_class lock_class_rm_sleepable; extern struct lock_class lock_class_lockmgr; extern struct lock_class *lock_classes[]; void lock_init(struct lock_object *, struct lock_class *, const char *, const char *, int); void lock_destroy(struct lock_object *); void spinlock_enter(void); void spinlock_exit(void); void witness_init(struct lock_object *, const char *); void witness_destroy(struct lock_object *); int witness_defineorder(struct lock_object *, struct lock_object *); void witness_checkorder(struct lock_object *, int, const char *, int, struct lock_object *); void witness_lock(struct lock_object *, int, const char *, int); void witness_upgrade(struct lock_object *, int, const char *, int); void witness_downgrade(struct lock_object *, int, const char *, int); void witness_unlock(struct lock_object *, int, const char *, int); void witness_save(struct lock_object *, const char **, int *); void witness_restore(struct lock_object *, const char *, int); int witness_list_locks(struct lock_list_entry **, int (*)(const char *, ...)); int witness_warn(int, struct lock_object *, const char *, ...); void witness_assert(const struct lock_object *, int, const char *, int); void witness_display_spinlock(struct lock_object *, struct thread *, int (*)(const char *, ...)); int witness_line(struct lock_object *); void witness_norelease(struct lock_object *); void witness_releaseok(struct lock_object *); const char *witness_file(struct lock_object *); void witness_thread_exit(struct thread *); #ifdef WITNESS /* Flags for witness_warn(). */ #define WARN_GIANTOK 0x01 /* Giant is exempt from this check. */ #define WARN_PANIC 0x02 /* Panic if check fails. */ #define WARN_SLEEPOK 0x04 /* Sleepable locks are exempt from check. */ #define WITNESS_INIT(lock, type) \ witness_init((lock), (type)) #define WITNESS_DESTROY(lock) \ witness_destroy(lock) #define WITNESS_CHECKORDER(lock, flags, file, line, interlock) \ witness_checkorder((lock), (flags), (file), (line), (interlock)) #define WITNESS_DEFINEORDER(lock1, lock2) \ witness_defineorder((struct lock_object *)(lock1), \ (struct lock_object *)(lock2)) #define WITNESS_LOCK(lock, flags, file, line) \ witness_lock((lock), (flags), (file), (line)) #define WITNESS_UPGRADE(lock, flags, file, line) \ witness_upgrade((lock), (flags), (file), (line)) #define WITNESS_DOWNGRADE(lock, flags, file, line) \ witness_downgrade((lock), (flags), (file), (line)) #define WITNESS_UNLOCK(lock, flags, file, line) \ witness_unlock((lock), (flags), (file), (line)) #define WITNESS_CHECK(flags, lock, fmt, ...) \ witness_warn((flags), (lock), (fmt), ## __VA_ARGS__) #define WITNESS_WARN(flags, lock, fmt, ...) 
\ witness_warn((flags), (lock), (fmt), ## __VA_ARGS__) #define WITNESS_SAVE_DECL(n) \ const char * __CONCAT(n, __wf); \ int __CONCAT(n, __wl) #define WITNESS_SAVE(lock, n) \ witness_save((lock), &__CONCAT(n, __wf), &__CONCAT(n, __wl)) #define WITNESS_RESTORE(lock, n) \ witness_restore((lock), __CONCAT(n, __wf), __CONCAT(n, __wl)) #define WITNESS_NORELEASE(lock) \ witness_norelease(&(lock)->lock_object) #define WITNESS_RELEASEOK(lock) \ witness_releaseok(&(lock)->lock_object) #define WITNESS_FILE(lock) \ witness_file(lock) #define WITNESS_LINE(lock) \ witness_line(lock) #else /* WITNESS */ #define WITNESS_INIT(lock, type) (void)0 #define WITNESS_DESTROY(lock) (void)0 #define WITNESS_DEFINEORDER(lock1, lock2) 0 #define WITNESS_CHECKORDER(lock, flags, file, line, interlock) (void)0 #define WITNESS_LOCK(lock, flags, file, line) (void)0 #define WITNESS_UPGRADE(lock, flags, file, line) (void)0 #define WITNESS_DOWNGRADE(lock, flags, file, line) (void)0 #define WITNESS_UNLOCK(lock, flags, file, line) (void)0 #define WITNESS_CHECK(flags, lock, fmt, ...) 0 #define WITNESS_WARN(flags, lock, fmt, ...) (void)0 #define WITNESS_SAVE_DECL(n) (void)0 #define WITNESS_SAVE(lock, n) (void)0 #define WITNESS_RESTORE(lock, n) (void)0 #define WITNESS_NORELEASE(lock) (void)0 #define WITNESS_RELEASEOK(lock) (void)0 #define WITNESS_FILE(lock) ("?") #define WITNESS_LINE(lock) (0) #endif /* WITNESS */ #endif /* _KERNEL */ #endif /* _SYS_LOCK_H_ */ Index: head/sys/sys/mutex.h =================================================================== --- head/sys/sys/mutex.h (revision 261237) +++ head/sys/sys/mutex.h (revision 261238) @@ -1,469 +1,469 @@ /*- * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $ * $FreeBSD$ */ #ifndef _SYS_MUTEX_H_ #define _SYS_MUTEX_H_ #include #include #include #ifdef _KERNEL #include #include #include #include #include /* * Mutex types and options passed to mtx_init(). MTX_QUIET and MTX_DUPOK * can also be passed in. 
*/ #define MTX_DEF 0x00000000 /* DEFAULT (sleep) lock */ #define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */ #define MTX_RECURSE 0x00000004 /* Option: lock allowed to recurse */ #define MTX_NOWITNESS 0x00000008 /* Don't do any witness checking. */ #define MTX_NOPROFILE 0x00000020 /* Don't profile this lock */ /* * Option flags passed to certain lock/unlock routines, through the use * of corresponding mtx_{lock,unlock}_flags() interface macros. */ #define MTX_QUIET LOP_QUIET /* Don't log a mutex event */ #define MTX_DUPOK LOP_DUPOK /* Don't log a duplicate acquire */ /* * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this, * with the exception of MTX_UNOWNED, applies to spin locks. */ #define MTX_RECURSED 0x00000001 /* lock recursed (for MTX_DEF only) */ #define MTX_CONTESTED 0x00000002 /* lock contested (for MTX_DEF only) */ #define MTX_UNOWNED 0x00000004 /* Cookie for free mutex */ #define MTX_FLAGMASK (MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED) /* * Value stored in mutex->mtx_lock to denote a destroyed mutex. */ #define MTX_DESTROYED (MTX_CONTESTED | MTX_UNOWNED) /* * Prototypes * * NOTE: Functions prepended with `_' (underscore) are exported to other parts * of the kernel via macros, thus allowing us to use the cpp LOCK_FILE * and LOCK_LINE or for hiding the lock cookie crunching to the * consumers. These functions should not be called directly by any * code using the API. Their macros cover their functionality. * Functions with a `_' suffix are the entrypoint for the common * KPI covering both compat shims and fast path case. These can be * used by consumers willing to pass options, file and line * informations, in an option-independent way. * * [See below for descriptions] * */ void _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts); void _mtx_destroy(volatile uintptr_t *c); void mtx_sysinit(void *arg); int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line); void mutex_init(void); void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts, const char *file, int line); void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line); #ifdef SMP void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts, const char *file, int line); #endif void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line); void __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line); void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line); void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line); #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) void __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line); #endif void thread_lock_flags_(struct thread *, int, const char *, int); #define thread_lock(tdp) \ thread_lock_flags_((tdp), 0, __FILE__, __LINE__) #define thread_lock_flags(tdp, opt) \ thread_lock_flags_((tdp), (opt), __FILE__, __LINE__) #define thread_unlock(tdp) \ mtx_unlock_spin((tdp)->td_lock) /* * Top-level macros to provide lock cookie once the actual mtx is passed. * They will also prevent passing a malformed object to the mtx KPI by * failing compilation as the mtx_lock reserved member will not be found. 
*/ #define mtx_init(m, n, t, o) \ _mtx_init(&(m)->mtx_lock, n, t, o) #define mtx_destroy(m) \ _mtx_destroy(&(m)->mtx_lock) #define mtx_trylock_flags_(m, o, f, l) \ _mtx_trylock_flags_(&(m)->mtx_lock, o, f, l) #define _mtx_lock_sleep(m, t, o, f, l) \ __mtx_lock_sleep(&(m)->mtx_lock, t, o, f, l) #define _mtx_unlock_sleep(m, o, f, l) \ __mtx_unlock_sleep(&(m)->mtx_lock, o, f, l) #ifdef SMP #define _mtx_lock_spin(m, t, o, f, l) \ _mtx_lock_spin_cookie(&(m)->mtx_lock, t, o, f, l) #endif #define _mtx_lock_flags(m, o, f, l) \ __mtx_lock_flags(&(m)->mtx_lock, o, f, l) #define _mtx_unlock_flags(m, o, f, l) \ __mtx_unlock_flags(&(m)->mtx_lock, o, f, l) #define _mtx_lock_spin_flags(m, o, f, l) \ __mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l) #define _mtx_unlock_spin_flags(m, o, f, l) \ __mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l) #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define _mtx_assert(m, w, f, l) \ __mtx_assert(&(m)->mtx_lock, w, f, l) #endif #define mtx_recurse lock_object.lo_data /* Very simple operations on mtx_lock. */ /* Try to obtain mtx_lock once. */ #define _mtx_obtain_lock(mp, tid) \ atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) /* Try to release mtx_lock if it is unrecursed and uncontested. */ #define _mtx_release_lock(mp, tid) \ atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED) /* Release mtx_lock quickly, assuming we own it. */ #define _mtx_release_lock_quick(mp) \ atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED) /* * Full lock operations that are suitable to be inlined in non-debug * kernels. If the lock cannot be acquired or released trivially then * the work is deferred to another function. */ /* Lock a normal mutex. */ #define __mtx_lock(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ if (!_mtx_obtain_lock((mp), _tid)) \ _mtx_lock_sleep((mp), _tid, (opts), (file), (line)); \ else \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, \ mp, 0, 0, (file), (line)); \ } while (0) /* * Lock a spin mutex. For spinlocks, we handle recursion inline (it * turns out that function calls can be significantly expensive on * some architectures). Since spin locks are not _too_ common, * inlining this code is not too big a deal. */ #ifdef SMP #define __mtx_lock_spin(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ spinlock_enter(); \ if (!_mtx_obtain_lock((mp), _tid)) { \ if ((mp)->mtx_lock == _tid) \ (mp)->mtx_recurse++; \ else \ _mtx_lock_spin((mp), _tid, (opts), (file), (line)); \ } else \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, \ mp, 0, 0, (file), (line)); \ } while (0) #else /* SMP */ #define __mtx_lock_spin(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ spinlock_enter(); \ if ((mp)->mtx_lock == _tid) \ (mp)->mtx_recurse++; \ else { \ KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \ (mp)->mtx_lock = _tid; \ } \ } while (0) #endif /* SMP */ /* Unlock a normal mutex. */ #define __mtx_unlock(mp, tid, opts, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ if ((mp)->mtx_recurse == 0) \ LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, \ (mp)); \ if (!_mtx_release_lock((mp), _tid)) \ _mtx_unlock_sleep((mp), (opts), (file), (line)); \ } while (0) /* * Unlock a spin mutex. For spinlocks, we can handle everything * inline, as it's pretty simple and a function call would be too * expensive (at least on some architectures). Since spin locks are * not _too_ common, inlining this code is not too big a deal. 
* * Since we always perform a spinlock_enter() when attempting to acquire a * spin lock, we need to always perform a matching spinlock_exit() when * releasing a spin lock. This includes the recursion cases. */ #ifdef SMP #define __mtx_unlock_spin(mp) do { \ if (mtx_recursed((mp))) \ (mp)->mtx_recurse--; \ else { \ LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \ mp); \ _mtx_release_lock_quick((mp)); \ } \ spinlock_exit(); \ } while (0) #else /* SMP */ #define __mtx_unlock_spin(mp) do { \ if (mtx_recursed((mp))) \ (mp)->mtx_recurse--; \ else { \ LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \ mp); \ (mp)->mtx_lock = MTX_UNOWNED; \ } \ spinlock_exit(); \ } while (0) #endif /* SMP */ /* * Exported lock manipulation interface. * * mtx_lock(m) locks MTX_DEF mutex `m' * * mtx_lock_spin(m) locks MTX_SPIN mutex `m' * * mtx_unlock(m) unlocks MTX_DEF mutex `m' * * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m' * * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m' * and passes option flags `opts' to the "hard" function, if required. * With these routines, it is possible to pass flags such as MTX_QUIET * to the appropriate lock manipulation routines. * * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if * it cannot. Rather, it returns 0 on failure and non-zero on success. * It does NOT handle recursion as we assume that if a caller is properly * using this part of the interface, he will know that the lock in question * is _not_ recursed. * * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts * relevant option flags `opts.' * * mtx_initialized(m) returns non-zero if the lock `m' has been initialized. * * mtx_owned(m) returns non-zero if the current thread owns the lock `m' * * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed. */ #define mtx_lock(m) mtx_lock_flags((m), 0) #define mtx_lock_spin(m) mtx_lock_spin_flags((m), 0) #define mtx_trylock(m) mtx_trylock_flags((m), 0) #define mtx_unlock(m) mtx_unlock_flags((m), 0) #define mtx_unlock_spin(m) mtx_unlock_spin_flags((m), 0) struct mtx_pool; struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts); void mtx_pool_destroy(struct mtx_pool **poolp); struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr); struct mtx *mtx_pool_alloc(struct mtx_pool *pool); #define mtx_pool_lock(pool, ptr) \ mtx_lock(mtx_pool_find((pool), (ptr))) #define mtx_pool_lock_spin(pool, ptr) \ mtx_lock_spin(mtx_pool_find((pool), (ptr))) #define mtx_pool_unlock(pool, ptr) \ mtx_unlock(mtx_pool_find((pool), (ptr))) #define mtx_pool_unlock_spin(pool, ptr) \ mtx_unlock_spin(mtx_pool_find((pool), (ptr))) /* * mtxpool_lockbuilder is a pool of sleep locks that is not witness * checked and should only be used for building higher level locks. * * mtxpool_sleep is a general purpose pool of sleep mutexes. 
*/ extern struct mtx_pool *mtxpool_lockbuilder; extern struct mtx_pool *mtxpool_sleep; #ifndef LOCK_DEBUG #error LOCK_DEBUG not defined, include before #endif #if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE) #define mtx_lock_flags_(m, opts, file, line) \ _mtx_lock_flags((m), (opts), (file), (line)) #define mtx_unlock_flags_(m, opts, file, line) \ _mtx_unlock_flags((m), (opts), (file), (line)) #define mtx_lock_spin_flags_(m, opts, file, line) \ _mtx_lock_spin_flags((m), (opts), (file), (line)) #define mtx_unlock_spin_flags_(m, opts, file, line) \ _mtx_unlock_spin_flags((m), (opts), (file), (line)) #else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */ #define mtx_lock_flags_(m, opts, file, line) \ __mtx_lock((m), curthread, (opts), (file), (line)) #define mtx_unlock_flags_(m, opts, file, line) \ __mtx_unlock((m), curthread, (opts), (file), (line)) #define mtx_lock_spin_flags_(m, opts, file, line) \ __mtx_lock_spin((m), curthread, (opts), (file), (line)) #define mtx_unlock_spin_flags_(m, opts, file, line) \ __mtx_unlock_spin((m)) #endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */ #ifdef INVARIANTS #define mtx_assert_(m, what, file, line) \ _mtx_assert((m), (what), (file), (line)) #define GIANT_REQUIRED mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__) #else /* INVARIANTS */ #define mtx_assert_(m, what, file, line) (void)0 #define GIANT_REQUIRED #endif /* INVARIANTS */ #define mtx_lock_flags(m, opts) \ mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_unlock_flags(m, opts) \ mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_lock_spin_flags(m, opts) \ mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_unlock_spin_flags(m, opts) \ mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_trylock_flags(m, opts) \ mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_assert(m, what) \ mtx_assert_((m), (what), __FILE__, __LINE__) #define mtx_sleep(chan, mtx, pri, wmesg, timo) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), \ tick_sbt * (timo), 0, C_HARDCLOCK) -#define mtx_initialized(m) lock_initalized(&(m)->lock_object) +#define mtx_initialized(m) lock_initialized(&(m)->lock_object) #define mtx_owned(m) (((m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread) #define mtx_recursed(m) ((m)->mtx_recurse != 0) #define mtx_name(m) ((m)->lock_object.lo_name) /* * Global locks. */ extern struct mtx Giant; extern struct mtx blocked_lock; /* * Giant lock manipulation and clean exit macros. * Used to replace return with an exit Giant and return. * * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT() * The #ifndef is to allow lint-like tools to redefine DROP_GIANT. 
*/ #ifndef DROP_GIANT #define DROP_GIANT() \ do { \ int _giantcnt = 0; \ WITNESS_SAVE_DECL(Giant); \ \ if (mtx_owned(&Giant)) { \ WITNESS_SAVE(&Giant.lock_object, Giant); \ for (_giantcnt = 0; mtx_owned(&Giant) && \ !SCHEDULER_STOPPED(); _giantcnt++) \ mtx_unlock(&Giant); \ } #define PICKUP_GIANT() \ PARTIAL_PICKUP_GIANT(); \ } while (0) #define PARTIAL_PICKUP_GIANT() \ mtx_assert(&Giant, MA_NOTOWNED); \ if (_giantcnt > 0) { \ while (_giantcnt--) \ mtx_lock(&Giant); \ WITNESS_RESTORE(&Giant.lock_object, Giant); \ } #endif struct mtx_args { void *ma_mtx; const char *ma_desc; int ma_opts; }; #define MTX_SYSINIT(name, mtx, desc, opts) \ static struct mtx_args name##_args = { \ (mtx), \ (desc), \ (opts) \ }; \ SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ mtx_sysinit, &name##_args); \ SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock)) /* * The INVARIANTS-enabled mtx_assert() functionality. * * The constants need to be defined for INVARIANT_SUPPORT infrastructure * support as _mtx_assert() itself uses them and the latter implies that * _mtx_assert() must build. */ #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define MA_OWNED LA_XLOCKED #define MA_NOTOWNED LA_UNLOCKED #define MA_RECURSED LA_RECURSED #define MA_NOTRECURSED LA_NOTRECURSED #endif /* * Common lock type names. */ #define MTX_NETWORK_LOCK "network driver" #endif /* _KERNEL */ #endif /* _SYS_MUTEX_H_ */ Index: head/sys/sys/rwlock.h =================================================================== --- head/sys/sys/rwlock.h (revision 261237) +++ head/sys/sys/rwlock.h (revision 261238) @@ -1,288 +1,288 @@ /*- * Copyright (c) 2006 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_RWLOCK_H_ #define _SYS_RWLOCK_H_ #include #include #include #include #ifdef _KERNEL #include #include #endif /* * The rw_lock field consists of several fields. The low bit indicates * if the lock is locked with a read (shared) or write (exclusive) lock. * A value of 0 indicates a write lock, and a value of 1 indicates a read * lock. 
Bit 1 is a boolean indicating if there are any threads waiting * for a read lock. Bit 2 is a boolean indicating if there are any threads * waiting for a write lock. The rest of the variable's definition is * dependent on the value of the first bit. For a write lock, it is a * pointer to the thread holding the lock, similar to the mtx_lock field of * mutexes. For read locks, it is a count of read locks that are held. * * When the lock is not locked by any thread, it is encoded as a read lock * with zero waiters. */ #define RW_LOCK_READ 0x01 #define RW_LOCK_READ_WAITERS 0x02 #define RW_LOCK_WRITE_WAITERS 0x04 #define RW_LOCK_WRITE_SPINNER 0x08 #define RW_LOCK_FLAGMASK \ (RW_LOCK_READ | RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS | \ RW_LOCK_WRITE_SPINNER) #define RW_LOCK_WAITERS (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS) #define RW_OWNER(x) ((x) & ~RW_LOCK_FLAGMASK) #define RW_READERS_SHIFT 4 #define RW_READERS(x) (RW_OWNER((x)) >> RW_READERS_SHIFT) #define RW_READERS_LOCK(x) ((x) << RW_READERS_SHIFT | RW_LOCK_READ) #define RW_ONE_READER (1 << RW_READERS_SHIFT) #define RW_UNLOCKED RW_READERS_LOCK(0) #define RW_DESTROYED (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS) #ifdef _KERNEL #define rw_recurse lock_object.lo_data /* Very simple operations on rw_lock. */ /* Try to obtain a write lock once. */ #define _rw_write_lock(rw, tid) \ atomic_cmpset_acq_ptr(&(rw)->rw_lock, RW_UNLOCKED, (tid)) /* Release a write lock quickly if there are no waiters. */ #define _rw_write_unlock(rw, tid) \ atomic_cmpset_rel_ptr(&(rw)->rw_lock, (tid), RW_UNLOCKED) /* * Full lock operations that are suitable to be inlined in non-debug * kernels. If the lock cannot be acquired or released trivially then * the work is deferred to another function. */ /* Acquire a write lock. */ #define __rw_wlock(rw, tid, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ if (!_rw_write_lock((rw), _tid)) \ _rw_wlock_hard((rw), _tid, (file), (line)); \ else \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, \ rw, 0, 0, (file), (line)); \ } while (0) /* Release a write lock. */ #define __rw_wunlock(rw, tid, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ if ((rw)->rw_recurse) \ (rw)->rw_recurse--; \ else { \ LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, \ (rw)); \ if (!_rw_write_unlock((rw), _tid)) \ _rw_wunlock_hard((rw), _tid, (file), (line)); \ } \ } while (0) /* * Function prototypes. Routines that start with _ are not part of the * external API and should not be called directly. Wrapper macros should * be used instead. 
*/ void _rw_init_flags(volatile uintptr_t *c, const char *name, int opts); void _rw_destroy(volatile uintptr_t *c); void rw_sysinit(void *arg); void rw_sysinit_flags(void *arg); int _rw_wowned(const volatile uintptr_t *c); void _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line); int __rw_try_wlock(volatile uintptr_t *c, const char *file, int line); void _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line); void __rw_rlock(volatile uintptr_t *c, const char *file, int line); int __rw_try_rlock(volatile uintptr_t *c, const char *file, int line); void _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line); void __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file, int line); void __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file, int line); int __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line); void __rw_downgrade(volatile uintptr_t *c, const char *file, int line); #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) void __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line); #endif /* * Top-level macros to provide lock cookie once the actual rwlock is passed. * They will also prevent passing a malformed object to the rwlock KPI by * failing compilation as the rw_lock reserved member will not be found. */ #define rw_init(rw, n) \ _rw_init_flags(&(rw)->rw_lock, n, 0) #define rw_init_flags(rw, n, o) \ _rw_init_flags(&(rw)->rw_lock, n, o) #define rw_destroy(rw) \ _rw_destroy(&(rw)->rw_lock) #define rw_wowned(rw) \ _rw_wowned(&(rw)->rw_lock) #define _rw_wlock(rw, f, l) \ _rw_wlock_cookie(&(rw)->rw_lock, f, l) #define _rw_try_wlock(rw, f, l) \ __rw_try_wlock(&(rw)->rw_lock, f, l) #define _rw_wunlock(rw, f, l) \ _rw_wunlock_cookie(&(rw)->rw_lock, f, l) #define _rw_rlock(rw, f, l) \ __rw_rlock(&(rw)->rw_lock, f, l) #define _rw_try_rlock(rw, f, l) \ __rw_try_rlock(&(rw)->rw_lock, f, l) #define _rw_runlock(rw, f, l) \ _rw_runlock_cookie(&(rw)->rw_lock, f, l) #define _rw_wlock_hard(rw, t, f, l) \ __rw_wlock_hard(&(rw)->rw_lock, t, f, l) #define _rw_wunlock_hard(rw, t, f, l) \ __rw_wunlock_hard(&(rw)->rw_lock, t, f, l) #define _rw_try_upgrade(rw, f, l) \ __rw_try_upgrade(&(rw)->rw_lock, f, l) #define _rw_downgrade(rw, f, l) \ __rw_downgrade(&(rw)->rw_lock, f, l) #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define _rw_assert(rw, w, f, l) \ __rw_assert(&(rw)->rw_lock, w, f, l) #endif /* * Public interface for lock operations. 
*/ #ifndef LOCK_DEBUG #error LOCK_DEBUG not defined, include before #endif #if LOCK_DEBUG > 0 || defined(RWLOCK_NOINLINE) #define rw_wlock(rw) _rw_wlock((rw), LOCK_FILE, LOCK_LINE) #define rw_wunlock(rw) _rw_wunlock((rw), LOCK_FILE, LOCK_LINE) #else #define rw_wlock(rw) \ __rw_wlock((rw), curthread, LOCK_FILE, LOCK_LINE) #define rw_wunlock(rw) \ __rw_wunlock((rw), curthread, LOCK_FILE, LOCK_LINE) #endif #define rw_rlock(rw) _rw_rlock((rw), LOCK_FILE, LOCK_LINE) #define rw_runlock(rw) _rw_runlock((rw), LOCK_FILE, LOCK_LINE) #define rw_try_rlock(rw) _rw_try_rlock((rw), LOCK_FILE, LOCK_LINE) #define rw_try_upgrade(rw) _rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE) #define rw_try_wlock(rw) _rw_try_wlock((rw), LOCK_FILE, LOCK_LINE) #define rw_downgrade(rw) _rw_downgrade((rw), LOCK_FILE, LOCK_LINE) #define rw_unlock(rw) do { \ if (rw_wowned(rw)) \ rw_wunlock(rw); \ else \ rw_runlock(rw); \ } while (0) #define rw_sleep(chan, rw, pri, wmesg, timo) \ _sleep((chan), &(rw)->lock_object, (pri), (wmesg), \ tick_sbt * (timo), 0, C_HARDCLOCK) -#define rw_initialized(rw) lock_initalized(&(rw)->lock_object) +#define rw_initialized(rw) lock_initialized(&(rw)->lock_object) struct rw_args { void *ra_rw; const char *ra_desc; }; struct rw_args_flags { void *ra_rw; const char *ra_desc; int ra_flags; }; #define RW_SYSINIT(name, rw, desc) \ static struct rw_args name##_args = { \ (rw), \ (desc), \ }; \ SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rw_sysinit, &name##_args); \ SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ _rw_destroy, __DEVOLATILE(void *, &(rw)->rw_lock)) #define RW_SYSINIT_FLAGS(name, rw, desc, flags) \ static struct rw_args_flags name##_args = { \ (rw), \ (desc), \ (flags), \ }; \ SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rw_sysinit_flags, &name##_args); \ SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ _rw_destroy, __DEVOLATILE(void *, &(rw)->rw_lock)) /* * Options passed to rw_init_flags(). */ #define RW_DUPOK 0x01 #define RW_NOPROFILE 0x02 #define RW_NOWITNESS 0x04 #define RW_QUIET 0x08 #define RW_RECURSE 0x10 /* * The INVARIANTS-enabled rw_assert() functionality. * * The constants need to be defined for INVARIANT_SUPPORT infrastructure * support as _rw_assert() itself uses them and the latter implies that * _rw_assert() must build. */ #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define RA_LOCKED LA_LOCKED #define RA_RLOCKED LA_SLOCKED #define RA_WLOCKED LA_XLOCKED #define RA_UNLOCKED LA_UNLOCKED #define RA_RECURSED LA_RECURSED #define RA_NOTRECURSED LA_NOTRECURSED #endif #ifdef INVARIANTS #define rw_assert(rw, what) _rw_assert((rw), (what), LOCK_FILE, LOCK_LINE) #else #define rw_assert(rw, what) #endif #endif /* _KERNEL */ #endif /* !_SYS_RWLOCK_H_ */
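
The functional content of this revision is the spelling correction of the lock_initialized() macro (previously lock_initalized) in sys/sys/lock.h, together with its two call sites in subr_lock.c and the mtx_initialized() and rw_initialized() wrappers in sys/sys/mutex.h and sys/sys/rwlock.h. The sketch below is not part of the diff; it only illustrates how a typical consumer relies on the renamed wrapper. The example_softc structure and the example_* function names are hypothetical, while mtx_init(), mtx_initialized(), mtx_lock(), mtx_unlock(), mtx_destroy(), and MPASS() are part of the KPI shown above.

/*
 * Illustrative sketch only: a driver-style consumer that asserts its
 * mutex has been initialized before using it.  The softc type and the
 * function names are made up for the example.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct example_softc {
	struct mtx	sc_mtx;
	int		sc_count;
};

static void
example_attach(struct example_softc *sc)
{
	/* mtx_init() calls lock_init(), which sets LO_INITIALIZED. */
	mtx_init(&sc->sc_mtx, "example", NULL, MTX_DEF);
}

static void
example_increment(struct example_softc *sc)
{
	/* mtx_initialized() expands to lock_initialized(&(m)->lock_object). */
	MPASS(mtx_initialized(&sc->sc_mtx));
	mtx_lock(&sc->sc_mtx);
	sc->sc_count++;
	mtx_unlock(&sc->sc_mtx);
}

static void
example_detach(struct example_softc *sc)
{
	/* lock_destroy() clears LO_INITIALIZED, so the flag can be rechecked. */
	if (mtx_initialized(&sc->sc_mtx))
		mtx_destroy(&sc->sc_mtx);
}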