diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index 1061ed5f5d03..05ce8b73b086 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -1,750 +1,748 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 /*
  * This module holds the global variables and functions used to maintain
  * lock_object structures.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "opt_ddb.h"
 #include "opt_mprof.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/limits.h>
 #include <sys/lock.h>
 #include <sys/lock_profile.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
 #include <sys/pcpu.h>
 #include <sys/proc.h>
 #include <sys/sbuf.h>
 #include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 
 #ifdef DDB
 #include <ddb/ddb.h>
 #endif
 
 #include <machine/cpufunc.h>
 
 /*
  * Uncomment to validate that spin argument to acquire/release routines matches
  * the flag in the lock
  */
 //#define LOCK_PROFILING_DEBUG_SPIN
 
 CTASSERT(LOCK_CLASS_MAX == 15);
 
 struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
 	&lock_class_mtx_spin,
 	&lock_class_mtx_sleep,
 	&lock_class_sx,
 	&lock_class_rm,
 	&lock_class_rm_sleepable,
 	&lock_class_rw,
 	&lock_class_lockmgr,
 };
 
 void
 lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
     const char *type, int flags)
 {
 	int i;
 
 	/* Check for double-init and zero object. */
 	KASSERT(flags & LO_NEW || !lock_initialized(lock),
 	    ("lock \"%s\" %p already initialized", name, lock));
 
 	/* Look up lock class to find its index. */
 	for (i = 0; i < LOCK_CLASS_MAX; i++)
 		if (lock_classes[i] == class) {
 			lock->lo_flags = i << LO_CLASSSHIFT;
 			break;
 		}
 	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
 
 	/* Initialize the lock object. */
 	lock->lo_name = name;
 	lock->lo_flags |= flags | LO_INITIALIZED;
 	LOCK_LOG_INIT(lock, 0);
 	WITNESS_INIT(lock, (type != NULL) ? type : name);
 }
 
 void
 lock_destroy(struct lock_object *lock)
 {
 
 	KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock));
 	WITNESS_DESTROY(lock);
 	LOCK_LOG_DESTROY(lock, 0);
 	lock->lo_flags &= ~LO_INITIALIZED;
 }
 
 static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
     "lock debugging");
 static SYSCTL_NODE(_debug_lock, OID_AUTO, delay, CTLFLAG_RD | CTLFLAG_MPSAFE,
     NULL, "lock delay");
 
 void
 lock_delay(struct lock_delay_arg *la)
 {
 	struct lock_delay_config *lc = la->config;
-	u_short i;
+	u_int i;
 
 	for (i = la->delay; i > 0; i--)
 		cpu_spinwait();
 	la->spin_cnt += la->delay;
 	la->delay <<= 1;
-	if (__predict_false(la->delay > lc->max))
+	if (__predict_false(la->delay > (u_int)lc->max))
 		la->delay = lc->max;
 }
 
 static u_int
 lock_roundup_2(u_int val)
 {
 	u_int res;
 
 	for (res = 1; res <= val; res <<= 1)
 		continue;
 
 	return (res);
 }
 
 void
 lock_delay_default_init(struct lock_delay_config *lc)
 {
 	lc->base = 1;
-	lc->max = lock_roundup_2(mp_ncpus) * 256;
-	if (lc->max > 32678)
-		lc->max = 32678;
+	lc->max = min(lock_roundup_2(mp_ncpus) * 256, SHRT_MAX);
 }
 
 struct lock_delay_config __read_frequently locks_delay;
 u_short __read_frequently locks_delay_retries;
 u_short __read_frequently locks_delay_loops;
 
 SYSCTL_U16(_debug_lock, OID_AUTO, delay_base, CTLFLAG_RW,
     &locks_delay.base, 0, "");
 SYSCTL_U16(_debug_lock, OID_AUTO, delay_max, CTLFLAG_RW,
     &locks_delay.max, 0, "");
 SYSCTL_U16(_debug_lock, OID_AUTO, delay_retries, CTLFLAG_RW,
     &locks_delay_retries, 0, "");
 SYSCTL_U16(_debug_lock, OID_AUTO, delay_loops, CTLFLAG_RW,
     &locks_delay_loops, 0, "");
 
 static void
 locks_delay_init(void *arg __unused)
 {
 
 	lock_delay_default_init(&locks_delay);
 	locks_delay_retries = 10;
 	locks_delay_loops = max(10000, locks_delay.max);
 }
 LOCK_DELAY_SYSINIT(locks_delay_init);
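
The hunks above are the heart of the change: lock_delay() implements truncated exponential backoff, spinning `delay` iterations per failed acquisition attempt, doubling `delay`, and clamping it at `lc->max`. Widening `i` (and, in sys/sys/lock.h below, `la->delay`) from u_short to u_int keeps the doubling and the comparison against `lc->max` in 32-bit arithmetic, and the new cap in lock_delay_default_init() also fixes the old transposed-digit limit: 32678 where 32768 (or, as now, SHRT_MAX) was clearly intended. A minimal userland sketch of the same backoff policy, with names of my own choosing rather than kernel KPI:

	/*
	 * Standalone sketch of the backoff policy in lock_delay() above.
	 * spin_one_cycle() stands in for cpu_spinwait(); the structs mirror
	 * lock_delay_config and lock_delay_arg.
	 */
	#include <stdio.h>

	struct delay_cfg { unsigned base, max; };
	struct delay_arg { const struct delay_cfg *cfg; unsigned delay, spin_cnt; };

	static void
	spin_one_cycle(void)
	{
	}

	static void
	backoff(struct delay_arg *da)
	{
		for (unsigned i = da->delay; i > 0; i--)
			spin_one_cycle();
		da->spin_cnt += da->delay;
		da->delay <<= 1;		/* double on every failure */
		if (da->delay > da->cfg->max)	/* clamp at the configured max */
			da->delay = da->cfg->max;
	}

	int
	main(void)
	{
		struct delay_cfg cfg = { 1, 1024 };
		struct delay_arg da = { &cfg, cfg.base, 0 };

		for (int attempt = 0; attempt < 12; attempt++) {
			backoff(&da);
			printf("attempt %2d: next delay %4u, spun %u cycles total\n",
			    attempt, da.delay, da.spin_cnt);
		}
		return (0);
	}

Running this prints the delay sequence 2, 4, 8, ... saturating at 1024, which is exactly the shape the kernel loop produces with base = 1.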
 
 #ifdef DDB
 DB_SHOW_COMMAND(lock, db_show_lock)
 {
 	struct lock_object *lock;
 	struct lock_class *class;
 
 	if (!have_addr)
 		return;
 	lock = (struct lock_object *)addr;
 	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
 		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
 		return;
 	}
 	class = LOCK_CLASS(lock);
 	db_printf(" class: %s\n", class->lc_name);
 	db_printf(" name: %s\n", lock->lo_name);
 	class->lc_ddb_show(lock);
 }
 #endif
 
 #ifdef LOCK_PROFILING
 
 /*
  * One object per-thread for each lock the thread owns.  Tracks individual
  * lock instances.
  */
 struct lock_profile_object {
 	LIST_ENTRY(lock_profile_object) lpo_link;
 	struct lock_object *lpo_obj;
 	const char	*lpo_file;
 	int		lpo_line;
 	uint16_t	lpo_ref;
 	uint16_t	lpo_cnt;
 	uint64_t	lpo_acqtime;
 	uint64_t	lpo_waittime;
 	u_int		lpo_contest_locking;
 };
 
 /*
  * One lock_prof for each (file, line, lock object) triple.
  */
 struct lock_prof {
 	SLIST_ENTRY(lock_prof) link;
 	struct lock_class *class;
 	const char	*file;
 	const char	*name;
 	int		line;
 	int		ticks;
 	uintmax_t	cnt_wait_max;
 	uintmax_t	cnt_max;
 	uintmax_t	cnt_tot;
 	uintmax_t	cnt_wait;
 	uintmax_t	cnt_cur;
 	uintmax_t	cnt_contest_locking;
 };
 SLIST_HEAD(lphead, lock_prof);
 
 #define	LPROF_HASH_SIZE		4096
 #define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
 #define	LPROF_CACHE_SIZE	4096
 
 /*
  * Array of objects and profs for each type of object for each cpu.  Spinlocks
  * are handled separately because a thread may be preempted and acquire a
  * spinlock while in the lock profiling code of a non-spinlock.  In this way
  * we only need a critical section to protect the per-cpu lists.
  */
 struct lock_prof_type {
 	struct lphead		lpt_lpalloc;
 	struct lpohead		lpt_lpoalloc;
 	struct lphead		lpt_hash[LPROF_HASH_SIZE];
 	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
 	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
 };
 
 struct lock_prof_cpu {
 	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
 };
 
 DPCPU_DEFINE_STATIC(struct lock_prof_cpu, lp);
 #define	LP_CPU_SELF	(DPCPU_PTR(lp))
 #define	LP_CPU(cpu)	(DPCPU_ID_PTR((cpu), lp))
 
 volatile int __read_mostly lock_prof_enable;
 int __read_mostly lock_contested_only;
 static volatile int lock_prof_resetting;
 
 #define	LPROF_SBUF_SIZE		256
 
 static int lock_prof_rejected;
 static int lock_prof_skipspin;
 
 #ifndef USE_CPU_NANOSECONDS
 uint64_t
 nanoseconds(void)
 {
 	struct bintime bt;
 	uint64_t ns;
 
 	binuptime(&bt);
 	/* From bintime2timespec */
 	ns = bt.sec * (uint64_t)1000000000;
 	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
 	return (ns);
 }
 #endif
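
In nanoseconds() above, bt.frac is a 64-bit binary fraction of a second (units of 2**-64 s), so the exact conversion is ns = frac * 10**9 / 2**64. The code computes that with only 64-bit arithmetic by keeping just the top 32 bits of the fraction: multiply by 10**9 (which cannot overflow a uint64_t for a 32-bit operand) and shift right by the remaining 32. A self-contained check of that identity; the test values here are mine:

	/*
	 * Verify the fixed-point conversion used by nanoseconds() above:
	 * ns = frac * 10**9 / 2**64, computed on the top 32 bits of frac.
	 * Dropping the low 32 bits costs well under a nanosecond.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t
	frac_to_ns(uint64_t frac)
	{
		return (((uint64_t)1000000000 * (uint32_t)(frac >> 32)) >> 32);
	}

	int
	main(void)
	{
		/* 0.5 s is represented as 2**63; expect 500000000 ns. */
		printf("%ju\n", (uintmax_t)frac_to_ns((uint64_t)1 << 63));
		/* 0.25 s is 2**62; expect 250000000 ns. */
		printf("%ju\n", (uintmax_t)frac_to_ns((uint64_t)1 << 62));
		return (0);
	}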
 
 static void
 lock_prof_init_type(struct lock_prof_type *type)
 {
 	int i;
 
 	SLIST_INIT(&type->lpt_lpalloc);
 	LIST_INIT(&type->lpt_lpoalloc);
 	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
 		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i], link);
 		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
 		    lpo_link);
 	}
 }
 
 static void
 lock_prof_init(void *arg)
 {
 	int cpu;
 
 	CPU_FOREACH(cpu) {
 		lock_prof_init_type(&LP_CPU(cpu)->lpc_types[0]);
 		lock_prof_init_type(&LP_CPU(cpu)->lpc_types[1]);
 	}
 }
 SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
 
 static void
 lock_prof_reset_wait(void)
 {
 
 	/*
 	 * Spin relinquishing our cpu so that quiesce_all_cpus may
 	 * complete.
 	 */
 	while (lock_prof_resetting)
 		sched_relinquish(curthread);
 }
 
 static void
 lock_prof_reset(void)
 {
 	struct lock_prof_cpu *lpc;
 	int enabled, i, cpu;
 
 	/*
 	 * We not only race with acquiring and releasing locks but also
 	 * thread exit. To be certain that threads exit without valid head
 	 * pointers they must see resetting set before enabled is cleared.
 	 * Otherwise a lock may not be removed from a per-thread list due
 	 * to disabled being set but not wait for reset() to remove it below.
 	 */
 	atomic_store_rel_int(&lock_prof_resetting, 1);
 	enabled = lock_prof_enable;
 	lock_prof_enable = 0;
 	/*
 	 * This both publishes lock_prof_enable as disabled and makes sure
 	 * everyone else reads it if they are not far enough. We wait for the
 	 * rest down below.
 	 */
 	cpus_fence_seq_cst();
 	quiesce_all_critical();
 	/*
 	 * Some objects may have migrated between CPUs.  Clear all links
 	 * before we zero the structures.  Some items may still be linked
 	 * into per-thread lists as well.
 	 */
 	CPU_FOREACH(cpu) {
 		lpc = LP_CPU(cpu);
 		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
 			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
 			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
 		}
 	}
 	CPU_FOREACH(cpu) {
 		lpc = LP_CPU(cpu);
 		bzero(lpc, sizeof(*lpc));
 		lock_prof_init_type(&lpc->lpc_types[0]);
 		lock_prof_init_type(&lpc->lpc_types[1]);
 	}
 	/*
 	 * Paired with the fence from cpus_fence_seq_cst()
 	 */
 	atomic_store_rel_int(&lock_prof_resetting, 0);
 	lock_prof_enable = enabled;
 }
 
 static void
 lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
 {
 	const char *p;
 
 	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
 	sbuf_printf(sb,
 	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
 	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
 	    lp->cnt_wait / 1000, lp->cnt_cur,
 	    lp->cnt_cur == 0 ? (uintmax_t)0 :
 	    lp->cnt_tot / (lp->cnt_cur * 1000),
 	    lp->cnt_cur == 0 ? (uintmax_t)0 :
 	    lp->cnt_wait / (lp->cnt_cur * 1000),
 	    (uintmax_t)0, lp->cnt_contest_locking,
 	    p, lp->line, lp->class->lc_name, lp->name);
 }
 
 static void
 lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
     int spin, int t)
 {
 	struct lock_prof_type *type;
 	struct lock_prof *l;
 	int cpu;
 
 	dst->file = match->file;
 	dst->line = match->line;
 	dst->class = match->class;
 	dst->name = match->name;
 
 	CPU_FOREACH(cpu) {
 		type = &LP_CPU(cpu)->lpc_types[spin];
 		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
 			if (l->ticks == t)
 				continue;
 			if (l->file != match->file || l->line != match->line ||
 			    l->name != match->name)
 				continue;
 			l->ticks = t;
 			if (l->cnt_max > dst->cnt_max)
 				dst->cnt_max = l->cnt_max;
 			if (l->cnt_wait_max > dst->cnt_wait_max)
 				dst->cnt_wait_max = l->cnt_wait_max;
 			dst->cnt_tot += l->cnt_tot;
 			dst->cnt_wait += l->cnt_wait;
 			dst->cnt_cur += l->cnt_cur;
 			dst->cnt_contest_locking += l->cnt_contest_locking;
 		}
 	}
 }
 
 static void
 lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
     int t)
 {
 	struct lock_prof *l;
 	int i;
 
 	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
 		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
 			struct lock_prof lp = {};
 
 			if (l->ticks == t)
 				continue;
 			lock_prof_sum(l, &lp, i, spin, t);
 			lock_prof_output(&lp, sb);
 		}
 	}
 }
 
 static int
 dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
 {
 	struct sbuf *sb;
 	int error, cpu, t;
 	int enabled;
 
 	error = sysctl_wire_old_buffer(req, 0);
 	if (error != 0)
 		return (error);
 	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
 	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
 	    "max", "wait_max", "total", "wait_total", "count", "avg",
 	    "wait_avg", "cnt_hold", "cnt_lock", "name");
 	enabled = lock_prof_enable;
 	lock_prof_enable = 0;
 	/*
 	 * See the comment in lock_prof_reset
 	 */
 	cpus_fence_seq_cst();
 	quiesce_all_critical();
 	t = ticks;
 	CPU_FOREACH(cpu) {
 		lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[0], sb, 0, t);
 		lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[1], sb, 1, t);
 	}
 	atomic_thread_fence_rel();
 	lock_prof_enable = enabled;
 
 	error = sbuf_finish(sb);
 	/* Output a trailing NUL. */
 	if (error == 0)
 		error = SYSCTL_OUT(req, "", 1);
 	sbuf_delete(sb);
 	return (error);
 }
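
A note on units in the table these routines emit: hold and wait times are recorded in nanoseconds, and lock_prof_output() divides by 1000 when printing, so the max/wait_max/total/wait_total columns are microseconds; the avg and wait_avg columns fold the same division into the per-acquisition average. A worked example with made-up numbers:

	/* Hypothetical inputs, only to illustrate the unit conversion above. */
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uintmax_t cnt_tot = 5000000000;	/* total hold time, ns */
		uintmax_t cnt_cur = 1000;	/* number of acquisitions */

		/* "total" column: microseconds. */
		printf("total %ju\n", cnt_tot / 1000);
		/* "avg" column: microseconds per acquisition (5000 here). */
		printf("avg   %ju\n", cnt_tot / (cnt_cur * 1000));
		return (0);
	}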
 
 static int
 enable_lock_prof(SYSCTL_HANDLER_ARGS)
 {
 	int error, v;
 
 	v = lock_prof_enable;
 	error = sysctl_handle_int(oidp, &v, v, req);
 	if (error)
 		return (error);
 	if (req->newptr == NULL)
 		return (error);
 	if (v == lock_prof_enable)
 		return (0);
 	if (v == 1)
 		lock_prof_reset();
 	lock_prof_enable = !!v;
 	return (0);
 }
 
 static int
 reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
 {
 	int error, v;
 
 	v = 0;
 	error = sysctl_handle_int(oidp, &v, 0, req);
 	if (error)
 		return (error);
 	if (req->newptr == NULL)
 		return (error);
 	if (v == 0)
 		return (0);
 	lock_prof_reset();
 	return (0);
 }
 
 static struct lock_prof *
 lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
     int line)
 {
 	const char *unknown = "(unknown)";
 	struct lock_prof_type *type;
 	struct lock_prof *lp;
 	struct lphead *head;
 	const char *p;
 	u_int hash;
 
 	p = file;
 	if (p == NULL || *p == '\0')
 		p = unknown;
 	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
 	hash &= LPROF_HASH_MASK;
 	type = &LP_CPU_SELF->lpc_types[spin];
 	head = &type->lpt_hash[hash];
 	SLIST_FOREACH(lp, head, link) {
 		if (lp->line == line && lp->file == p &&
 		    lp->name == lo->lo_name)
 			return (lp);
 	}
 	lp = SLIST_FIRST(&type->lpt_lpalloc);
 	if (lp == NULL) {
 		lock_prof_rejected++;
 		return (lp);
 	}
 	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
 	lp->file = p;
 	lp->line = line;
 	lp->class = LOCK_CLASS(lo);
 	lp->name = lo->lo_name;
 	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
 	return (lp);
 }
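
Note that the hash in lock_profile_lookup() mixes the pointer values of lo->lo_name and the file name, not the string contents. Both normally point at compile-time string literals, so pointer identity is a cheap stand-in for string equality; the same assumption makes the bucket walk compare `lp->file == p` rather than calling strcmp(). A standalone rendition, with hypothetical inputs:

	/*
	 * The same hash as lock_profile_lookup() above: it mixes pointer
	 * values and the line number, relying on name/file being interned
	 * string literals rather than hashing characters.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define	LPROF_HASH_SIZE	4096
	#define	LPROF_HASH_MASK	(LPROF_HASH_SIZE - 1)

	static unsigned
	lprof_hash(const char *name, const char *file, int line)
	{
		unsigned hash;

		hash = (uintptr_t)name * 31 + (uintptr_t)file * 31 + line;
		return (hash & LPROF_HASH_MASK);
	}

	int
	main(void)
	{
		static const char name[] = "vm object";	/* hypothetical */
		static const char file[] = "vm/vm_object.c";

		printf("bucket %u of %u\n", lprof_hash(name, file, 42),
		    LPROF_HASH_SIZE);
		return (0);
	}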
 
 static struct lock_profile_object *
 lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
     int line)
 {
 	struct lock_profile_object *l;
 	struct lock_prof_type *type;
 	struct lpohead *head;
 
 	head = &curthread->td_lprof[spin];
 	LIST_FOREACH(l, head, lpo_link)
 		if (l->lpo_obj == lo && l->lpo_file == file &&
 		    l->lpo_line == line)
 			return (l);
 	type = &LP_CPU_SELF->lpc_types[spin];
 	l = LIST_FIRST(&type->lpt_lpoalloc);
 	if (l == NULL) {
 		lock_prof_rejected++;
 		return (NULL);
 	}
 	LIST_REMOVE(l, lpo_link);
 	l->lpo_obj = lo;
 	l->lpo_file = file;
 	l->lpo_line = line;
 	l->lpo_cnt = 0;
 	LIST_INSERT_HEAD(head, l, lpo_link);
 
 	return (l);
 }
 
 void
 lock_profile_obtain_lock_success(struct lock_object *lo, bool spin,
     int contested, uint64_t waittime, const char *file, int line)
 {
 	struct lock_profile_object *l;
 #ifdef LOCK_PROFILING_DEBUG_SPIN
 	bool is_spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK);
 	if ((spin && !is_spin) || (!spin && is_spin))
 		printf("%s: lock %s spin mismatch (arg %d, flag %d)\n",
 		    __func__, lo->lo_name, spin, is_spin);
 #endif
 
 	/* don't reset the timer when/if recursing */
 	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
 		return;
 	if (lock_contested_only && !contested)
 		return;
 	if (spin && lock_prof_skipspin == 1)
 		return;
 
 	if (SCHEDULER_STOPPED())
 		return;
 
 	critical_enter();
 	/* Recheck enabled now that we're in a critical section. */
 	if (lock_prof_enable == 0)
 		goto out;
 	l = lock_profile_object_lookup(lo, spin, file, line);
 	if (l == NULL)
 		goto out;
 	l->lpo_cnt++;
 	if (++l->lpo_ref > 1)
 		goto out;
 	l->lpo_contest_locking = contested;
 	l->lpo_acqtime = nanoseconds();
 	if (waittime && (l->lpo_acqtime > waittime))
 		l->lpo_waittime = l->lpo_acqtime - waittime;
 	else
 		l->lpo_waittime = 0;
 out:
 	/*
 	 * Paired with cpus_fence_seq_cst().
 	 */
 	atomic_thread_fence_rel();
 	critical_exit();
 }
 
 void
 lock_profile_thread_exit(struct thread *td)
 {
 #ifdef INVARIANTS
 	struct lock_profile_object *l;
 
 	MPASS(curthread->td_critnest == 0);
 #endif
 	/*
 	 * If lock profiling was disabled we have to wait for reset to
 	 * clear our pointers before we can exit safely.
 	 */
 	lock_prof_reset_wait();
 #ifdef INVARIANTS
 	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
 		printf("thread still holds lock acquired at %s:%d\n",
 		    l->lpo_file, l->lpo_line);
 	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
 		printf("thread still holds lock acquired at %s:%d\n",
 		    l->lpo_file, l->lpo_line);
 #endif
 	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
 	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
 }
 
 void
 lock_profile_release_lock(struct lock_object *lo, bool spin)
 {
 	struct lock_profile_object *l;
 	struct lock_prof_type *type;
 	struct lock_prof *lp;
 	uint64_t curtime, holdtime;
 	struct lpohead *head;
 #ifdef LOCK_PROFILING_DEBUG_SPIN
 	bool is_spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK);
 	if ((spin && !is_spin) || (!spin && is_spin))
 		printf("%s: lock %s spin mismatch (arg %d, flag %d)\n",
 		    __func__, lo->lo_name, spin, is_spin);
 #endif
 
 	if (lo->lo_flags & LO_NOPROFILE)
 		return;
 	head = &curthread->td_lprof[spin];
 	if (LIST_FIRST(head) == NULL)
 		return;
 	if (SCHEDULER_STOPPED())
 		return;
 	critical_enter();
 	/* Recheck enabled now that we're in a critical section. */
 	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
 		goto out;
 	/*
 	 * If lock profiling is not enabled we still want to remove the
 	 * lpo from our queue.
 	 */
 	LIST_FOREACH(l, head, lpo_link)
 		if (l->lpo_obj == lo)
 			break;
 	if (l == NULL)
 		goto out;
 	if (--l->lpo_ref > 0)
 		goto out;
 	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
 	if (lp == NULL)
 		goto release;
 	curtime = nanoseconds();
 	if (curtime < l->lpo_acqtime)
 		goto release;
 	holdtime = curtime - l->lpo_acqtime;
 
 	/*
 	 * Record if the lock has been held longer now than ever
 	 * before.
 	 */
 	if (holdtime > lp->cnt_max)
 		lp->cnt_max = holdtime;
 	if (l->lpo_waittime > lp->cnt_wait_max)
 		lp->cnt_wait_max = l->lpo_waittime;
 	lp->cnt_tot += holdtime;
 	lp->cnt_wait += l->lpo_waittime;
 	lp->cnt_contest_locking += l->lpo_contest_locking;
 	lp->cnt_cur += l->lpo_cnt;
 release:
 	LIST_REMOVE(l, lpo_link);
 	type = &LP_CPU_SELF->lpc_types[spin];
 	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
 out:
 	/*
 	 * Paired with cpus_fence_seq_cst().
 	 */
 	atomic_thread_fence_rel();
 	critical_exit();
 }
 
 static SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD | CTLFLAG_MPSAFE,
     NULL, "lock profiling");
 SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
     &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
 SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
     &lock_prof_rejected, 0, "Number of rejected profiling records");
 SYSCTL_INT(_debug_lock_prof, OID_AUTO, contested_only, CTLFLAG_RW,
     &lock_contested_only, 0, "Only profile contested acquires");
 SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats,
     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
     dump_lock_prof_stats, "A",
     "Lock profiling statistics");
 SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset,
     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
     reset_lock_prof_stats, "I",
     "Reset lock profiling statistics");
 SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable,
     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
     enable_lock_prof, "I",
     "Enable lock profiling");
 
 #endif
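
On a kernel built with options LOCK_PROFILING, the sysctls defined above are the entire user interface: writing 1 to debug.lock.prof.enable resets and starts collection, and debug.lock.prof.stats returns the table. A minimal userland sketch using sysctlbyname(3); error handling is bare and the 1 MB stats buffer is an arbitrary choice of mine:

	/*
	 * Sketch: drive the profiler above from userland. Assumes a
	 * LOCK_PROFILING kernel and sufficient privilege.
	 */
	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <err.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int
	main(void)
	{
		int one = 1, zero = 0;
		size_t len;
		char *buf;

		if (sysctlbyname("debug.lock.prof.enable", NULL, NULL,
		    &one, sizeof(one)) != 0)
			err(1, "enable");
		sleep(10);		/* run the workload of interest */
		if (sysctlbyname("debug.lock.prof.enable", NULL, NULL,
		    &zero, sizeof(zero)) != 0)
			err(1, "disable");

		len = 1024 * 1024;
		if ((buf = malloc(len)) == NULL)
			err(1, "malloc");
		if (sysctlbyname("debug.lock.prof.stats", buf, &len,
		    NULL, 0) != 0)
			err(1, "stats");
		fwrite(buf, 1, len, stdout);
		free(buf);
		return (0);
	}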
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index 4031f20946c0..ce66a8ffebba 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -1,332 +1,332 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Berkeley Software Design Inc's name may not be used to endorse or
  *    promote products derived from this software without specific prior
  *    written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	from BSDI Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp
  * $FreeBSD$
  */
 
 #ifndef _SYS_LOCK_H_
 #define _SYS_LOCK_H_
 
 #include <sys/queue.h>
 #include <sys/_lock.h>
 #include <sys/ktr.h>
 
 struct lock_list_entry;
 struct thread;
 
 /*
  * Lock classes.  Each lock has a class which describes characteristics
  * common to all types of locks of a given class.
  *
  * Spin locks in general must always protect against preemption, as it is
  * an error to perform any type of context switch while holding a spin lock.
  * Also, for an individual lock to be recursable, its class must allow
  * recursion and the lock itself must explicitly allow recursion.
  *
  * The 'lc_ddb_show' function pointer is used to dump class-specific
  * data for the 'show lock' DDB command.  The 'lc_lock' and
  * 'lc_unlock' function pointers are used in sleep(9) and cv_wait(9)
  * to lock and unlock locks while blocking on a sleep queue.  The
  * return value of 'lc_unlock' will be passed to 'lc_lock' on resume
  * to allow communication of state between the two routines.
  */
 struct lock_class {
 	const char	*lc_name;
 	u_int		lc_flags;
 	void		(*lc_assert)(const struct lock_object *lock, int what);
 	void		(*lc_ddb_show)(const struct lock_object *lock);
 	void		(*lc_lock)(struct lock_object *lock, uintptr_t how);
 	int		(*lc_owner)(const struct lock_object *lock,
 			    struct thread **owner);
 	uintptr_t	(*lc_unlock)(struct lock_object *lock);
 };
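
The comment above states the key contract: lc_unlock returns an opaque uintptr_t cookie describing how the lock was held (shared vs. exclusive, recursion depth, and so on) and lc_lock consumes it to restore exactly that state after waking. A schematic of how sleep(9)-style code uses the pair; this is an illustrative kernel fragment against this header, not the actual sleepqueue code (the real consumers live in kern_synch.c and friends):

	/*
	 * Illustrative only: the lc_unlock/lc_lock cookie protocol
	 * described in the comment above.
	 */
	static void
	block_with_lock_dropped(struct lock_object *lo)
	{
		struct lock_class *class = LOCK_CLASS(lo);
		uintptr_t how;

		how = class->lc_unlock(lo);	/* drop; cookie records held-state */
		/* ... block on a sleep queue for as long as it takes ... */
		class->lc_lock(lo, how);	/* reacquire in the recorded state */
	}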
 
 #define	LC_SLEEPLOCK	0x00000001	/* Sleep lock. */
 #define	LC_SPINLOCK	0x00000002	/* Spin lock. */
 #define	LC_SLEEPABLE	0x00000004	/* Sleeping allowed with this lock. */
 #define	LC_RECURSABLE	0x00000008	/* Locks of this type may recurse. */
 #define	LC_UPGRADABLE	0x00000010	/* Upgrades and downgrades permitted. */
 
 #define	LO_CLASSFLAGS	0x0000ffff	/* Class specific flags. */
 #define	LO_INITIALIZED	0x00010000	/* Lock has been initialized. */
 #define	LO_WITNESS	0x00020000	/* Should witness monitor this lock. */
 #define	LO_QUIET	0x00040000	/* Don't log locking operations. */
 #define	LO_RECURSABLE	0x00080000	/* Lock may recurse. */
 #define	LO_SLEEPABLE	0x00100000	/* Lock may be held while sleeping. */
 #define	LO_UPGRADABLE	0x00200000	/* Lock may be upgraded/downgraded. */
 #define	LO_DUPOK	0x00400000	/* Don't check for duplicate acquires */
 #define	LO_IS_VNODE	0x00800000	/* Tell WITNESS about a VNODE lock */
 #define	LO_CLASSMASK	0x0f000000	/* Class index bitmask. */
 #define	LO_NOPROFILE	0x10000000	/* Don't profile this lock */
 #define	LO_NEW		0x20000000	/* Don't check for double-init */
 
 /*
  * Lock classes are statically assigned an index into the global lock_classes
  * array.  Debugging code looks up the lock class for a given lock object
  * by indexing the array.
  */
 #define	LO_CLASSSHIFT		24
 #define	LO_CLASSINDEX(lock)	((((lock)->lo_flags) & LO_CLASSMASK) >> LO_CLASSSHIFT)
 #define	LOCK_CLASS(lock)	(lock_classes[LO_CLASSINDEX((lock))])
 #define	LOCK_CLASS_MAX		(LO_CLASSMASK >> LO_CLASSSHIFT)
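
Concretely, the class index lives in bits 24-27 of lo_flags, so LOCK_CLASS_MAX is 0x0f000000 >> 24 = 15, matching the CTASSERT in subr_lock.c above; lock_init() stores `i << LO_CLASSSHIFT` and LO_CLASSINDEX() reverses it. A quick self-check using a bare flags word instead of a struct lock_object (the sample value is mine):

	/* Self-check of the class-index encoding defined above. */
	#include <assert.h>
	#include <stdio.h>

	#define	LO_CLASSMASK	0x0f000000
	#define	LO_CLASSSHIFT	24
	#define	CLASSINDEX(flags) (((flags) & LO_CLASSMASK) >> LO_CLASSSHIFT)

	int
	main(void)
	{
		unsigned lo_flags;

		/* What lock_init() stores for class index 2 (lock_class_sx). */
		lo_flags = 2 << LO_CLASSSHIFT;		/* 0x02000000 */
		lo_flags |= 0x00010000;			/* LO_INITIALIZED */

		assert(CLASSINDEX(lo_flags) == 2);
		assert((LO_CLASSMASK >> LO_CLASSSHIFT) == 15); /* LOCK_CLASS_MAX */
		printf("class index %u\n", CLASSINDEX(lo_flags));
		return (0);
	}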
 
 /*
  * Option flags passed to lock operations that witness also needs to know
  * about or that are generic across all locks.
  */
 #define	LOP_NEWORDER	0x00000001	/* Define a new lock order. */
 #define	LOP_QUIET	0x00000002	/* Don't log locking operations. */
 #define	LOP_TRYLOCK	0x00000004	/* Don't check lock order. */
 #define	LOP_EXCLUSIVE	0x00000008	/* Exclusive lock. */
 #define	LOP_DUPOK	0x00000010	/* Don't check for duplicate acquires */
 #define	LOP_NOSLEEP	0x00000020	/* Non-sleepable despite LO_SLEEPABLE */
 
 /* Flags passed to witness_assert. */
 #define	LA_MASKASSERT	0x000000ff	/* Mask for witness defined asserts. */
 #define	LA_UNLOCKED	0x00000000	/* Lock is unlocked. */
 #define	LA_LOCKED	0x00000001	/* Lock is at least share locked. */
 #define	LA_SLOCKED	0x00000002	/* Lock is exactly share locked. */
 #define	LA_XLOCKED	0x00000004	/* Lock is exclusively locked. */
 #define	LA_RECURSED	0x00000008	/* Lock is recursed. */
 #define	LA_NOTRECURSED	0x00000010	/* Lock is not recursed. */
 
 #ifdef _KERNEL
 /*
  * Macros for KTR_LOCK tracing.
  *
  * opname - name of this operation (LOCK/UNLOCK/SLOCK, etc.)
  * lo - struct lock_object * for this lock
  * flags - flags passed to the lock operation
  * recurse - this lock's recursion level (or 0 if class is not recursable)
  * result - result of a try lock operation
  * file - file name
  * line - line number
  */
 #if LOCK_DEBUG > 0
 #define	LOCK_LOG_TEST(lo, flags)					\
 	(((flags) & LOP_QUIET) == 0 && ((lo)->lo_flags & LO_QUIET) == 0)
 #else
 #define	LOCK_LOG_TEST(lo, flags)	0
 #endif
 
 #define	LOCK_LOG_LOCK(opname, lo, flags, recurse, file, line) do {	\
 	if (LOCK_LOG_TEST((lo), (flags)))				\
 		CTR6(KTR_LOCK, opname " (%s) %s %p r = %d at %s:%d",	\
 		    LOCK_CLASS(lo)->lc_name, (lo)->lo_name,		\
 		    (lo), (u_int)(recurse), (file), (line));		\
 } while (0)
 
 #define	LOCK_LOG_TRY(opname, lo, flags, result, file, line) do {	\
 	if (LOCK_LOG_TEST((lo), (flags)))				\
 		CTR6(KTR_LOCK, "TRY_" opname " (%s) %s %p result=%d at %s:%d",\
 		    LOCK_CLASS(lo)->lc_name, (lo)->lo_name,		\
 		    (lo), (u_int)(result), (file), (line));		\
 } while (0)
 
 #define	LOCK_LOG_INIT(lo, flags) do {					\
 	if (LOCK_LOG_TEST((lo), (flags)))				\
 		CTR4(KTR_LOCK, "%s: %p (%s) %s", __func__, (lo),	\
 		    LOCK_CLASS(lo)->lc_name, (lo)->lo_name);		\
 } while (0)
 
 #define	LOCK_LOG_DESTROY(lo, flags)	LOCK_LOG_INIT(lo, flags)
 
 #define	lock_initialized(lo)	((lo)->lo_flags & LO_INITIALIZED)
 
 extern struct lock_class lock_class_mtx_sleep;
 extern struct lock_class lock_class_mtx_spin;
 extern struct lock_class lock_class_sx;
 extern struct lock_class lock_class_rw;
 extern struct lock_class lock_class_rm;
 extern struct lock_class lock_class_rm_sleepable;
 extern struct lock_class lock_class_lockmgr;
 
 extern struct lock_class *lock_classes[];
 
 struct lock_delay_config {
 	u_short base;
 	u_short max;
 };
 
 extern struct lock_delay_config locks_delay;
 extern u_short locks_delay_retries;
 extern u_short locks_delay_loops;
 
 struct lock_delay_arg {
 	struct lock_delay_config *config;
-	u_short delay;
+	u_int delay;
 	u_int spin_cnt;
 };
 
 static inline void
 lock_delay_arg_init(struct lock_delay_arg *la, struct lock_delay_config *lc)
 {
 	la->config = lc;
 	la->delay = lc->base;
 	la->spin_cnt = 0;
 }
 
 static inline void
 lock_delay_arg_init_noadapt(struct lock_delay_arg *la)
 {
 	la->delay = 0;
 	la->spin_cnt = 0;
 }
 
 #define	lock_delay_spin(n)	do {	\
 	u_int _i;			\
 					\
 	for (_i = (n); _i > 0; _i--)	\
 		cpu_spinwait();		\
 } while (0)
 
 #define	LOCK_DELAY_SYSINIT(func) \
 	SYSINIT(func##_ld, SI_SUB_LOCK, SI_ORDER_ANY, func, NULL)
 
 #define	LOCK_DELAY_SYSINIT_DEFAULT(lc) \
 	SYSINIT(lock_delay_##lc##_ld, SI_SUB_LOCK, SI_ORDER_ANY, \
 	    lock_delay_default_init, &lc)
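
Putting the pieces together: a locking primitive initializes a lock_delay_arg from a config once, then calls lock_delay() after each failed attempt, so the delay starts at `base` and doubles up to `max`. A schematic trylock loop; `try_acquire` and `struct some_lock` are stand-ins of mine, not real KPI:

	/*
	 * Schematic use of the adaptive-delay API above inside a
	 * hypothetical spinning primitive; try_acquire() stands in for an
	 * atomic compare-and-set attempt on the real lock word.
	 */
	static bool try_acquire(struct some_lock *sl);	/* hypothetical */

	static void
	some_lock_enter(struct some_lock *sl)
	{
		struct lock_delay_arg lda;

		lock_delay_arg_init(&lda, &locks_delay);
		while (!try_acquire(sl))
			lock_delay(&lda);  /* spin lda.delay cycles, double it */
		/* lda.spin_cnt now holds the total cycles spent spinning. */
	}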
 
 void	lock_init(struct lock_object *, struct lock_class *,
 	    const char *, const char *, int);
 void	lock_destroy(struct lock_object *);
 void	lock_delay(struct lock_delay_arg *);
 void	lock_delay_default_init(struct lock_delay_config *);
 void	spinlock_enter(void);
 void	spinlock_exit(void);
 void	witness_init(struct lock_object *, const char *);
 void	witness_destroy(struct lock_object *);
 int	witness_defineorder(struct lock_object *, struct lock_object *);
 void	witness_checkorder(struct lock_object *, int, const char *, int,
 	    struct lock_object *);
 void	witness_lock(struct lock_object *, int, const char *, int);
 void	witness_upgrade(struct lock_object *, int, const char *, int);
 void	witness_downgrade(struct lock_object *, int, const char *, int);
 void	witness_unlock(struct lock_object *, int, const char *, int);
 void	witness_save(struct lock_object *, const char **, int *);
 void	witness_restore(struct lock_object *, const char *, int);
 int	witness_list_locks(struct lock_list_entry **,
 	    int (*)(const char *, ...));
 int	witness_warn(int, struct lock_object *, const char *, ...);
 void	witness_assert(const struct lock_object *, int, const char *, int);
 int	witness_is_owned(const struct lock_object *lock);
 void	witness_display_spinlock(struct lock_object *, struct thread *,
 	    int (*)(const char *, ...));
 int	witness_line(struct lock_object *);
 void	witness_norelease(struct lock_object *);
 void	witness_releaseok(struct lock_object *);
 const char *witness_file(struct lock_object *);
 void	witness_thread_exit(struct thread *);
 
 #ifdef	WITNESS
 
 int	witness_startup_count(void);
 void	witness_startup(void *);
 
 /* Flags for witness_warn(). */
 #define	WARN_GIANTOK	0x01	/* Giant is exempt from this check. */
 #define	WARN_PANIC	0x02	/* Panic if check fails. */
 #define	WARN_SLEEPOK	0x04	/* Sleepable locks are exempt from check. */
 
 #define	WITNESS_INIT(lock, type)					\
 	witness_init((lock), (type))
 
 #define	WITNESS_DESTROY(lock)						\
 	witness_destroy(lock)
 
 #define	WITNESS_CHECKORDER(lock, flags, file, line, interlock)		\
 	witness_checkorder((lock), (flags), (file), (line), (interlock))
 
 #define	WITNESS_DEFINEORDER(lock1, lock2)				\
 	witness_defineorder((struct lock_object *)(lock1),		\
 	    (struct lock_object *)(lock2))
 
 #define	WITNESS_LOCK(lock, flags, file, line)				\
 	witness_lock((lock), (flags), (file), (line))
 
 #define	WITNESS_UPGRADE(lock, flags, file, line)			\
 	witness_upgrade((lock), (flags), (file), (line))
 
 #define	WITNESS_DOWNGRADE(lock, flags, file, line)			\
 	witness_downgrade((lock), (flags), (file), (line))
 
 #define	WITNESS_UNLOCK(lock, flags, file, line)				\
 	witness_unlock((lock), (flags), (file), (line))
 
 #define	WITNESS_CHECK(flags, lock, fmt, ...)				\
 	witness_warn((flags), (lock), (fmt), ## __VA_ARGS__)
 
 #define	WITNESS_WARN(flags, lock, fmt, ...)				\
 	witness_warn((flags), (lock), (fmt), ## __VA_ARGS__)
 
 #define	WITNESS_SAVE_DECL(n)						\
 	const char * __CONCAT(n, __wf);					\
 	int __CONCAT(n, __wl)
 
 #define	WITNESS_SAVE(lock, n)						\
 	witness_save((lock), &__CONCAT(n, __wf), &__CONCAT(n, __wl))
 
 #define	WITNESS_RESTORE(lock, n)					\
 	witness_restore((lock), __CONCAT(n, __wf), __CONCAT(n, __wl))
 
 #define	WITNESS_NORELEASE(lock)						\
 	witness_norelease(&(lock)->lock_object)
 
 #define	WITNESS_RELEASEOK(lock)						\
 	witness_releaseok(&(lock)->lock_object)
 
 #define	WITNESS_FILE(lock)						\
 	witness_file(lock)
 
 #define	WITNESS_LINE(lock)						\
 	witness_line(lock)
 
 #else	/* WITNESS */
 
 #define	WITNESS_INIT(lock, type)				(void)0
 #define	WITNESS_DESTROY(lock)					(void)0
 #define	WITNESS_DEFINEORDER(lock1, lock2)			0
 #define	WITNESS_CHECKORDER(lock, flags, file, line, interlock)	(void)0
 #define	WITNESS_LOCK(lock, flags, file, line)			(void)0
 #define	WITNESS_UPGRADE(lock, flags, file, line)		(void)0
 #define	WITNESS_DOWNGRADE(lock, flags, file, line)		(void)0
 #define	WITNESS_UNLOCK(lock, flags, file, line)			(void)0
 #define	WITNESS_CHECK(flags, lock, fmt, ...)			0
 #define	WITNESS_WARN(flags, lock, fmt, ...)			(void)0
 #define	WITNESS_SAVE_DECL(n)					(void)0
 #define	WITNESS_SAVE(lock, n)					(void)0
 #define	WITNESS_RESTORE(lock, n)				(void)0
 #define	WITNESS_NORELEASE(lock)					(void)0
 #define	WITNESS_RELEASEOK(lock)					(void)0
 #define	WITNESS_FILE(lock)					("?")
 #define	WITNESS_LINE(lock)					(0)
 
 #endif	/* WITNESS */
 
 #endif	/* _KERNEL */
 #endif	/* _SYS_LOCK_H_ */