diff --git a/share/man/man9/rmlock.9 b/share/man/man9/rmlock.9 index 53de7c73b79a..6d128f986968 100644 --- a/share/man/man9/rmlock.9 +++ b/share/man/man9/rmlock.9 @@ -1,380 +1,383 @@ .\" Copyright (c) 2007 Stephan Uphoff .\" Copyright (c) 2006 Gleb Smirnoff .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .\" Based on rwlock.9 man page -.Dd December 27, 2019 +.Dd April 12, 2021 .Dt RMLOCK 9 .Os .Sh NAME .Nm rmlock , .Nm rm_init , .Nm rm_init_flags , .Nm rm_destroy , .Nm rm_rlock , .Nm rm_try_rlock , .Nm rm_wlock , .Nm rm_runlock , .Nm rm_wunlock , .Nm rm_wowned , .Nm rm_sleep , .Nm rm_assert , .Nm RM_SYSINIT , .Nm RM_SYSINIT_FLAGS , .Nm rms_init , .Nm rms_destroy , .Nm rms_rlock , .Nm rms_wlock , .Nm rms_runlock , .Nm rms_wunlock .Nd kernel reader/writer lock optimized for read-mostly access patterns .Sh SYNOPSIS .In sys/param.h .In sys/lock.h .In sys/rmlock.h .Ft void .Fn rm_init "struct rmlock *rm" "const char *name" .Ft void .Fn rm_init_flags "struct rmlock *rm" "const char *name" "int opts" .Ft void .Fn rm_destroy "struct rmlock *rm" .Ft void .Fn rm_rlock "struct rmlock *rm" "struct rm_priotracker* tracker" .Ft int .Fn rm_try_rlock "struct rmlock *rm" "struct rm_priotracker* tracker" .Ft void .Fn rm_wlock "struct rmlock *rm" .Ft void .Fn rm_runlock "struct rmlock *rm" "struct rm_priotracker* tracker" .Ft void .Fn rm_wunlock "struct rmlock *rm" .Ft int .Fn rm_wowned "const struct rmlock *rm" .Ft int .Fn rm_sleep "void *wchan" "struct rmlock *rm" "int priority" "const char *wmesg" "int timo" .Pp .Cd "options INVARIANTS" .Cd "options INVARIANT_SUPPORT" .Ft void .Fn rm_assert "struct rmlock *rm" "int what" .In sys/kernel.h .Fn RM_SYSINIT "name" "struct rmlock *rm" "const char *desc" .Fn RM_SYSINIT_FLAGS "name" "struct rmlock *rm" "const char *desc" "int flags" .Ft void .Fn rms_init "struct rmslock *rms" "const char *name" .Ft void .Fn rms_destroy "struct rmslock *rms" .Ft void .Fn rms_rlock "struct rmslock *rms" .Ft void .Fn rms_wlock "struct rmslock *rms" .Ft void .Fn rms_runlock "struct rmslock *rms" .Ft void .Fn rms_wunlock "struct rmslock *rms" .Sh DESCRIPTION Read-mostly locks allow shared access to protected data by multiple threads, or exclusive access by a single thread. 
The threads with shared access are known as .Em readers since they only read the protected data. A thread with exclusive access is known as a .Em writer since it can modify protected data. .Pp Read-mostly locks are designed to be efficient for locks almost exclusively used as reader locks and as such should be used for protecting data that rarely changes. Acquiring an exclusive lock after the lock has been locked for shared access is an expensive operation. .Pp Normal read-mostly locks are similar to .Xr rwlock 9 locks and follow the same lock ordering rules as .Xr rwlock 9 locks. Read-mostly locks have full priority propagation like mutexes. Unlike .Xr rwlock 9 , read-mostly locks propagate priority to both readers and writers. This is implemented via the .Va rm_priotracker structure argument supplied to .Fn rm_rlock and .Fn rm_runlock . Readers can recurse if the lock is initialized with the .Dv RM_RECURSE option; however, writers are never allowed to recurse. .Pp Sleeping for writers can be allowed by passing .Dv RM_SLEEPABLE to .Fn rm_init_flags . This changes the lock ordering rules to the same as for .Xr sx 9 locks. Such locks do not propagate priority to writers, but they do propagate priority to readers. Note that readers are not permitted to sleep regardless of the flag. .Pp Sleepable read-mostly locks (created with .Fn rms_init ) allow sleeping for both readers and writers, but do not perform priority propagation for either. They follow .Xr sx 9 lock ordering. .Ss Macros and Functions .Bl -tag -width indent .It Fn rm_init "struct rmlock *rm" "const char *name" Initialize the read-mostly lock .Fa rm . The .Fa name description is used solely for debugging purposes. This function must be called before any other operations on the lock. .It Fn rm_init_flags "struct rmlock *rm" "const char *name" "int opts" Similar to .Fn rm_init , initialize the read-mostly lock .Fa rm with a set of optional flags. The .Fa opts argument contains one or more of the following flags: .Bl -tag -width ".Dv RM_NOWITNESS" .It Dv RM_NOWITNESS Instruct .Xr witness 4 to ignore this lock. .It Dv RM_RECURSE Allow threads to recursively acquire shared locks for .Fa rm . .It Dv RM_SLEEPABLE Create a sleepable read-mostly lock. .It Dv RM_NEW If the kernel has been compiled with .Cd "options INVARIANTS" , .Fn rm_init_flags will assert that the .Fa rm has not been initialized multiple times without intervening calls to .Fn rm_destroy unless this option is specified. +.It Dv RM_DUPOK +.Xr witness 4 +should not log messages about duplicate locks being acquired. .El .It Fn rm_rlock "struct rmlock *rm" "struct rm_priotracker* tracker" Lock .Fa rm as a reader using .Fa tracker to track read owners of a lock for priority propagation. This data structure is only used internally by .Nm and must persist until .Fn rm_runlock has been called. This data structure can be allocated on the stack since readers cannot sleep. If any thread holds this lock exclusively, the current thread blocks, and its priority is propagated to the exclusive holder. If the lock was initialized with the .Dv RM_RECURSE option, the .Fn rm_rlock function can be called when the current thread has already acquired reader access on .Fa rm . .It Fn rm_try_rlock "struct rmlock *rm" "struct rm_priotracker* tracker" Try to lock .Fa rm as a reader. .Fn rm_try_rlock will return 0 if the lock cannot be acquired immediately; otherwise, the lock will be acquired and a non-zero value will be returned. 
Note that .Fn rm_try_rlock may fail even while the lock is not currently held by a writer. If the lock was initialized with the .Dv RM_RECURSE option, .Fn rm_try_rlock will succeed if the current thread has already acquired reader access. .It Fn rm_wlock "struct rmlock *rm" Lock .Fa rm as a writer. If there are any shared owners of the lock, the current thread blocks. The .Fn rm_wlock function cannot be called recursively. .It Fn rm_runlock "struct rmlock *rm" "struct rm_priotracker* tracker" This function releases a shared lock previously acquired by .Fn rm_rlock . The .Fa tracker argument must match the .Fa tracker argument used for acquiring the shared lock. .It Fn rm_wunlock "struct rmlock *rm" This function releases an exclusive lock previously acquired by .Fn rm_wlock . .It Fn rm_destroy "struct rmlock *rm" This function destroys a lock previously initialized with .Fn rm_init . The .Fa rm lock must be unlocked. .It Fn rm_wowned "const struct rmlock *rm" This function returns a non-zero value if the current thread owns an exclusive lock on .Fa rm . .It Fn rm_sleep "void *wchan" "struct rmlock *rm" "int priority" "const char *wmesg" "int timo" This function atomically releases .Fa rm while waiting for an event. The .Fa rm lock must be exclusively locked. For more details on the parameters to this function, see .Xr sleep 9 . .It Fn rm_assert "struct rmlock *rm" "int what" This function asserts that the .Fa rm lock is in the state specified by .Fa what . If the assertions are not true and the kernel is compiled with .Cd "options INVARIANTS" and .Cd "options INVARIANT_SUPPORT" , the kernel will panic. Currently the following base assertions are supported: .Bl -tag -width ".Dv RA_UNLOCKED" .It Dv RA_LOCKED Assert that the current thread holds either a shared or exclusive lock of .Fa rm . .It Dv RA_RLOCKED Assert that the current thread holds a shared lock of .Fa rm . .It Dv RA_WLOCKED Assert that the current thread holds an exclusive lock of .Fa rm . .It Dv RA_UNLOCKED Assert that the current thread holds neither a shared nor exclusive lock of .Fa rm . .El .Pp In addition, one of the following optional flags may be specified with .Dv RA_LOCKED , .Dv RA_RLOCKED , or .Dv RA_WLOCKED : .Bl -tag -width ".Dv RA_NOTRECURSED" .It Dv RA_RECURSED Assert that the current thread holds a recursive lock of .Fa rm . .It Dv RA_NOTRECURSED Assert that the current thread does not hold a recursive lock of .Fa rm . .El .El .Bl -tag -width indent .It Fn rms_init "struct rmslock *rms" "const char *name" Initialize the sleepable read-mostly lock .Fa rms . The .Fa name description is used as the .Fa wmesg parameter to the .Xr msleep 9 routine. This function must be called before any other operations on the lock. .It Fn rms_rlock "struct rmslock *rms" Lock .Fa rms as a reader. If any thread holds this lock exclusively, the current thread blocks. .It Fn rms_wlock "struct rmslock *rms" Lock .Fa rms as a writer. If the lock is already taken, the current thread blocks. The .Fn rms_wlock function cannot be called recursively. .It Fn rms_runlock "struct rmslock *rms" This function releases a shared lock previously acquired by .Fn rms_rlock . .It Fn rms_wunlock "struct rmslock *rms" This function releases an exclusive lock previously acquired by .Fn rms_wlock . .It Fn rms_destroy "struct rmslock *rms" This function destroys a lock previously initialized with .Fn rms_init . The .Fa rms lock must be unlocked. .El 
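.Sh EXAMPLES
The following is a minimal sketch of typical
.Nm
usage; the
.Vt struct foo
data and the surrounding function names are purely illustrative and not part
of this interface:
.Bd -literal -offset indent
static struct rmlock foo_lock;
RM_SYSINIT(foo_lock_init, &foo_lock, "foo lock");

static struct foo *foo_head;	/* data protected by foo_lock */

void
foo_read(void)
{
	struct rm_priotracker tracker;

	rm_rlock(&foo_lock, &tracker);
	/* Read foo_head and the data it points to. */
	rm_runlock(&foo_lock, &tracker);
}

void
foo_replace(struct foo *new_head)
{
	rm_wlock(&foo_lock);
	foo_head = new_head;
	rm_wunlock(&foo_lock);
}
.Ed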
.Sh SEE ALSO .Xr locking 9 , .Xr mutex 9 , .Xr panic 9 , .Xr rwlock 9 , .Xr sema 9 , .Xr sleep 9 , .Xr sx 9 .Sh HISTORY These functions appeared in .Fx 7.0 . .Sh AUTHORS .An -nosplit The .Nm facility was written by .An "Stephan Uphoff" . This manual page was written by .An "Gleb Smirnoff" for rwlock and modified to reflect rmlock by .An "Stephan Uphoff" . .Sh BUGS The .Nm implementation is currently not optimized for single processor systems. .Pp .Fn rm_try_rlock can fail transiently even when there is no writer, while another reader updates the state on the local CPU. .Pp The .Nm implementation uses a single per CPU list shared by all rmlocks in the system. If rmlocks become popular, hashing to multiple per CPU queues may be needed to speed up the writer lock process. diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c index f661e209b633..7230a00e357b 100644 --- a/sys/kern/kern_rmlock.c +++ b/sys/kern/kern_rmlock.c @@ -1,1250 +1,1252 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007 Stephan Uphoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Machine independent bits of reader/writer lock implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif /* * A cookie to mark destroyed rmlocks. This is stored in the head of * rm_activeReaders. 
*/ #define RM_DESTROYED ((void *)0xdead) #define rm_destroyed(rm) \ (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED) #define RMPF_ONQUEUE 1 #define RMPF_SIGNAL 2 #ifndef INVARIANTS #define _rm_assert(c, what, file, line) #endif static void assert_rm(const struct lock_object *lock, int what); #ifdef DDB static void db_show_rm(const struct lock_object *lock); #endif static void lock_rm(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_rm(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_rm(struct lock_object *lock); struct lock_class lock_class_rm = { .lc_name = "rm", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, .lc_assert = assert_rm, #ifdef DDB .lc_ddb_show = db_show_rm, #endif .lc_lock = lock_rm, .lc_unlock = unlock_rm, #ifdef KDTRACE_HOOKS .lc_owner = owner_rm, #endif }; struct lock_class lock_class_rm_sleepable = { .lc_name = "sleepable rm", .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE, .lc_assert = assert_rm, #ifdef DDB .lc_ddb_show = db_show_rm, #endif .lc_lock = lock_rm, .lc_unlock = unlock_rm, #ifdef KDTRACE_HOOKS .lc_owner = owner_rm, #endif }; static void assert_rm(const struct lock_object *lock, int what) { rm_assert((const struct rmlock *)lock, what); } static void lock_rm(struct lock_object *lock, uintptr_t how) { struct rmlock *rm; struct rm_priotracker *tracker; rm = (struct rmlock *)lock; if (how == 0) rm_wlock(rm); else { tracker = (struct rm_priotracker *)how; rm_rlock(rm, tracker); } } static uintptr_t unlock_rm(struct lock_object *lock) { struct thread *td; struct pcpu *pc; struct rmlock *rm; struct rm_queue *queue; struct rm_priotracker *tracker; uintptr_t how; rm = (struct rmlock *)lock; tracker = NULL; how = 0; rm_assert(rm, RA_LOCKED | RA_NOTRECURSED); if (rm_wowned(rm)) rm_wunlock(rm); else { /* * Find the right rm_priotracker structure for curthread. * The guarantee about its uniqueness is given by the fact * we already asserted the lock wasn't recursively acquired. */ critical_enter(); td = curthread; pc = get_pcpu(); for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td)) { how = (uintptr_t)tracker; break; } } KASSERT(tracker != NULL, ("rm_priotracker is non-NULL when lock held in read mode")); critical_exit(); rm_runlock(rm, tracker); } return (how); } #ifdef KDTRACE_HOOKS static int owner_rm(const struct lock_object *lock, struct thread **owner) { const struct rmlock *rm; struct lock_class *lc; rm = (const struct rmlock *)lock; lc = LOCK_CLASS(&rm->rm_wlock_object); return (lc->lc_owner(&rm->rm_wlock_object, owner)); } #endif static struct mtx rm_spinlock; MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN); /* * Add or remove tracker from per-cpu list. * * The per-cpu list can be traversed at any time in forward direction from an * interrupt on the *local* cpu. */ static void inline rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker) { struct rm_queue *next; /* Initialize all tracker pointers */ tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue; next = pc->pc_rm_queue.rmq_next; tracker->rmp_cpuQueue.rmq_next = next; /* rmq_prev is not used during froward traversal. */ next->rmq_prev = &tracker->rmp_cpuQueue; /* Update pointer to first element. */ pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue; } /* * Return a count of the number of trackers the thread 'td' already * has on this CPU for the lock 'rm'. 
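 *
 * For example, if the lock was initialized with RM_RECURSE and the thread
 * has read-locked 'rm' N times (each time with its own tracker), N matching
 * entries are found here, so the count also serves as the recursion depth.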
*/ static int rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm, const struct thread *td) { struct rm_queue *queue; struct rm_priotracker *tracker; int count; count = 0; for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td)) count++; } return (count); } static void inline rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker) { struct rm_queue *next, *prev; next = tracker->rmp_cpuQueue.rmq_next; prev = tracker->rmp_cpuQueue.rmq_prev; /* Not used during forward traversal. */ next->rmq_prev = prev; /* Remove from list. */ prev->rmq_next = next; } static void rm_cleanIPI(void *arg) { struct pcpu *pc; struct rmlock *rm = arg; struct rm_priotracker *tracker; struct rm_queue *queue; pc = get_pcpu(); for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) { tracker->rmp_flags = RMPF_ONQUEUE; mtx_lock_spin(&rm_spinlock); LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, rmp_qentry); mtx_unlock_spin(&rm_spinlock); } } } void rm_init_flags(struct rmlock *rm, const char *name, int opts) { struct lock_class *lc; int liflags, xflags; liflags = 0; if (!(opts & RM_NOWITNESS)) liflags |= LO_WITNESS; if (opts & RM_RECURSE) liflags |= LO_RECURSABLE; if (opts & RM_NEW) liflags |= LO_NEW; + if (opts & RM_DUPOK) + liflags |= LO_DUPOK; rm->rm_writecpus = all_cpus; LIST_INIT(&rm->rm_activeReaders); if (opts & RM_SLEEPABLE) { liflags |= LO_SLEEPABLE; lc = &lock_class_rm_sleepable; xflags = (opts & RM_NEW ? SX_NEW : 0); sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", xflags | SX_NOWITNESS); } else { lc = &lock_class_rm; xflags = (opts & RM_NEW ? MTX_NEW : 0); mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", xflags | MTX_NOWITNESS); } lock_init(&rm->lock_object, lc, name, NULL, liflags); } void rm_init(struct rmlock *rm, const char *name) { rm_init_flags(rm, name, 0); } void rm_destroy(struct rmlock *rm) { rm_assert(rm, RA_UNLOCKED); LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED; if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_destroy(&rm->rm_lock_sx); else mtx_destroy(&rm->rm_lock_mtx); lock_destroy(&rm->lock_object); } int rm_wowned(const struct rmlock *rm) { if (rm->lock_object.lo_flags & LO_SLEEPABLE) return (sx_xlocked(&rm->rm_lock_sx)); else return (mtx_owned(&rm->rm_lock_mtx)); } void rm_sysinit(void *arg) { struct rm_args *args; args = arg; rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags); } static __noinline int _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) { struct pcpu *pc; critical_enter(); pc = get_pcpu(); /* Check if we just need to do a proper critical_exit. */ if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) { critical_exit(); return (1); } /* Remove our tracker from the per-cpu list. */ rm_tracker_remove(pc, tracker); /* * Check to see if the IPI granted us the lock after all. The load of * rmp_flags must happen after the tracker is removed from the list. */ atomic_interrupt_fence(); if (tracker->rmp_flags) { /* Just add back tracker - we hold the lock. */ rm_tracker_add(pc, tracker); critical_exit(); return (1); } /* * We allow readers to acquire a lock even if a writer is blocked if * the lock is recursive and the reader already holds the lock. 
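	 *
	 * Illustrative (hypothetical) caller, with the lock created using
	 * RM_RECURSE:
	 *
	 *	struct rm_priotracker t1, t2;
	 *
	 *	rm_rlock(&foo_rm, &t1);
	 *	rm_rlock(&foo_rm, &t2);	<- granted here even if a writer waits
	 *	rm_runlock(&foo_rm, &t2);
	 *	rm_runlock(&foo_rm, &t1);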
*/ if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) { /* * Just grant the lock if this thread already has a tracker * for this lock on the per-cpu queue. */ if (rm_trackers_present(pc, rm, curthread) != 0) { mtx_lock_spin(&rm_spinlock); LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, rmp_qentry); tracker->rmp_flags = RMPF_ONQUEUE; mtx_unlock_spin(&rm_spinlock); rm_tracker_add(pc, tracker); critical_exit(); return (1); } } sched_unpin(); critical_exit(); if (trylock) { if (rm->lock_object.lo_flags & LO_SLEEPABLE) { if (!sx_try_xlock(&rm->rm_lock_sx)) return (0); } else { if (!mtx_trylock(&rm->rm_lock_mtx)) return (0); } } else { if (rm->lock_object.lo_flags & LO_SLEEPABLE) { THREAD_SLEEPING_OK(); sx_xlock(&rm->rm_lock_sx); THREAD_NO_SLEEPING(); } else mtx_lock(&rm->rm_lock_mtx); } critical_enter(); pc = get_pcpu(); CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus); rm_tracker_add(pc, tracker); sched_pin(); critical_exit(); if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_xunlock(&rm->rm_lock_sx); else mtx_unlock(&rm->rm_lock_mtx); return (1); } int _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) { struct thread *td = curthread; struct pcpu *pc; if (SCHEDULER_STOPPED()) return (1); tracker->rmp_flags = 0; tracker->rmp_thread = td; tracker->rmp_rmlock = rm; if (rm->lock_object.lo_flags & LO_SLEEPABLE) THREAD_NO_SLEEPING(); td->td_critnest++; /* critical_enter(); */ atomic_interrupt_fence(); pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ rm_tracker_add(pc, tracker); sched_pin(); atomic_interrupt_fence(); td->td_critnest--; /* * Fast path to combine two common conditions into a single * conditional jump. */ if (__predict_true(0 == (td->td_owepreempt | CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))) return (1); /* We do not have a read token and need to acquire one. */ return _rm_rlock_hard(rm, tracker, trylock); } static __noinline void _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker) { if (td->td_owepreempt) { td->td_critnest++; critical_exit(); } if (!tracker->rmp_flags) return; mtx_lock_spin(&rm_spinlock); LIST_REMOVE(tracker, rmp_qentry); if (tracker->rmp_flags & RMPF_SIGNAL) { struct rmlock *rm; struct turnstile *ts; rm = tracker->rmp_rmlock; turnstile_chain_lock(&rm->lock_object); mtx_unlock_spin(&rm_spinlock); ts = turnstile_lookup(&rm->lock_object); turnstile_signal(ts, TS_EXCLUSIVE_QUEUE); turnstile_unpend(ts); turnstile_chain_unlock(&rm->lock_object); } else mtx_unlock_spin(&rm_spinlock); } void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker) { struct pcpu *pc; struct thread *td = tracker->rmp_thread; if (SCHEDULER_STOPPED()) return; td->td_critnest++; /* critical_enter(); */ pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ rm_tracker_remove(pc, tracker); td->td_critnest--; sched_unpin(); if (rm->lock_object.lo_flags & LO_SLEEPABLE) THREAD_SLEEPING_OK(); if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags))) return; _rm_unlock_hard(td, tracker); } void _rm_wlock(struct rmlock *rm) { struct rm_priotracker *prio; struct turnstile *ts; cpuset_t readcpus; if (SCHEDULER_STOPPED()) return; if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_xlock(&rm->rm_lock_sx); else mtx_lock(&rm->rm_lock_mtx); if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) { /* Get all read tokens back */ readcpus = all_cpus; CPU_ANDNOT(&readcpus, &rm->rm_writecpus); rm->rm_writecpus = all_cpus; /* * Assumes rm->rm_writecpus update is visible on other CPUs * before rm_cleanIPI is called. 
*/ #ifdef SMP smp_rendezvous_cpus(readcpus, smp_no_rendezvous_barrier, rm_cleanIPI, smp_no_rendezvous_barrier, rm); #else rm_cleanIPI(rm); #endif mtx_lock_spin(&rm_spinlock); while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) { ts = turnstile_trywait(&rm->lock_object); prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL; mtx_unlock_spin(&rm_spinlock); turnstile_wait(ts, prio->rmp_thread, TS_EXCLUSIVE_QUEUE); mtx_lock_spin(&rm_spinlock); } mtx_unlock_spin(&rm_spinlock); } } void _rm_wunlock(struct rmlock *rm) { if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_xunlock(&rm->rm_lock_sx); else mtx_unlock(&rm->rm_lock_mtx); } #if LOCK_DEBUG > 0 void _rm_wlock_debug(struct rmlock *rm, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d", curthread, rm->lock_object.lo_name, file, line)); KASSERT(!rm_destroyed(rm), ("rm_wlock() of destroyed rmlock @ %s:%d", file, line)); _rm_assert(rm, RA_UNLOCKED, file, line); WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); _rm_wlock(rm); LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line); WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(!rm_destroyed(rm), ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line)); _rm_assert(rm, RA_WLOCKED, file, line); WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line); _rm_wunlock(rm); TD_LOCKS_DEC(curthread); } int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, int trylock, const char *file, int line) { if (SCHEDULER_STOPPED()) return (1); #ifdef INVARIANTS if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) { critical_enter(); KASSERT(rm_trackers_present(get_pcpu(), rm, curthread) == 0, ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n", rm->lock_object.lo_name, file, line)); critical_exit(); } #endif KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d", curthread, rm->lock_object.lo_name, file, line)); KASSERT(!rm_destroyed(rm), ("rm_rlock() of destroyed rmlock @ %s:%d", file, line)); if (!trylock) { KASSERT(!rm_wowned(rm), ("rm_rlock: wlock already held for %s @ %s:%d", rm->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_NOSLEEP, file, line, NULL); } if (_rm_rlock(rm, tracker, trylock)) { if (trylock) LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file, line); else LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line); WITNESS_LOCK(&rm->lock_object, LOP_NOSLEEP, file, line); TD_LOCKS_INC(curthread); return (1); } else if (trylock) LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line); return (0); } void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(!rm_destroyed(rm), ("rm_runlock() of destroyed rmlock @ %s:%d", file, line)); _rm_assert(rm, RA_RLOCKED, file, line); WITNESS_UNLOCK(&rm->lock_object, 0, file, line); LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line); _rm_runlock(rm, tracker); TD_LOCKS_DEC(curthread); } #else /* * Just strip out file and line arguments if no lock debugging is enabled in * the kernel - we are called from a kernel module. 
*/ void _rm_wlock_debug(struct rmlock *rm, const char *file, int line) { _rm_wlock(rm); } void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) { _rm_wunlock(rm); } int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, int trylock, const char *file, int line) { return _rm_rlock(rm, tracker, trylock); } void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line) { _rm_runlock(rm, tracker); } #endif #ifdef INVARIANT_SUPPORT #ifndef INVARIANTS #undef _rm_assert #endif /* * Note that this does not need to use witness_assert() for read lock * assertions since an exact count of read locks held by this thread * is computable. */ void _rm_assert(const struct rmlock *rm, int what, const char *file, int line) { int count; if (SCHEDULER_STOPPED()) return; switch (what) { case RA_LOCKED: case RA_LOCKED | RA_RECURSED: case RA_LOCKED | RA_NOTRECURSED: case RA_RLOCKED: case RA_RLOCKED | RA_RECURSED: case RA_RLOCKED | RA_NOTRECURSED: /* * Handle the write-locked case. Unlike other * primitives, writers can never recurse. */ if (rm_wowned(rm)) { if (what & RA_RLOCKED) panic("Lock %s exclusively locked @ %s:%d\n", rm->lock_object.lo_name, file, line); if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rm->lock_object.lo_name, file, line); break; } critical_enter(); count = rm_trackers_present(get_pcpu(), rm, curthread); critical_exit(); if (count == 0) panic("Lock %s not %slocked @ %s:%d\n", rm->lock_object.lo_name, (what & RA_RLOCKED) ? "read " : "", file, line); if (count > 1) { if (what & RA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", rm->lock_object.lo_name, file, line); } else if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rm->lock_object.lo_name, file, line); break; case RA_WLOCKED: if (!rm_wowned(rm)) panic("Lock %s not exclusively locked @ %s:%d\n", rm->lock_object.lo_name, file, line); break; case RA_UNLOCKED: if (rm_wowned(rm)) panic("Lock %s exclusively locked @ %s:%d\n", rm->lock_object.lo_name, file, line); critical_enter(); count = rm_trackers_present(get_pcpu(), rm, curthread); critical_exit(); if (count != 0) panic("Lock %s read locked @ %s:%d\n", rm->lock_object.lo_name, file, line); break; default: panic("Unknown rm lock assertion: %d @ %s:%d", what, file, line); } } #endif /* INVARIANT_SUPPORT */ #ifdef DDB static void print_tracker(struct rm_priotracker *tr) { struct thread *td; td = tr->rmp_thread; db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (tr->rmp_flags & RMPF_ONQUEUE) { db_printf("ONQUEUE"); if (tr->rmp_flags & RMPF_SIGNAL) db_printf(",SIGNAL"); } else db_printf("0"); db_printf("}\n"); } static void db_show_rm(const struct lock_object *lock) { struct rm_priotracker *tr; struct rm_queue *queue; const struct rmlock *rm; struct lock_class *lc; struct pcpu *pc; rm = (const struct rmlock *)lock; db_printf(" writecpus: "); ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus)); db_printf("\n"); db_printf(" per-CPU readers:\n"); STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tr = (struct rm_priotracker *)queue; if (tr->rmp_rmlock == rm) print_tracker(tr); } db_printf(" active readers:\n"); LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry) print_tracker(tr); lc = LOCK_CLASS(&rm->rm_wlock_object); db_printf("Backing write-lock (%s):\n", lc->lc_name); lc->lc_ddb_show(&rm->rm_wlock_object); } #endif /* * Read-mostly sleepable 
locks. * * These primitives allow both readers and writers to sleep. However, neither * readers nor writers are tracked and subsequently there is no priority * propagation. * * They are intended to be only used when write-locking is almost never needed * (e.g., they can guard against unloading a kernel module) while read-locking * happens all the time. * * Concurrent writers take turns taking the lock while going off cpu. If this is * of concern for your usecase, this is not the right primitive. * * Neither rms_rlock nor rms_runlock use thread fences. Instead interrupt * fences are inserted to ensure ordering with the code executed in the IPI * handler. * * No attempt is made to track which CPUs read locked at least once, * consequently write locking sends IPIs to all of them. This will become a * problem at some point. The easiest way to lessen it is to provide a bitmap. */ #define RMS_NOOWNER ((void *)0x1) #define RMS_TRANSIENT ((void *)0x2) #define RMS_FLAGMASK 0xf struct rmslock_pcpu { int influx; int readers; }; _Static_assert(sizeof(struct rmslock_pcpu) == 8, "bad size"); /* * Internal routines */ static struct rmslock_pcpu * rms_int_pcpu(struct rmslock *rms) { CRITICAL_ASSERT(curthread); return (zpcpu_get(rms->pcpu)); } static struct rmslock_pcpu * rms_int_remote_pcpu(struct rmslock *rms, int cpu) { return (zpcpu_get_cpu(rms->pcpu, cpu)); } static void rms_int_influx_enter(struct rmslock *rms, struct rmslock_pcpu *pcpu) { CRITICAL_ASSERT(curthread); MPASS(pcpu->influx == 0); pcpu->influx = 1; } static void rms_int_influx_exit(struct rmslock *rms, struct rmslock_pcpu *pcpu) { CRITICAL_ASSERT(curthread); MPASS(pcpu->influx == 1); pcpu->influx = 0; } #ifdef INVARIANTS static void rms_int_debug_readers_inc(struct rmslock *rms) { int old; old = atomic_fetchadd_int(&rms->debug_readers, 1); KASSERT(old >= 0, ("%s: bad readers count %d\n", __func__, old)); } static void rms_int_debug_readers_dec(struct rmslock *rms) { int old; old = atomic_fetchadd_int(&rms->debug_readers, -1); KASSERT(old > 0, ("%s: bad readers count %d\n", __func__, old)); } #else static void rms_int_debug_readers_inc(struct rmslock *rms) { } static void rms_int_debug_readers_dec(struct rmslock *rms) { } #endif static void rms_int_readers_inc(struct rmslock *rms, struct rmslock_pcpu *pcpu) { CRITICAL_ASSERT(curthread); rms_int_debug_readers_inc(rms); pcpu->readers++; } static void rms_int_readers_dec(struct rmslock *rms, struct rmslock_pcpu *pcpu) { CRITICAL_ASSERT(curthread); rms_int_debug_readers_dec(rms); pcpu->readers--; } /* * Public API */ void rms_init(struct rmslock *rms, const char *name) { rms->owner = RMS_NOOWNER; rms->writers = 0; rms->readers = 0; rms->debug_readers = 0; mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW); rms->pcpu = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK | M_ZERO); } void rms_destroy(struct rmslock *rms) { MPASS(rms->writers == 0); MPASS(rms->readers == 0); mtx_destroy(&rms->mtx); uma_zfree_pcpu(pcpu_zone_8, rms->pcpu); } static void __noinline rms_rlock_fallback(struct rmslock *rms) { rms_int_influx_exit(rms, rms_int_pcpu(rms)); critical_exit(); mtx_lock(&rms->mtx); while (rms->writers > 0) msleep(&rms->readers, &rms->mtx, PUSER - 1, mtx_name(&rms->mtx), 0); critical_enter(); rms_int_readers_inc(rms, rms_int_pcpu(rms)); mtx_unlock(&rms->mtx); critical_exit(); } void rms_rlock(struct rmslock *rms) { struct rmslock_pcpu *pcpu; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); MPASS(atomic_load_ptr(&rms->owner) != curthread); critical_enter(); pcpu = rms_int_pcpu(rms); 
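	/*
	 * Mark this CPU as updating its read count so that a concurrent
	 * writer's IPI handler leaves the counter alone and rms_wait_func()
	 * spins until the update is complete.
	 */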
rms_int_influx_enter(rms, pcpu); atomic_interrupt_fence(); if (__predict_false(rms->writers > 0)) { rms_rlock_fallback(rms); return; } atomic_interrupt_fence(); rms_int_readers_inc(rms, pcpu); atomic_interrupt_fence(); rms_int_influx_exit(rms, pcpu); critical_exit(); } int rms_try_rlock(struct rmslock *rms) { struct rmslock_pcpu *pcpu; MPASS(atomic_load_ptr(&rms->owner) != curthread); critical_enter(); pcpu = rms_int_pcpu(rms); rms_int_influx_enter(rms, pcpu); atomic_interrupt_fence(); if (__predict_false(rms->writers > 0)) { rms_int_influx_exit(rms, pcpu); critical_exit(); return (0); } atomic_interrupt_fence(); rms_int_readers_inc(rms, pcpu); atomic_interrupt_fence(); rms_int_influx_exit(rms, pcpu); critical_exit(); return (1); } static void __noinline rms_runlock_fallback(struct rmslock *rms) { rms_int_influx_exit(rms, rms_int_pcpu(rms)); critical_exit(); mtx_lock(&rms->mtx); MPASS(rms->writers > 0); MPASS(rms->readers > 0); MPASS(rms->debug_readers == rms->readers); rms_int_debug_readers_dec(rms); rms->readers--; if (rms->readers == 0) wakeup_one(&rms->writers); mtx_unlock(&rms->mtx); } void rms_runlock(struct rmslock *rms) { struct rmslock_pcpu *pcpu; critical_enter(); pcpu = rms_int_pcpu(rms); rms_int_influx_enter(rms, pcpu); atomic_interrupt_fence(); if (__predict_false(rms->writers > 0)) { rms_runlock_fallback(rms); return; } atomic_interrupt_fence(); rms_int_readers_dec(rms, pcpu); atomic_interrupt_fence(); rms_int_influx_exit(rms, pcpu); critical_exit(); } struct rmslock_ipi { struct rmslock *rms; struct smp_rendezvous_cpus_retry_arg srcra; }; static void rms_action_func(void *arg) { struct rmslock_ipi *rmsipi; struct rmslock_pcpu *pcpu; struct rmslock *rms; rmsipi = __containerof(arg, struct rmslock_ipi, srcra); rms = rmsipi->rms; pcpu = rms_int_pcpu(rms); if (pcpu->influx) return; if (pcpu->readers != 0) { atomic_add_int(&rms->readers, pcpu->readers); pcpu->readers = 0; } smp_rendezvous_cpus_done(arg); } static void rms_wait_func(void *arg, int cpu) { struct rmslock_ipi *rmsipi; struct rmslock_pcpu *pcpu; struct rmslock *rms; rmsipi = __containerof(arg, struct rmslock_ipi, srcra); rms = rmsipi->rms; pcpu = rms_int_remote_pcpu(rms, cpu); while (atomic_load_int(&pcpu->influx)) cpu_spinwait(); } #ifdef INVARIANTS static void rms_assert_no_pcpu_readers(struct rmslock *rms) { struct rmslock_pcpu *pcpu; int cpu; CPU_FOREACH(cpu) { pcpu = rms_int_remote_pcpu(rms, cpu); if (pcpu->readers != 0) { panic("%s: got %d readers on cpu %d\n", __func__, pcpu->readers, cpu); } } } #else static void rms_assert_no_pcpu_readers(struct rmslock *rms) { } #endif static void rms_wlock_switch(struct rmslock *rms) { struct rmslock_ipi rmsipi; MPASS(rms->readers == 0); MPASS(rms->writers == 1); rmsipi.rms = rms; smp_rendezvous_cpus_retry(all_cpus, smp_no_rendezvous_barrier, rms_action_func, smp_no_rendezvous_barrier, rms_wait_func, &rmsipi.srcra); } void rms_wlock(struct rmslock *rms) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); MPASS(atomic_load_ptr(&rms->owner) != curthread); mtx_lock(&rms->mtx); rms->writers++; if (rms->writers > 1) { msleep(&rms->owner, &rms->mtx, (PUSER - 1), mtx_name(&rms->mtx), 0); MPASS(rms->readers == 0); KASSERT(rms->owner == RMS_TRANSIENT, ("%s: unexpected owner value %p\n", __func__, rms->owner)); goto out_grab; } KASSERT(rms->owner == RMS_NOOWNER, ("%s: unexpected owner value %p\n", __func__, rms->owner)); rms_wlock_switch(rms); rms_assert_no_pcpu_readers(rms); if (rms->readers > 0) { msleep(&rms->writers, &rms->mtx, (PUSER - 1), mtx_name(&rms->mtx), 0); } 
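	/*
	 * At this point there are no active readers (rms->readers == 0);
	 * take exclusive ownership before dropping the internal mutex.
	 */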
out_grab: rms->owner = curthread; rms_assert_no_pcpu_readers(rms); mtx_unlock(&rms->mtx); MPASS(rms->readers == 0); } void rms_wunlock(struct rmslock *rms) { mtx_lock(&rms->mtx); KASSERT(rms->owner == curthread, ("%s: unexpected owner value %p\n", __func__, rms->owner)); MPASS(rms->writers >= 1); MPASS(rms->readers == 0); rms->writers--; if (rms->writers > 0) { wakeup_one(&rms->owner); rms->owner = RMS_TRANSIENT; } else { wakeup(&rms->readers); rms->owner = RMS_NOOWNER; } mtx_unlock(&rms->mtx); } void rms_unlock(struct rmslock *rms) { if (rms_wowned(rms)) rms_wunlock(rms); else rms_runlock(rms); } diff --git a/sys/sys/rmlock.h b/sys/sys/rmlock.h index 3e4db5d853c6..5aaf8f039026 100644 --- a/sys/sys/rmlock.h +++ b/sys/sys/rmlock.h @@ -1,178 +1,179 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007 Stephan Uphoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_RMLOCK_H_ #define _SYS_RMLOCK_H_ #include #include #include #include #ifdef _KERNEL /* * Flags passed to rm_init_flags(9). 
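 *
 * RM_DUPOK maps to LO_DUPOK and tells witness(4) not to log messages about
 * acquiring duplicate locks of this type; see rmlock(9) for the other flags.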
*/ #define RM_NOWITNESS 0x00000001 #define RM_RECURSE 0x00000002 #define RM_SLEEPABLE 0x00000004 #define RM_NEW 0x00000008 +#define RM_DUPOK 0x00000010 void rm_init(struct rmlock *rm, const char *name); void rm_init_flags(struct rmlock *rm, const char *name, int opts); void rm_destroy(struct rmlock *rm); int rm_wowned(const struct rmlock *rm); void rm_sysinit(void *arg); void _rm_wlock_debug(struct rmlock *rm, const char *file, int line); void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line); int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, int trylock, const char *file, int line); void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line); void _rm_wlock(struct rmlock *rm); void _rm_wunlock(struct rmlock *rm); int _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock); void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker); #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) void _rm_assert(const struct rmlock *rm, int what, const char *file, int line); #endif /* * Public interface for lock operations. */ #ifndef LOCK_DEBUG #error LOCK_DEBUG not defined, include before #endif #if LOCK_DEBUG > 0 #define rm_wlock(rm) _rm_wlock_debug((rm), LOCK_FILE, LOCK_LINE) #define rm_wunlock(rm) _rm_wunlock_debug((rm), LOCK_FILE, LOCK_LINE) #define rm_rlock(rm,tracker) \ ((void)_rm_rlock_debug((rm),(tracker), 0, LOCK_FILE, LOCK_LINE )) #define rm_try_rlock(rm,tracker) \ _rm_rlock_debug((rm),(tracker), 1, LOCK_FILE, LOCK_LINE ) #define rm_runlock(rm,tracker) \ _rm_runlock_debug((rm), (tracker), LOCK_FILE, LOCK_LINE ) #else #define rm_wlock(rm) _rm_wlock((rm)) #define rm_wunlock(rm) _rm_wunlock((rm)) #define rm_rlock(rm,tracker) ((void)_rm_rlock((rm),(tracker), 0)) #define rm_try_rlock(rm,tracker) _rm_rlock((rm),(tracker), 1) #define rm_runlock(rm,tracker) _rm_runlock((rm), (tracker)) #endif #define rm_sleep(chan, rm, pri, wmesg, timo) \ _sleep((chan), &(rm)->lock_object, (pri), (wmesg), \ tick_sbt * (timo), 0, C_HARDCLOCK) struct rm_args { struct rmlock *ra_rm; const char *ra_desc; int ra_flags; }; #define RM_SYSINIT_FLAGS(name, rm, desc, flags) \ static struct rm_args name##_args = { \ (rm), \ (desc), \ (flags), \ }; \ SYSINIT(name##_rm_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rm_sysinit, &name##_args); \ SYSUNINIT(name##_rm_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rm_destroy, (rm)) #define RM_SYSINIT(name, rm, desc) RM_SYSINIT_FLAGS(name, rm, desc, 0) #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define RA_LOCKED LA_LOCKED #define RA_RLOCKED LA_SLOCKED #define RA_WLOCKED LA_XLOCKED #define RA_UNLOCKED LA_UNLOCKED #define RA_RECURSED LA_RECURSED #define RA_NOTRECURSED LA_NOTRECURSED #endif #ifdef INVARIANTS #define rm_assert(rm, what) _rm_assert((rm), (what), LOCK_FILE, LOCK_LINE) #else #define rm_assert(rm, what) #endif void rms_init(struct rmslock *rms, const char *name); void rms_destroy(struct rmslock *rms); void rms_rlock(struct rmslock *rms); int rms_try_rlock(struct rmslock *rms); void rms_runlock(struct rmslock *rms); void rms_wlock(struct rmslock *rms); void rms_wunlock(struct rmslock *rms); void rms_unlock(struct rmslock *rms); static inline int rms_wowned(struct rmslock *rms) { return (rms->owner == curthread); } #ifdef INVARIANTS /* * For assertion purposes. * * Main limitation is that we at best can tell there are readers, but not * whether curthread is one of them. 
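 *
 * Hypothetical example: code that must run with the lock held in either
 * mode can assert MPASS(rms_owned_any(rms)).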
*/ static inline int rms_rowned(struct rmslock *rms) { return (rms->debug_readers > 0); } static inline int rms_owned_any(struct rmslock *rms) { if (rms_wowned(rms)) return (1); return (rms_rowned(rms)); } #endif #endif /* _KERNEL */ #endif /* !_SYS_RMLOCK_H_ */
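As a closing note, a minimal consumer of the sleepable rms API touched by this change could look like the sketch below; the foo_* names are hypothetical and not part of the patch:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmslock foo_rms;

static void
foo_setup(void)
{
	/* The name doubles as the wmesg shown while sleeping. */
	rms_init(&foo_rms, "foo_rms");
}

static void
foo_read(void)
{
	rms_rlock(&foo_rms);
	/* Read-side work; sleeping is permitted here. */
	rms_runlock(&foo_rms);
}

static void
foo_update(void)
{
	rms_wlock(&foo_rms);
	/* Modify the data protected by foo_rms. */
	rms_wunlock(&foo_rms);
}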