Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/kern_synch.c
Show All 38 Lines | |||||
#include <sys/cdefs.h> | #include <sys/cdefs.h> | ||||
__FBSDID("$FreeBSD$"); | __FBSDID("$FreeBSD$"); | ||||
#include "opt_ktrace.h" | #include "opt_ktrace.h" | ||||
#include "opt_sched.h" | #include "opt_sched.h" | ||||
#include <sys/param.h> | #include <sys/param.h> | ||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#include <sys/blockcount.h> | |||||
#include <sys/condvar.h> | #include <sys/condvar.h> | ||||
#include <sys/kdb.h> | #include <sys/kdb.h> | ||||
#include <sys/kernel.h> | #include <sys/kernel.h> | ||||
#include <sys/ktr.h> | #include <sys/ktr.h> | ||||
#include <sys/lock.h> | #include <sys/lock.h> | ||||
#include <sys/mutex.h> | #include <sys/mutex.h> | ||||
#include <sys/proc.h> | #include <sys/proc.h> | ||||
#include <sys/resourcevar.h> | #include <sys/resourcevar.h> | ||||
#include <sys/refcount.h> | |||||
#include <sys/sched.h> | #include <sys/sched.h> | ||||
#include <sys/sdt.h> | #include <sys/sdt.h> | ||||
#include <sys/signalvar.h> | #include <sys/signalvar.h> | ||||
#include <sys/sleepqueue.h> | #include <sys/sleepqueue.h> | ||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <sys/sx.h> | #include <sys/sx.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <sys/sysproto.h> | #include <sys/sysproto.h> | ||||
▲ Show 20 Lines • Show All 268 Lines • ▼ Show 20 Lines | if (sbt > 0) | ||||
DELAY(sbt); | DELAY(sbt); | ||||
return (EWOULDBLOCK); | return (EWOULDBLOCK); | ||||
} | } | ||||
return (_sleep(&pause_wchan[curcpu], NULL, | return (_sleep(&pause_wchan[curcpu], NULL, | ||||
(flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags)); | (flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags)); | ||||
} | } | ||||
/*
 * Potentially release the last reference for refcount. Check for
 * unlikely conditions and signal the caller as to whether it was
 * the final ref.
 */
bool
refcount_release_last(volatile u_int *count, u_int n, u_int old)
{
	u_int waiter;

	/*
	 * "old" is the caller's pre-decrement snapshot of the counter;
	 * split it into the waiter flag and the plain reference count.
	 * NOTE(review): presumably the caller has already subtracted "n"
	 * from *count and observed it reach zero — confirm against the
	 * refcount_releasen() callers.
	 */
	waiter = old & REFCOUNT_WAITER;
	old = REFCOUNT_COUNT(old);
	if (__predict_false(n > old || REFCOUNT_SATURATED(old))) {
		/*
		 * Avoid multiple destructor invocations if underflow occurred.
		 * This is not perfect since the memory backing the containing
		 * object may already have been reallocated.
		 */
		_refcount_update_saturated(count);
		return (false);
	}

	/*
	 * Attempt to atomically clear the waiter bit.  Wakeup waiters
	 * if we are successful.  The cmpset only succeeds when the counter
	 * is exactly zero-with-waiter-flag, so a concurrent re-reference
	 * suppresses the wakeup.
	 */
	if (waiter != 0 && atomic_cmpset_int(count, REFCOUNT_WAITER, 0))
		wakeup(__DEVOLATILE(u_int *, count));

	/*
	 * Last reference.  Signal the user to call the destructor.
	 *
	 * Ensure that the destructor sees all updates.  This synchronizes
	 * with release fences from all routines which drop the count.
	 */
	atomic_thread_fence_acq();
	return (true);
}
/* | |||||
* Wait for a refcount wakeup. This does not guarantee that the ref is still | |||||
* zero on return and may be subject to transient wakeups. Callers wanting | |||||
* a precise answer should use refcount_wait(). | |||||
*/ | |||||
void | |||||
_refcount_sleep(volatile u_int *count, struct lock_object *lock, | |||||
const char *wmesg, int pri) | |||||
{ | |||||
void *wchan; | |||||
u_int old; | |||||
if (REFCOUNT_COUNT(*count) == 0) { | |||||
if (lock != NULL) | |||||
LOCK_CLASS(lock)->lc_unlock(lock); | |||||
return; | |||||
} | |||||
wchan = __DEVOLATILE(void *, count); | |||||
sleepq_lock(wchan); | |||||
if (lock != NULL) | |||||
LOCK_CLASS(lock)->lc_unlock(lock); | |||||
old = *count; | |||||
for (;;) { | |||||
if (REFCOUNT_COUNT(old) == 0) { | |||||
sleepq_release(wchan); | |||||
return; | |||||
} | |||||
if (old & REFCOUNT_WAITER) | |||||
break; | |||||
if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER)) | |||||
break; | |||||
} | |||||
sleepq_add(wchan, NULL, wmesg, 0, 0); | |||||
sleepq_wait(wchan, pri); | |||||
} | |||||
/* | |||||
* Make all threads sleeping on the specified identifier runnable. | * Make all threads sleeping on the specified identifier runnable. | ||||
*/ | */ | ||||
void | void | ||||
wakeup(const void *ident) | wakeup(const void *ident) | ||||
{ | { | ||||
int wakeup_swapper; | int wakeup_swapper; | ||||
sleepq_lock(ident); | sleepq_lock(ident); | ||||
Show All 29 Lines | wakeup_any(const void *ident) | ||||
int wakeup_swapper; | int wakeup_swapper; | ||||
sleepq_lock(ident); | sleepq_lock(ident); | ||||
wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR, | wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR, | ||||
0, 0); | 0, 0); | ||||
sleepq_release(ident); | sleepq_release(ident); | ||||
if (wakeup_swapper) | if (wakeup_swapper) | ||||
kick_proc0(); | kick_proc0(); | ||||
} | |||||
/* | |||||
* Signal sleeping waiters after the counter has reached zero. | |||||
*/ | |||||
void | |||||
_blockcount_wakeup(blockcount_t *count, u_int old) | |||||
{ | |||||
KASSERT(_BLOCKCOUNT_WAITERS(old), | |||||
("blockcount_wakeup: no waiters on %p", count)); | |||||
mjg: other blockcount routines use
```
__func__
```
instead of hardcoding the name
side note is… | |||||
Done Inline ActionsYes, I would like KASSERT() to do this automatically. Perhaps a coccinelle script can be used to fix up the ~9000 existing users to avoid printing the function name twice. markj: Yes, I would like KASSERT() to do this automatically. Perhaps a coccinelle script can be used… | |||||
Not Done Inline Actionscoccinelle is a little dodgy for sweeping like this and I don't think it's a good idea. Instead I created D23774. mjg: coccinelle is a little dodgy for sweeping like this and I don't think it's a good idea. Instead… | |||||
if (atomic_cmpset_int(&count->__count, _BLOCKCOUNT_WAITERS_FLAG, 0)) | |||||
wakeup(__DEVOLATILE(u_int *, count)); | |||||
Not Done Inline ActionsWith the struct in place this no longer needs to __DEVOLATILE. In fact the macro should probably get augmented to fail compilation if the arg is not volatile. mjg: With the struct in place this no longer needs to __DEVOLATILE. In fact the macro should… | |||||
} | |||||
/*
 * Wait for a wakeup. This does not guarantee that the count is still zero on
 * return and may be subject to transient wakeups. Callers wanting a precise
 * answer should use blockcount_wait() with an interlock.
 *
 * Return 0 if there is no work to wait for, and 1 if we slept waiting for work
 * to complete. In the latter case the counter value must be re-read.
 *
 * "lock" is an optional interlock: it is dropped before sleeping and, unless
 * PDROP is set in "prio", reacquired before returning.  Giant may not be used
 * as the interlock since it is dropped and reacquired around the sleep.
 */
int
_blockcount_sleep(blockcount_t *count, struct lock_object *lock,
    const char *wmesg, int prio)
{
	void *wchan;
	uintptr_t lock_state;
	u_int old;
	int ret;

	KASSERT(lock != &Giant.lock_object,
	    ("%s: cannot use Giant as the interlock", __func__));

	/*
	 * Synchronize with the fence in blockcount_release().  If we end up
	 * waiting, the sleepqueue lock acquisition will provide the required
	 * side effects.
	 *
	 * If there is no work to wait for, but waiters are present, try to put
	 * ourselves to sleep to avoid jumping ahead.
	 */
	if (atomic_load_acq_int(&count->__count) == 0) {
		/* Fast path: honour PDROP even when we do not sleep. */
		if (lock != NULL && (prio & PDROP) != 0)
			LOCK_CLASS(lock)->lc_unlock(lock);
		return (0);
	}
	lock_state = 0;
	wchan = __DEVOLATILE(void *, count);
	sleepq_lock(wchan);
	DROP_GIANT();
	if (lock != NULL)
		lock_state = LOCK_CLASS(lock)->lc_unlock(lock);
	old = blockcount_read(count);
	/* Set the waiters flag, unless the count dropped to zero meanwhile. */
	do {
		if (_BLOCKCOUNT_COUNT(old) == 0) {
			sleepq_release(wchan);
			ret = 0;
			goto out;
		}
		if (_BLOCKCOUNT_WAITERS(old))
			break;
	} while (!atomic_fcmpset_int(&count->__count, &old,
	    old | _BLOCKCOUNT_WAITERS_FLAG));
	sleepq_add(wchan, NULL, wmesg, 0, 0);
	sleepq_wait(wchan, prio);
	ret = 1;
out:
	PICKUP_GIANT();
	if (lock != NULL && (prio & PDROP) == 0)
		LOCK_CLASS(lock)->lc_lock(lock, lock_state);
	return (ret);
}
static void | static void | ||||
kdb_switch(void) | kdb_switch(void) | ||||
{ | { | ||||
thread_unlock(curthread); | thread_unlock(curthread); | ||||
kdb_backtrace(); | kdb_backtrace(); | ||||
kdb_reenter(); | kdb_reenter(); | ||||
▲ Show 20 Lines • Show All 207 Lines • Show Last 20 Lines |
other blockcount routines use
instead of hardcoding the name
side note is that probably we shlould just get a KASSERT variant which prefixes everything with func:line or similar