sys/kern/kern_sx.c
[... first 468 lines of the file not shown ...]

 /*
  * Downgrade an unrecursed exclusive lock into a single shared lock.
  */
 void
 sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
 {
         uintptr_t x;
-        int wakeup_swapper;
         if (SCHEDULER_STOPPED())
                 return;
         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
             ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
         _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
 #ifndef INVARIANTS
[... 25 lines not shown (context: #endif) ...]
          * without any races and wakeup any shared waiters.
          */
         sleepq_lock(&sx->lock_object);
         /*
          * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
          * shared lock.  If there are any shared waiters, wake them up.
          */
-        wakeup_swapper = 0;
         x = sx->sx_lock;
         atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
             (x & SX_LOCK_EXCLUSIVE_WAITERS));
         if (x & SX_LOCK_SHARED_WAITERS)
-                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
-                    0, SQ_SHARED_QUEUE);
+                sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
+                    SQ_SHARED_QUEUE);
         sleepq_release(&sx->lock_object);
-        if (wakeup_swapper)
-                kick_proc0();
 out:
         curthread->td_sx_slocks++;
         LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
         LOCKSTAT_RECORD0(sx__downgrade, sx);
 }
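For reviewers less familiar with sx(9): sx_downgrade_int() above is the out-of-line body behind the sx_downgrade() KPI, which converts an exclusive hold into a shared one without ever releasing the lock, waking any queued readers in the process. A minimal usage sketch follows; the example_* names are hypothetical and the lock is assumed to have been set up once with sx_init().

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

/* Hypothetical state for illustration; not part of this changeset. */
static struct sx example_lock;
static int example_value;

static void
example_setup(void)
{
        sx_init(&example_lock, "example lock");
}

static void
example_update_then_scan(int newval)
{
        sx_xlock(&example_lock);        /* exclusive: we are about to write */
        example_value = newval;
        sx_downgrade(&example_lock);    /* keep the lock, let readers in */
        /* ... longer read-mostly work may now run alongside other readers ... */
        sx_sunlock(&example_lock);      /* drop the shared lock */
}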
 void
 sx_downgrade_(struct sx *sx, const char *file, int line)

[... 376 lines not shown ...]
  * operation.  All 'easy case' failures are redirected to this.  Note
  * that ideally this would be a static function, but it needs to be
  * accessible from at least sx.h.
  */
 void
 _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 {
         uintptr_t tid, setx;
-        int queue, wakeup_swapper;
+        int queue;
         if (SCHEDULER_STOPPED())
                 return;
         tid = (uintptr_t)curthread;
         if (__predict_false(x == tid))
                 x = SX_READ_VALUE(sx);
[... 40 lines not shown (context: _sx_xunlock_hard()) ...]
         atomic_store_rel_ptr(&sx->sx_lock, setx);
         /* Wake up all the waiters for the specific queue. */
         if (LOCK_LOG_TEST(&sx->lock_object, 0))
                 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
                     __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
                     "exclusive");
-        wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
-            queue);
+        sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
         sleepq_release(&sx->lock_object);
-        if (wakeup_swapper)
-                kick_proc0();
 }
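The block comment at the top of this hunk describes the structure worth keeping in mind while reviewing: the inline unlock path in sx.h handles only the uncontested case and redirects every 'hard' state here, which is why _sx_xunlock_hard() cannot be static. A rough sketch of that split is below; it is illustrative rather than the literal sx.h macro, the sketch_ name is invented, and kern_sx.c's usual headers are assumed.

/* Illustrative only -- not the verbatim fast path from sx.h. */
static __inline void
sketch_sx_xunlock(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t tid = (uintptr_t)curthread;

        /* Easy case: the lock word is exactly "owned by us, no waiters". */
        if (atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
                return;

        /*
         * Hard case: recursion or waiter bits are encoded in the lock
         * word, so the slow path has to sort them out.  After this
         * change it no longer reports a swapper wakeup to its caller.
         */
        _sx_xunlock_hard(sx, tid LOCK_FILE_LINE_ARG);
}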
 static __always_inline bool
 __sx_can_read(struct thread *td, uintptr_t x, bool fp)
 {
         if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER))
             == SX_LOCK_SHARED)

[... 335 lines not shown (context: _sx_sunlock_try()) ...]
         }
         return (false);
 }

 static void __noinline
 _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
     LOCK_FILE_LINE_ARG_DEF)
 {
-        int wakeup_swapper = 0;
         uintptr_t setx, queue;
         if (SCHEDULER_STOPPED())
                 return;
         if (_sx_sunlock_try(sx, td, &x))
                 goto out_lockstat;
[... 16 lines not shown (context: if (x & SX_LOCK_EXCLUSIVE_WAITERS) {) ...]
                         queue = SQ_EXCLUSIVE_QUEUE;
                 }
                 setx |= (x & SX_LOCK_WRITE_SPINNER);
                 if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
                         continue;
                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
                         CTR2(KTR_LOCK, "%s: %p waking up all thread on"
                             "exclusive queue", __func__, sx);
-                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
-                    0, queue);
+                sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
                 td->td_sx_slocks--;
                 break;
         }
         sleepq_release(&sx->lock_object);
-        if (wakeup_swapper)
-                kick_proc0();
 out_lockstat:
         LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
 }
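One idiom in the loop above that is easy to misread: atomic_fcmpset_rel_ptr() is the fcmpset flavour from atomic(9), which on failure writes the value it actually observed back into x, so each retry re-evaluates the waiter bits from a fresh snapshot without an extra explicit load. A stripped-down sketch of the same idiom on a hypothetical counter (names are illustrative only):

#include <sys/param.h>
#include <machine/atomic.h>

/* Drop one reference using the fcmpset retry idiom (illustrative only). */
static void
sketch_refcount_release(volatile u_int *countp)
{
        u_int old;

        old = *countp;
        for (;;) {
                /*
                 * Try to install old - 1 with release semantics; on
                 * failure the primitive updates 'old' to the current
                 * value, so we simply loop and recompute.
                 */
                if (atomic_fcmpset_rel_int(countp, &old, old - 1))
                        break;
        }
}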
 void
 _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
 {
         struct thread *td;

[... remaining 191 lines not shown ...]