sys/sys/mutex.h
[... 234 earlier lines not shown ...]
  * the work is deferred to another function.
  */
 
 /* Lock a normal mutex. */
 #define __mtx_lock(mp, tid, opts, file, line) do {                     \
         uintptr_t _tid = (uintptr_t)(tid);                             \
         uintptr_t _v = MTX_UNOWNED;                                    \
                                                                        \
-        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
+        if (__predict_false_noprobe(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
             !_mtx_obtain_lock_fetch((mp), &_v, _tid)))                 \
                 _mtx_lock_sleep((mp), _v, (opts), (file), (line));     \
 } while (0)
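This is the inline fast path that mtx_lock() normally expands to: a single compare-and-set of MTX_UNOWNED to the owning thread pointer, with everything else (contention, or an enabled lockstat probe) deferred to _mtx_lock_sleep(). As a minimal consumer-side sketch of the mutex(9) KPI that sits on top of it, using hypothetical foo_* names:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Hypothetical consumer; structure and field names are illustrative only. */
struct foo_softc {
        struct mtx      sc_mtx;
        int             sc_count;
};

static void
foo_init(struct foo_softc *sc)
{
        /* MTX_DEF creates a default (sleep) mutex. */
        mtx_init(&sc->sc_mtx, "foo softc lock", NULL, MTX_DEF);
}

static void
foo_bump(struct foo_softc *sc)
{
        mtx_lock(&sc->sc_mtx);          /* uncontended case stays in __mtx_lock() */
        sc->sc_count++;
        mtx_unlock(&sc->sc_mtx);        /* uncontended case stays in __mtx_unlock() */
}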
 /*
  * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
  * turns out that function calls can be significantly expensive on
  * some architectures).  Since spin locks are not _too_ common,
  * inlining this code is not too big a deal.
  */
 #ifdef SMP
 #define __mtx_lock_spin(mp, tid, opts, file, line) do {                \
         uintptr_t _tid = (uintptr_t)(tid);                             \
         uintptr_t _v = MTX_UNOWNED;                                    \
                                                                        \
         spinlock_enter();                                              \
-        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) || \
+        if (__predict_false_noprobe(LOCKSTAT_PROFILE_ENABLED(spin__acquire) || \
             !_mtx_obtain_lock_fetch((mp), &_v, _tid)))                 \
                 _mtx_lock_spin((mp), _v, (opts), (file), (line));      \
 } while (0)
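Likewise, this is the fast path behind mtx_lock_spin(): spinlock_enter() disables interrupts on the current CPU before the same atomic acquire is attempted, and the contended case is handled in _mtx_lock_spin(). A hedged sketch of typical consumer usage, again with hypothetical names:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Illustrative only: a hypothetical spin mutex and the state it protects. */
static struct mtx       foo_intr_mtx;
static int              foo_intr_count;

static void
foo_locks_init(void)
{
        /* MTX_SPIN selects a spin mutex; owners run with interrupts disabled. */
        mtx_init(&foo_intr_mtx, "foo intr", NULL, MTX_SPIN);
}

static void
foo_intr_event(void)
{
        mtx_lock_spin(&foo_intr_mtx);   /* __mtx_lock_spin() fast path */
        foo_intr_count++;
        mtx_unlock_spin(&foo_intr_mtx);
}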
 #define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \
         uintptr_t _tid = (uintptr_t)(tid);                             \
         int _ret;                                                      \
                                                                        \
         spinlock_enter();                                              \
[... 34 unchanged lines not shown ...]
         _ret;                                                          \
 })
 #endif /* SMP */
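The partially collapsed __mtx_trylock_spin() body backs mtx_trylock_spin(), which gives up rather than spinning when the lock is already owned. A minimal sketch of the usual pattern, reusing the hypothetical foo_intr_mtx from the previous example and assuming the conventional nonzero-on-success return:

        /* Opportunistic variant: take the lock only if it is currently free. */
        if (mtx_trylock_spin(&foo_intr_mtx) != 0) {
                foo_intr_count++;
                mtx_unlock_spin(&foo_intr_mtx);
        } else {
                /* Another CPU owns the lock; defer or retry the work instead. */
        }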
 /* Unlock a normal mutex. */
 #define __mtx_unlock(mp, tid, opts, file, line) do {                   \
         uintptr_t _v = (uintptr_t)(tid);                               \
                                                                        \
-        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
+        if (__predict_false_noprobe(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
             !_mtx_release_lock_fetch((mp), &_v)))                      \
                 _mtx_unlock_sleep((mp), _v, (opts), (file), (line));   \
 } while (0)
 /*
  * Unlock a spin mutex.  For spinlocks, we can handle everything
  * inline, as it's pretty simple and a function call would be too
  * expensive (at least on some architectures).  Since spin locks are
[... 238 remaining lines not shown ...]