Changeset View
Changeset View
Standalone View
Standalone View
sys/sys/mutex.h
Show First 20 Lines • Show All 182 Lines • ▼ Show 20 Lines | |||||
/*
 * Lock a normal (sleep) mutex.  Fast path: a single atomic
 * compare-and-set of the lock word to the owning thread pointer.
 * On contention, fall through to the slow path in kern_mutex.c.
 */
#define __mtx_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	if (!_mtx_obtain_lock((mp), _tid))				\
		_mtx_lock_sleep((mp), _tid, (opts), (file), (line));	\
	else								\
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,	\
		    mp, 0, 0, (file), (line), 0);			\
} while (0)
/*
 * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
 * turns out that function calls can be significantly expensive on
 * some architectures).  Since spin locks are not _too_ common,
 * inlining this code is not too big a deal.
 */
#ifdef SMP
#define __mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if (!_mtx_obtain_lock((mp), _tid)) {				\
		if ((mp)->mtx_lock == _tid)				\
			/* Already owned by us: just bump recursion. */	\
			(mp)->mtx_recurse++;				\
		else							\
			_mtx_lock_spin((mp), _tid, (opts), (file),	\
			    (line));					\
	} else								\
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,	\
		    mp, 0, 0, (file), (line), 0);			\
} while (0)
#else /* SMP */
/*
 * UP variant: no other CPU can race us once spinlock_enter() has
 * disabled preemption/interrupts, so no atomic op is needed.
 */
#define __mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock == _tid)					\
		(mp)->mtx_recurse++;					\
	else {								\
		KASSERT((mp)->mtx_lock == MTX_UNOWNED,			\
		    ("corrupt spinlock"));				\
		(mp)->mtx_lock = _tid;					\
	}								\
} while (0)
#endif /* SMP */
/*
 * Unlock a normal (sleep) mutex.  The lockstat release probe fires
 * only on the final (non-recursed) release; if the atomic release
 * fails (waiters present), hand off to the slow path.
 */
#define __mtx_unlock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	if ((mp)->mtx_recurse == 0)					\
		LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, mp, 0); \
	if (!_mtx_release_lock((mp), _tid))				\
		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
} while (0)
/*
 * Unlock a spin mutex.  For spinlocks, we can handle everything
 * inline, as it's pretty simple and a function call would be too
 * expensive (at least on some architectures).  Since spin locks are
 * not _too_ common, inlining this code is not too big a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifdef SMP
#define __mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp, 0);	\
		_mtx_release_lock_quick((mp));				\
	}								\
	spinlock_exit();						\
} while (0)
#else /* SMP */
/* UP variant: plain store suffices; no atomic release needed. */
#define __mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp, 0);	\
		(mp)->mtx_lock = MTX_UNOWNED;				\
	}								\
	spinlock_exit();						\
} while (0)
#endif /* SMP */
/* | /* | ||||
* Exported lock manipulation interface. | * Exported lock manipulation interface. | ||||
▲ Show 20 Lines • Show All 191 Lines • Show Last 20 Lines |