Changeset View
Changeset View
Standalone View
Standalone View
head/sys/sys/refcount.h
Show All 33 Lines | |||||
#ifdef _KERNEL | #ifdef _KERNEL | ||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#else | #else | ||||
#include <stdbool.h> | #include <stdbool.h> | ||||
#define KASSERT(exp, msg) /* */ | #define KASSERT(exp, msg) /* */ | ||||
#endif | #endif | ||||
/*
 * Bit 31 flags a waiter sleeping on the count; the low 31 bits hold the
 * reference count itself.  Saturation is detected at bit 30, one below
 * the waiter flag, so a runaway count cannot corrupt the flag.
 *
 * Note: the waiter flag must be built from an unsigned 1 — (1 << 31)
 * left-shifts a signed int into its sign bit, which is undefined
 * behavior in C, and would make ~REFCOUNT_WAITER a signed operation.
 */
#define	REFCOUNT_WAITER			(1U << 31) /* Refcount has waiter. */
#define	REFCOUNT_SATURATION_VALUE	(3U << 29)
#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 30)) != 0)
#define	REFCOUNT_COUNT(x)		((x) & ~REFCOUNT_WAITER)
bool refcount_release_last(volatile u_int *count, u_int n, u_int old); | |||||
void refcount_sleep(volatile u_int *count, const char *wmesg, int prio); | |||||
/* | /* | ||||
* Attempt to handle reference count overflow and underflow. Force the counter | * Attempt to handle reference count overflow and underflow. Force the counter | ||||
* to stay at the saturation value so that a counter overflow cannot trigger | * to stay at the saturation value so that a counter overflow cannot trigger | ||||
* destruction of the containing object and instead leads to a less harmful | * destruction of the containing object and instead leads to a less harmful | ||||
* memory leak. | * memory leak. | ||||
*/ | */ | ||||
static __inline void | static __inline void | ||||
_refcount_update_saturated(volatile u_int *count) | _refcount_update_saturated(volatile u_int *count) | ||||
Show All 18 Lines | |||||
{ | { | ||||
u_int old; | u_int old; | ||||
old = atomic_fetchadd_int(count, 1); | old = atomic_fetchadd_int(count, 1); | ||||
if (__predict_false(REFCOUNT_SATURATED(old))) | if (__predict_false(REFCOUNT_SATURATED(old))) | ||||
_refcount_update_saturated(count); | _refcount_update_saturated(count); | ||||
} | } | ||||
static __inline void | |||||
refcount_acquiren(volatile u_int *count, u_int n) | |||||
{ | |||||
u_int old; | |||||
KASSERT(n < REFCOUNT_SATURATION_VALUE / 2, | |||||
("refcount_acquiren: n %d too large", n)); | |||||
old = atomic_fetchadd_int(count, n); | |||||
if (__predict_false(REFCOUNT_SATURATED(old))) | |||||
_refcount_update_saturated(count); | |||||
} | |||||
static __inline __result_use_check bool | static __inline __result_use_check bool | ||||
refcount_acquire_checked(volatile u_int *count) | refcount_acquire_checked(volatile u_int *count) | ||||
{ | { | ||||
u_int lcount; | u_int lcount; | ||||
for (lcount = *count;;) { | for (lcount = *count;;) { | ||||
if (__predict_false(REFCOUNT_SATURATED(lcount + 1))) | if (__predict_false(REFCOUNT_SATURATED(lcount + 1))) | ||||
return (false); | return (false); | ||||
if (__predict_true(atomic_fcmpset_int(count, &lcount, | if (__predict_true(atomic_fcmpset_int(count, &lcount, | ||||
lcount + 1) == 1)) | lcount + 1) == 1)) | ||||
return (true); | return (true); | ||||
} | } | ||||
} | } | ||||
static __inline bool | static __inline bool | ||||
refcount_release(volatile u_int *count) | refcount_releasen(volatile u_int *count, u_int n) | ||||
{ | { | ||||
u_int old; | u_int old; | ||||
KASSERT(n < REFCOUNT_SATURATION_VALUE / 2, | |||||
("refcount_releasen: n %d too large", n)); | |||||
atomic_thread_fence_rel(); | atomic_thread_fence_rel(); | ||||
old = atomic_fetchadd_int(count, -1); | old = atomic_fetchadd_int(count, -n); | ||||
if (__predict_false(old == 0 || REFCOUNT_SATURATED(old))) { | if (__predict_false(n >= REFCOUNT_COUNT(old) || | ||||
/* | REFCOUNT_SATURATED(old))) | ||||
* Avoid multiple destructor invocations if underflow occurred. | return (refcount_release_last(count, n, old)); | ||||
* This is not perfect since the memory backing the containing | |||||
* object may already have been reallocated. | |||||
*/ | |||||
_refcount_update_saturated(count); | |||||
return (false); | return (false); | ||||
} | } | ||||
if (old > 1) | |||||
return (false); | |||||
/* | static __inline bool | ||||
* Last reference. Signal the user to call the destructor. | refcount_release(volatile u_int *count) | ||||
* | { | ||||
* Ensure that the destructor sees all updates. The fence_rel | |||||
* at the start of the function synchronizes with this fence. | return (refcount_releasen(count, 1)); | ||||
*/ | |||||
atomic_thread_fence_acq(); | |||||
return (true); | |||||
} | } | ||||
static __inline void | |||||
refcount_wait(volatile u_int *count, const char *wmesg, int prio) | |||||
{ | |||||
while (*count != 0) | |||||
refcount_sleep(count, wmesg, prio); | |||||
} | |||||
/* | /* | ||||
* This functions returns non-zero if the refcount was | * This functions returns non-zero if the refcount was | ||||
* incremented. Else zero is returned. | * incremented. Else zero is returned. | ||||
*/ | */ | ||||
static __inline __result_use_check bool | static __inline __result_use_check bool | ||||
refcount_acquire_if_not_zero(volatile u_int *count) | refcount_acquire_if_not_zero(volatile u_int *count) | ||||
{ | { | ||||
u_int old; | u_int old; | ||||
old = *count; | old = *count; | ||||
for (;;) { | for (;;) { | ||||
if (old == 0) | if (REFCOUNT_COUNT(old) == 0) | ||||
return (false); | return (false); | ||||
if (__predict_false(REFCOUNT_SATURATED(old))) | if (__predict_false(REFCOUNT_SATURATED(old))) | ||||
return (true); | return (true); | ||||
if (atomic_fcmpset_int(count, &old, old + 1)) | if (atomic_fcmpset_int(count, &old, old + 1)) | ||||
return (true); | return (true); | ||||
} | } | ||||
} | } | ||||
static __inline __result_use_check bool | static __inline __result_use_check bool | ||||
refcount_release_if_not_last(volatile u_int *count) | refcount_release_if_not_last(volatile u_int *count) | ||||
{ | { | ||||
u_int old; | u_int old; | ||||
old = *count; | old = *count; | ||||
for (;;) { | for (;;) { | ||||
if (old == 1) | if (REFCOUNT_COUNT(old) == 1) | ||||
return (false); | return (false); | ||||
if (__predict_false(REFCOUNT_SATURATED(old))) | if (__predict_false(REFCOUNT_SATURATED(old))) | ||||
return (true); | return (true); | ||||
if (atomic_fcmpset_int(count, &old, old - 1)) | if (atomic_fcmpset_int(count, &old, old - 1)) | ||||
return (true); | return (true); | ||||
} | } | ||||
} | } | ||||
#endif /* ! __SYS_REFCOUNT_H__ */ | #endif /* ! __SYS_REFCOUNT_H__ */ |