sys/sys/refcount.h
[... first 28 lines of the file not shown ...]
 #ifndef __SYS_REFCOUNT_H__
 #define __SYS_REFCOUNT_H__

 #include <machine/atomic.h>

 #ifdef _KERNEL
 #include <sys/systm.h>
+#include <sys/_blockcount.h>
 #else
 #include <stdbool.h>
 #define	KASSERT(exp, msg)	/* */
 #endif

-#define	REFCOUNT_WAITER			(1U << 31) /* Refcount has waiter. */
-#define	REFCOUNT_SATURATION_VALUE	(3U << 29)
-#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 30)) != 0)
-#define	REFCOUNT_COUNT(x)		((x) & ~REFCOUNT_WAITER)
-
-bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
+#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 31)) != 0)
+#define	REFCOUNT_SATURATION_VALUE	(3U << 30)

 /*
  * Attempt to handle reference count overflow and underflow. Force the counter
  * to stay at the saturation value so that a counter overflow cannot trigger
  * destruction of the containing object and instead leads to a less harmful
  * memory leak.
  */
 static __inline void
 _refcount_update_saturated(volatile u_int *count)
[... 48 lines not shown ...]
 	for (lcount = *count;;) {
 		if (__predict_false(REFCOUNT_SATURATED(lcount + 1)))
 			return (false);
 		if (__predict_true(atomic_fcmpset_int(count, &lcount,
 		    lcount + 1) == 1))
 			return (true);
 	}
 }
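Note on the saturation comment above: the body of _refcount_update_saturated() is collapsed in this view. With the new definitions, REFCOUNT_SATURATION_VALUE is (3U << 30), i.e. 0xC0000000, which keeps bit 31 set with roughly 2^30 of slack in either direction, so racing increments or decrements near the saturation point cannot quickly clear the saturated state. A minimal sketch of such a clamp, under the assumption that it simply pins the counter (the name example_update_saturated is hypothetical and this is not necessarily the code in this revision), could be:

/*
 * Sketch only, not part of this change: force the counter back to the
 * saturation value so that REFCOUNT_SATURATED() keeps reporting true
 * for the remaining lifetime of the object.
 */
static __inline void
example_update_saturated(volatile u_int *count)
{

	atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
}

Pinning the value rather than letting it wrap turns an overflow into a leak of the containing object, which is the less harmful failure the comment describes.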
-static __inline bool
-refcount_releasen(volatile u_int *count, u_int n)
-{
-	u_int old;
-
-	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
-	    ("refcount_releasen: n=%u too large", n));
-
-	/*
-	 * Paired with acquire fence in refcount_release_last.
-	 */
-	atomic_thread_fence_rel();
-	old = atomic_fetchadd_int(count, -n);
-	if (__predict_false(n >= REFCOUNT_COUNT(old) ||
-	    REFCOUNT_SATURATED(old)))
-		return (refcount_release_last(count, n, old));
-	return (false);
-}
-
-static __inline bool
-refcount_release(volatile u_int *count)
-{
-	return (refcount_releasen(count, 1));
-}
-
-#ifdef _KERNEL
-struct lock_object;
-void _refcount_sleep(volatile u_int *count, struct lock_object *,
-    const char *wmesg, int prio);
-
-static __inline void
-refcount_sleep(volatile u_int *count, const char *wmesg, int prio)
-{
-	_refcount_sleep(count, NULL, wmesg, prio);
-}
-
-#define	refcount_sleep_interlock(count, lock, wmesg, prio)		\
-	_refcount_sleep((count), (struct lock_object *)(lock), (wmesg), (prio))
-
-static __inline void
-refcount_wait(volatile u_int *count, const char *wmesg, int prio)
-{
-	while (*count != 0)
-		refcount_sleep(count, wmesg, prio);
-}
-#endif
 /*
  * This functions returns non-zero if the refcount was
  * incremented. Else zero is returned.
  */
 static __inline __result_use_check bool
 refcount_acquire_if_gt(volatile u_int *count, u_int n)
 {
 	u_int old;

 	old = *count;
 	for (;;) {
-		if (REFCOUNT_COUNT(old) <= n)
+		if (old <= n)
 			return (false);
 		if (__predict_false(REFCOUNT_SATURATED(old)))
 			return (true);
 		if (atomic_fcmpset_int(count, &old, old + 1))
 			return (true);
 	}
 }

 static __inline __result_use_check bool
 refcount_acquire_if_not_zero(volatile u_int *count)
 {
-	return refcount_acquire_if_gt(count, 0);
+	return (refcount_acquire_if_gt(count, 0));
 }
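Usage note for the conditional-acquire primitives above: they are meant for lookup paths that must not revive an object whose count has already dropped to zero. The sketch below is not part of this change, and the names (struct obj, obj_lookup_ref) are hypothetical:

#include <sys/types.h>
#include <sys/refcount.h>

/* Hypothetical refcounted object; only the counter matters here. */
struct obj {
	u_int	ref;
};

/*
 * Take a reference on an object found in some lookup structure.  If the
 * count has already reached zero the object is being destroyed, so fail
 * the lookup instead of resurrecting it.
 */
static struct obj *
obj_lookup_ref(struct obj *o)
{

	if (o == NULL || !refcount_acquire_if_not_zero(&o->ref))
		return (NULL);
	return (o);
}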
+static __inline bool
+refcount_releasen(volatile u_int *count, u_int n)
+{
+	u_int old;
+
+	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+	    ("refcount_releasen: n=%u too large", n));
+
+	atomic_thread_fence_rel();
+	old = atomic_fetchadd_int(count, -n);
+	if (__predict_false(old < n || REFCOUNT_SATURATED(old))) {
+		_refcount_update_saturated(count);
+		return (false);
+	}
+	if (old > n)
+		return (false);
+
+	/*
+	 * Last reference. Signal the user to call the destructor.
+	 *
+	 * Ensure that the destructor sees all updates. This synchronizes with
+	 * release fences from all routines which drop the count.
+	 */
+	atomic_thread_fence_acq();
+	return (true);
+}

Inline comments on the fence comment above:
kib: I failed to see something equivalent for blockcounts. Is the semantic comparable?
markj: It is similar. blockcount_release() issues a release fence to ensure that all stores are visible before the counter reaches 0. blockcount_sleep() loads the count with an acquire fence before returning.
kib: Actually no, it is not. blockcount_release() does not return a value, and blockcount_sleep()/blockcount_wait() are mandatory.

+static __inline bool
+refcount_release(volatile u_int *count)
+{
+	return (refcount_releasen(count, 1));
+}
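Usage note for the new refcount_releasen()/refcount_release(): exactly one caller, the one that dropped the last reference, gets a true return, and the acquire fence above pairs with the release fences issued on every drop, so that caller sees all prior stores and may safely destroy the object. Continuing the hypothetical struct obj from the earlier sketch, with obj_destroy() as a hypothetical destructor (not part of this change):

/* Hypothetical destructor for the struct obj sketched earlier. */
static void	obj_destroy(struct obj *o);

static void
obj_drop(struct obj *o)
{

	/*
	 * refcount_release() returns true for exactly one caller: the
	 * one that released the final reference.  Only that caller may
	 * tear the object down.
	 */
	if (refcount_release(&o->ref))
		obj_destroy(o);
}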
 static __inline __result_use_check bool
 refcount_release_if_gt(volatile u_int *count, u_int n)
 {
 	u_int old;

 	KASSERT(n > 0,
 	    ("refcount_release_if_gt: Use refcount_release for final ref"));
 	old = *count;
 	for (;;) {
-		if (REFCOUNT_COUNT(old) <= n)
+		if (old <= n)
 			return (false);
 		if (__predict_false(REFCOUNT_SATURATED(old)))
 			return (true);
 		/*
-		 * Paired with acquire fence in refcount_release_last.
+		 * Paired with acquire fence in refcount_releasen().
 		 */
 		if (atomic_fcmpset_rel_int(count, &old, old - 1))
 			return (true);
 	}
 }

 static __inline __result_use_check bool
 refcount_release_if_not_last(volatile u_int *count)
 {
-	return refcount_release_if_gt(count, 1);
+	return (refcount_release_if_gt(count, 1));
 }
-#endif /* ! __SYS_REFCOUNT_H__ */
+#endif /* !__SYS_REFCOUNT_H__ */
Other inline comments:
mjg: blockcount overflow?
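To make the fence pairing from the blockcount discussion above concrete: a thread that waits for a counter to drain must observe, with acquire semantics, the stores that the releasing threads published with release semantics. The sketch below is purely illustrative and built only from atomic(9) primitives; it is not the blockcount(9) implementation, the function names are hypothetical, and a real implementation would sleep rather than spin:

#include <sys/types.h>
#include <machine/atomic.h>
#include <machine/cpu.h>

static void
example_drop_one(volatile u_int *count)
{

	/* Publish all prior stores before the count can be seen as zero. */
	atomic_thread_fence_rel();
	atomic_subtract_int(count, 1);
}

static void
example_wait_for_zero(volatile u_int *count)
{

	/*
	 * The acquire load pairs with the release fence above, so once
	 * zero is observed all of the droppers' updates are visible.
	 */
	while (atomic_load_acq_int(count) != 0)
		cpu_spinwait();
}

This is the same pairing the new refcount_releasen() relies on, with the acquire side taken by the thread that sees the count reach zero.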