Index: sys/kern/kern_synch.c
===================================================================
--- sys/kern/kern_synch.c
+++ sys/kern/kern_synch.c
@@ -335,72 +335,65 @@
 }
 
 /*
- * Potentially release the last reference for refcount. Check for
- * unlikely conditions and signal the caller as to whether it was
- * the final ref.
+ * Release the last reference of the given refcount and wakeup
+ * waiters, if any.
  */
-bool
-refcount_release_last(volatile u_int *count, u_int n, u_int old)
+void
+refcount_release_last(volatile u_int *count)
 {
-	u_int waiter;
+	u_int old;
 
-	waiter = old & REFCOUNT_WAITER;
-	old = REFCOUNT_COUNT(old);
-	if (__predict_false(n > old || REFCOUNT_SATURATED(old))) {
-		/*
-		 * Avoid multiple destructor invocations if underflow occurred.
-		 * This is not perfect since the memory backing the containing
-		 * object may already have been reallocated.
-		 */
-		_refcount_update_saturated(count);
-		return (false);
+	old = *count;
+
+	for (;;) {
+		/* check if refcount was bumped again */
+		if (REFCOUNT_COUNT(old) != 0)
+			return;
+		/* try to assign zero to refcount */
+		if (atomic_fcmpset_int(count, &old, 0))
+			break;
 	}
 
-	/*
-	 * Attempt to atomically clear the waiter bit. Wakeup waiters
-	 * if we are successful.
-	 */
-	if (waiter != 0 && atomic_cmpset_int(count, REFCOUNT_WAITER, 0))
+	/* check if there are any waiters */
+	if (old & REFCOUNT_WAITER)
 		wakeup(__DEVOLATILE(u_int *, count));
-
-	/*
-	 * Last reference. Signal the user to call the destructor.
-	 *
-	 * Ensure that the destructor sees all updates. The fence_rel
-	 * at the start of refcount_releasen synchronizes with this fence.
-	 */
-	atomic_thread_fence_acq();
-	return (true);
 }
 
 /*
- * Wait for a refcount wakeup. This does not guarantee that the ref is still
- * zero on return and may be subject to transient wakeups. Callers wanting
- * a precise answer should use refcount_wait().
+ * Wait for refcount to reach zero. This function should be paired with
+ * refcount_release_last().
  */
 void
-refcount_sleep(volatile u_int *count, const char *wmesg, int pri)
+refcount_wait(volatile u_int *count, const char *wmesg, int pri)
 {
 	void *wchan;
 	u_int old;
 
-	if (REFCOUNT_COUNT(*count) == 0)
-		return;
+	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+	    "refcount_wait() can sleep");
+
 	wchan = __DEVOLATILE(void *, count);
-	sleepq_lock(wchan);
-	old = *count;
 	for (;;) {
-		if (REFCOUNT_COUNT(old) == 0) {
-			sleepq_release(wchan);
-			return;
+		old = *count;
+		if (old == 0)
+			break;
+
+		sleepq_lock(wchan);
+
+		/* try to get WAIT bit added */
+		while ((old & REFCOUNT_WAITER) == 0) {
+			if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER)) {
+				break;
+			} else if (old == 0) {
+				sleepq_release(wchan);
+				return;
+			}
 		}
-		if (old & REFCOUNT_WAITER)
-			break;
-		if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER))
-			break;
+
+		DROP_GIANT();
+		sleepq_add(wchan, NULL, wmesg, 0, 0);
+		sleepq_wait(wchan, pri);
+		PICKUP_GIANT();
 	}
-	sleepq_add(wchan, NULL, wmesg, 0, 0);
-	sleepq_wait(wchan, pri);
 }
 
 /*
Index: sys/sys/refcount.h
===================================================================
--- sys/sys/refcount.h
+++ sys/sys/refcount.h
@@ -45,8 +45,8 @@
 #define	REFCOUNT_SATURATED(val)	(((val) & (1U << 30)) != 0)
 #define	REFCOUNT_COUNT(x)	((x) & ~REFCOUNT_WAITER)
 
-bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
-void refcount_sleep(volatile u_int *count, const char *wmesg, int prio);
+void refcount_release_last(volatile u_int *count);
+void refcount_wait(volatile u_int *count, const char *wmesg, int prio);
 
 /*
  * Attempt to handle reference count overflow and underflow. Force the counter
@@ -118,10 +118,29 @@
 
 	atomic_thread_fence_rel();
 	old = atomic_fetchadd_int(count, -n);
-	if (__predict_false(n >= REFCOUNT_COUNT(old) ||
-	    REFCOUNT_SATURATED(old)))
-		return (refcount_release_last(count, n, old));
-	return (false);
+	if (__predict_false(n > REFCOUNT_COUNT(old) ||
+	    REFCOUNT_SATURATED(old))) {
+		_refcount_update_saturated(count);
+		return (false);
+	} else if (__predict_false(n != REFCOUNT_COUNT(old))) {
+		return (false);
+	}
+
+	/*
+	 * Last reference. Signal the user to call the destructor.
+	 *
+	 * Ensure that the destructor sees all updates. The fence_rel
+	 * at the start of refcount_releasen synchronizes with this
+	 * fence.
+	 */
+	atomic_thread_fence_acq();
+
+	/* check if there is a waiter */
+	if (__predict_false(old & REFCOUNT_WAITER))
+		refcount_release_last(count);
+
+	/* Last reference */
+	return (true);
 }
 
 static __inline bool
@@ -131,14 +150,6 @@
 	return (refcount_releasen(count, 1));
 }
 
-static __inline void
-refcount_wait(volatile u_int *count, const char *wmesg, int prio)
-{
-
-	while (*count != 0)
-		refcount_sleep(count, wmesg, prio);
-}
-
 /*
  * This functions returns non-zero if the refcount was
  * incremented. Else zero is returned.
  */
@@ -160,19 +171,43 @@
 }
 
 static __inline __result_use_check bool
-refcount_release_if_not_last(volatile u_int *count)
+refcount_releasen_if_not_last(volatile u_int *count, u_int n)
 {
 	u_int old;
 
+	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+	    ("refcount_releasen_if_not_last: n=%u too large", n));
+
+	atomic_thread_fence_rel();
 	old = *count;
 	for (;;) {
-		if (REFCOUNT_COUNT(old) == 1)
+		if (__predict_false(n > REFCOUNT_COUNT(old) ||
+		    REFCOUNT_SATURATED(old))) {
+			_refcount_update_saturated(count);
+			return (true);
+		} else if (__predict_false(REFCOUNT_COUNT(old) == n)) {
+			/*
+			 * Last reference. Signal the user to call the
+			 * destructor.
+			 *
+			 * Ensure that the destructor sees all
+			 * updates. The fence_rel at the start of
+			 * refcount_release_if_not_last() synchronizes
+			 * with this fence.
+			 */
+			atomic_thread_fence_acq();
 			return (false);
-		if (__predict_false(REFCOUNT_SATURATED(old)))
+		} else if (atomic_fcmpset_int(count, &old, old - n)) {
 			return (true);
-		if (atomic_fcmpset_int(count, &old, old - 1))
-			return (true);
+		}
 	}
 }
 
+static __inline __result_use_check bool
+refcount_release_if_not_last(volatile u_int *count)
+{
+
+	return (refcount_releasen_if_not_last(count, 1));
+}
+
 #endif	/* ! __SYS_REFCOUNT_H__ */
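
Usage sketch (not part of the patch): the fragment below shows how the reworked
primitives are intended to be paired. Ordinary references are dropped through
refcount_release(); a teardown path sleeps in refcount_wait() until the count
reaches zero, at which point the final release observes REFCOUNT_WAITER and
calls refcount_release_last() to post the wakeup. The struct foo type, the
foo_*() helpers, the "foodrn" wmesg and the M_TEMP malloc type are illustrative
placeholders only; the refcount(9) calls are the ones declared in the patched
header above.

/*
 * Illustrative drain-and-free pattern; all names except the refcount(9)
 * API are placeholders.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/refcount.h>

struct foo {
	volatile u_int	foo_refs;	/* managed via refcount(9) */
	/* ... payload ... */
};

static struct foo *
foo_alloc(void)
{
	struct foo *fp;

	fp = malloc(sizeof(*fp), M_TEMP, M_WAITOK | M_ZERO);
	refcount_init(&fp->foo_refs, 1);	/* caller's reference */
	return (fp);
}

static void
foo_hold(struct foo *fp)
{

	refcount_acquire(&fp->foo_refs);
}

static void
foo_rele(struct foo *fp)
{

	/*
	 * When this drops the count to zero while a thread is blocked in
	 * refcount_wait(), refcount_releasen() sees REFCOUNT_WAITER set
	 * and calls refcount_release_last() to issue the wakeup.
	 */
	(void)refcount_release(&fp->foo_refs);
}

static void
foo_destroy(struct foo *fp)
{

	/* Drop our own reference, then sleep until all others are gone. */
	foo_rele(fp);
	refcount_wait(&fp->foo_refs, "foodrn", PWAIT);
	free(fp, M_TEMP);
}

In this model the draining thread, not the releaser of the last reference,
frees the object, which is why foo_rele() ignores refcount_release()'s return
value.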