Index: sys/kern/kern_synch.c
===================================================================
--- sys/kern/kern_synch.c
+++ sys/kern/kern_synch.c
@@ -374,33 +374,45 @@
 }
 
 /*
- * Wait for a refcount wakeup. This does not guarantee that the ref is still
- * zero on return and may be subject to transient wakeups. Callers wanting
- * a precise answer should use refcount_wait().
+ * Wait for the refcount to reach zero. This function should be paired
+ * with refcount_release_last().
  */
 void
-refcount_sleep(volatile u_int *count, const char *wmesg, int pri)
+refcount_wait(volatile u_int *count, const char *wmesg, int pri)
 {
 	void *wchan;
 	u_int old;
 
-	if (REFCOUNT_COUNT(*count) == 0)
-		return;
+	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+	    "refcount_wait() can sleep");
+
 	wchan = __DEVOLATILE(void *, count);
-	sleepq_lock(wchan);
-	old = *count;
+
 	for (;;) {
-		if (REFCOUNT_COUNT(old) == 0) {
+		old = *count;
+		if (REFCOUNT_COUNT(old) == 0)
+			break;
+
+		/*
+		 * Lock the sleepqueue before setting the waiter bit to
+		 * avoid racing with wakeup(). The wakeup() path takes
+		 * the same lock before issuing the wakeup.
+		 */
+		sleepq_lock(wchan);
+
+		/*
+		 * Try to set the waiter bit and make sure the refcount
+		 * value did not change since the last read.
+		 */
+		if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER)) {
+			DROP_GIANT();
+			sleepq_add(wchan, NULL, wmesg, 0, 0);
+			sleepq_wait(wchan, pri);
+			PICKUP_GIANT();
+		} else {
 			sleepq_release(wchan);
-			return;
 		}
-		if (old & REFCOUNT_WAITER)
-			break;
-		if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER))
-			break;
 	}
-	sleepq_add(wchan, NULL, wmesg, 0, 0);
-	sleepq_wait(wchan, pri);
 }
 
 /*
Index: sys/sys/refcount.h
===================================================================
--- sys/sys/refcount.h
+++ sys/sys/refcount.h
@@ -46,7 +46,7 @@
 #define REFCOUNT_COUNT(x) ((x) & ~REFCOUNT_WAITER)
 
 bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
-void refcount_sleep(volatile u_int *count, const char *wmesg, int prio);
+void refcount_wait(volatile u_int *count, const char *wmesg, int prio);
 
 /*
  * Attempt to handle reference count overflow and underflow. Force the counter
@@ -131,14 +131,6 @@
 	return (refcount_releasen(count, 1));
 }
 
-static __inline void
-refcount_wait(volatile u_int *count, const char *wmesg, int prio)
-{
-
-	while (*count != 0)
-		refcount_sleep(count, wmesg, prio);
-}
-
 /*
  * This functions returns non-zero if the refcount was
  * incremented. Else zero is returned.
@@ -160,19 +152,43 @@
 }
 
 static __inline __result_use_check bool
-refcount_release_if_not_last(volatile u_int *count)
+refcount_releasen_if_not_last(volatile u_int *count, u_int n)
 {
 	u_int old;
 
+	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+	    ("refcount_releasen_if_not_last: n=%u too large", n));
+
+	atomic_thread_fence_rel();
 	old = *count;
 	for (;;) {
-		if (REFCOUNT_COUNT(old) == 1)
+		if (__predict_false(n > REFCOUNT_COUNT(old) ||
+		    REFCOUNT_SATURATED(old))) {
+			_refcount_update_saturated(count);
+			return (true);
+		} else if (__predict_false(REFCOUNT_COUNT(old) == n)) {
+			/*
+			 * Last reference. Signal the user to call the
+			 * destructor.
+			 *
+			 * Ensure that the destructor sees all
+			 * updates. The fence_rel at the start of
+			 * refcount_releasen_if_not_last() synchronizes
+			 * with this fence.
+			 */
+			atomic_thread_fence_acq();
 			return (false);
-		if (__predict_false(REFCOUNT_SATURATED(old)))
+		} else if (atomic_fcmpset_int(count, &old, old - n)) {
 			return (true);
-		if (atomic_fcmpset_int(count, &old, old - 1))
-			return (true);
+		}
 	}
 }
 
+static __inline __result_use_check bool
+refcount_release_if_not_last(volatile u_int *count)
+{
+
+	return (refcount_releasen_if_not_last(count, 1));
+}
+
 #endif /* ! __SYS_REFCOUNT_H__ */
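For reviewers, a minimal sketch of how the changed interfaces are meant to be used together, assuming the standard refcount(9) acquire/release primitives. struct foo, foo_hold(), foo_rele(), foo_drain(), the "foodrn" wmesg and the PVM priority are hypothetical and not part of this patch; only refcount_wait() and the release path come from the changes above.

/*
 * Hypothetical consumer, for illustration only.  Holders take and drop
 * references with refcount_acquire()/refcount_release(); the teardown
 * path blocks in the new refcount_wait() until the count reaches zero.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/priority.h>
#include <sys/refcount.h>

struct foo {
	u_int	foo_refs;
	/* ... other fields ... */
};

static void
foo_hold(struct foo *fp)
{

	refcount_acquire(&fp->foo_refs);
}

static void
foo_rele(struct foo *fp)
{

	/*
	 * Dropping the last reference is expected to go through
	 * refcount_release_last(), which wakes up any thread sleeping
	 * in refcount_wait() below.
	 */
	(void)refcount_release(&fp->foo_refs);
}

static void
foo_drain(struct foo *fp)
{

	/* Sleep until every reference has been released. */
	refcount_wait(&fp->foo_refs, "foodrn", PVM);
}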