Index: head/sys/kern/kern_synch.c
===================================================================
--- head/sys/kern/kern_synch.c
+++ head/sys/kern/kern_synch.c
@@ -381,15 +381,21 @@
  * a precise answer should use refcount_wait().
  */
 void
-refcount_sleep(volatile u_int *count, const char *wmesg, int pri)
+_refcount_sleep(volatile u_int *count, struct lock_object *lock,
+    const char *wmesg, int pri)
 {
 	void *wchan;
 	u_int old;
 
-	if (REFCOUNT_COUNT(*count) == 0)
+	if (REFCOUNT_COUNT(*count) == 0) {
+		if (lock != NULL)
+			LOCK_CLASS(lock)->lc_unlock(lock);
 		return;
+	}
 	wchan = __DEVOLATILE(void *, count);
 	sleepq_lock(wchan);
+	if (lock != NULL)
+		LOCK_CLASS(lock)->lc_unlock(lock);
 	old = *count;
 	for (;;) {
 		if (REFCOUNT_COUNT(old) == 0) {

Index: head/sys/sys/refcount.h
===================================================================
--- head/sys/sys/refcount.h
+++ head/sys/sys/refcount.h
@@ -46,7 +46,6 @@
 #define REFCOUNT_COUNT(x) ((x) & ~REFCOUNT_WAITER)
 
 bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
-void refcount_sleep(volatile u_int *count, const char *wmesg, int prio);
 
 /*
  * Attempt to handle reference count overflow and underflow. Force the counter
@@ -135,13 +134,29 @@
 	return (refcount_releasen(count, 1));
 }
 
+#ifdef _KERNEL
+struct lock_object;
+void _refcount_sleep(volatile u_int *count, struct lock_object *,
+    const char *wmesg, int prio);
+
 static __inline void
+refcount_sleep(volatile u_int *count, const char *wmesg, int prio)
+{
+
+	_refcount_sleep(count, NULL, wmesg, prio);
+}
+
+#define refcount_sleep_interlock(count, lock, wmesg, prio)	\
+	_refcount_sleep((count), (struct lock_object *)(lock), (wmesg), (prio))
+
+static __inline void
 refcount_wait(volatile u_int *count, const char *wmesg, int prio)
 {
 
 	while (*count != 0)
 		refcount_sleep(count, wmesg, prio);
 }
+#endif
 
 /*
  * This functions returns non-zero if the refcount was
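
For context, a minimal caller sketch of the new interlock form; struct obj, obj_mtx, obj_drain(), and the "objdrn" wmesg are invented for illustration, not part of the change. The point of the interlock is ordering: _refcount_sleep() drops the caller's lock only after sleepq_lock() is held, so a refcount_release() that hits zero between the caller's check and the sleep cannot deliver its wakeup before the thread is on the sleep queue. The (struct lock_object *) cast in the macro works because a struct mtx (like the other lock types) begins with an embedded struct lock_object.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priority.h>
#include <sys/refcount.h>

struct obj {
	volatile u_int	refs;	/* managed with refcount_acquire/release */
};

static struct mtx obj_mtx;	/* assumed initialized elsewhere with mtx_init() */

/*
 * Block until all references to 'o' have drained.  The mutex is passed
 * as the interlock; _refcount_sleep() unlocks it on every path, so the
 * caller must re-take it before re-checking the count.
 */
static void
obj_drain(struct obj *o)
{

	mtx_lock(&obj_mtx);
	while (REFCOUNT_COUNT(o->refs) != 0) {
		refcount_sleep_interlock(&o->refs, &obj_mtx, "objdrn", PWAIT);
		mtx_lock(&obj_mtx);
	}
	mtx_unlock(&obj_mtx);
}

Doing the same with plain refcount_sleep() would force the caller to mtx_unlock() before sleeping, opening exactly the lost-wakeup window the interlock variant closes.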