Index: sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
===================================================================
--- sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
+++ sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
@@ -72,12 +72,6 @@
 	return (xoap);
 }
 
-static void
-vn_rele_inactive(vnode_t *vp)
-{
-	vrele(vp);
-}
-
 /*
  * Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
  * asynchronously using a taskq. This can avoid deadlocks caused by re-entering
@@ -92,13 +86,10 @@
 vn_rele_async(vnode_t *vp, taskq_t *taskq)
 {
 	VERIFY(vp->v_count > 0);
-	VI_LOCK(vp);
-	if (vp->v_count == 1 && !(vp->v_iflag & VI_DOINGINACT)) {
-		VI_UNLOCK(vp);
-		VERIFY(taskq_dispatch((taskq_t *)taskq,
-		    (task_func_t *)vn_rele_inactive, vp, TQ_SLEEP) != 0);
+	if (vfs_refcount_release_if_not_last(&vp->v_usecount)) {
+		vdrop(vp);
 		return;
 	}
-	refcount_release(&vp->v_usecount);
-	vdropl(vp);
+	VERIFY(taskq_dispatch((taskq_t *)taskq,
+	    (task_func_t *)vrele, vp, TQ_SLEEP) != 0);
 }
Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -2455,37 +2455,6 @@
 	BO_UNLOCK(bo);
 }
 
-/*
- * A temporary hack until refcount_* APIs are sorted out.
- */
-static __inline int
-vfs_refcount_acquire_if_not_zero(volatile u_int *count)
-{
-	u_int old;
-
-	old = *count;
-	for (;;) {
-		if (old == 0)
-			return (0);
-		if (atomic_fcmpset_int(count, &old, old + 1))
-			return (1);
-	}
-}
-
-static __inline int
-vfs_refcount_release_if_not_last(volatile u_int *count)
-{
-	u_int old;
-
-	old = *count;
-	for (;;) {
-		if (old == 1)
-			return (0);
-		if (atomic_fcmpset_int(count, &old, old - 1))
-			return (1);
-	}
-}
-
 static void
 v_init_counters(struct vnode *vp)
 {
Index: sys/sys/refcount.h
===================================================================
--- sys/sys/refcount.h
+++ sys/sys/refcount.h
@@ -76,4 +76,35 @@
 	return (1);
 }
 
+/*
+ * A temporary hack until refcount_* APIs are sorted out.
+ */
+static __inline int
+vfs_refcount_acquire_if_not_zero(volatile u_int *count)
+{
+	u_int old;
+
+	old = *count;
+	for (;;) {
+		if (old == 0)
+			return (0);
+		if (atomic_fcmpset_int(count, &old, old + 1))
+			return (1);
+	}
+}
+
+static __inline int
+vfs_refcount_release_if_not_last(volatile u_int *count)
+{
+	u_int old;
+
+	old = *count;
+	for (;;) {
+		if (old == 1)
+			return (0);
+		if (atomic_fcmpset_int(count, &old, old - 1))
+			return (1);
+	}
+}
+
 #endif /* ! __SYS_REFCOUNT_H__ */
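
For reference, below is a minimal userland sketch of the release-if-not-last idiom the patch relies on in vn_rele_async(): the count is dropped with a compare-and-swap loop only while it is not the final reference, so the caller can fall back to the slow path (here, the taskq dispatch of vrele()) when it is. C11 atomic_compare_exchange_weak() stands in for the kernel's atomic_fcmpset_int(); the function and variable names are hypothetical and not part of the patch.

/*
 * Minimal userland sketch of the release-if-not-last idiom; not kernel code.
 * atomic_compare_exchange_weak() plays the role of atomic_fcmpset_int(): on
 * failure it reloads 'old' with the current value and the loop retries.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool
release_if_not_last(atomic_uint *count)
{
	unsigned int old;

	old = atomic_load(count);
	for (;;) {
		if (old == 1)
			return (false);	/* last reference: take the slow path */
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return (true);	/* dropped one of several references */
	}
}

int
main(void)
{
	atomic_uint refs = 2;

	/* 2 -> 1: the fast path succeeds and prints 1. */
	printf("%d\n", release_if_not_last(&refs));
	/* refs == 1: refused (prints 0), mirroring the taskq_dispatch() fallback above. */
	printf("%d\n", release_if_not_last(&refs));
	return (0);
}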