Index: head/sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
===================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
@@ -72,12 +72,6 @@
         return (xoap);
 }
 
-static void
-vn_rele_inactive(vnode_t *vp)
-{
-        vrele(vp);
-}
-
 /*
  * Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
  * asynchronously using a taskq. This can avoid deadlocks caused by re-entering
@@ -92,13 +86,10 @@
 vn_rele_async(vnode_t *vp, taskq_t *taskq)
 {
         VERIFY(vp->v_count > 0);
-        VI_LOCK(vp);
-        if (vp->v_count == 1 && !(vp->v_iflag & VI_DOINGINACT)) {
-                VI_UNLOCK(vp);
-                VERIFY(taskq_dispatch((taskq_t *)taskq,
-                    (task_func_t *)vn_rele_inactive, vp, TQ_SLEEP) != 0);
+        if (refcount_release_if_not_last(&vp->v_usecount)) {
+                vdrop(vp);
                 return;
         }
-        refcount_release(&vp->v_usecount);
-        vdropl(vp);
+        VERIFY(taskq_dispatch((taskq_t *)taskq,
+            (task_func_t *)vrele, vp, TQ_SLEEP) != 0);
 }
Index: head/sys/kern/vfs_subr.c
===================================================================
--- head/sys/kern/vfs_subr.c
+++ head/sys/kern/vfs_subr.c
@@ -2455,37 +2455,6 @@
         BO_UNLOCK(bo);
 }
 
-/*
- * A temporary hack until refcount_* APIs are sorted out.
- */
-static __inline int
-vfs_refcount_acquire_if_not_zero(volatile u_int *count)
-{
-        u_int old;
-
-        old = *count;
-        for (;;) {
-                if (old == 0)
-                        return (0);
-                if (atomic_fcmpset_int(count, &old, old + 1))
-                        return (1);
-        }
-}
-
-static __inline int
-vfs_refcount_release_if_not_last(volatile u_int *count)
-{
-        u_int old;
-
-        old = *count;
-        for (;;) {
-                if (old == 1)
-                        return (0);
-                if (atomic_fcmpset_int(count, &old, old - 1))
-                        return (1);
-        }
-}
-
 static void
 v_init_counters(struct vnode *vp)
 {
@@ -2524,7 +2493,7 @@
         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 
         if (vp->v_type != VCHR &&
-            vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) {
+            refcount_acquire_if_not_zero(&vp->v_usecount)) {
                 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
                     ("vnode with usecount and VI_OWEINACT set"));
         } else {
@@ -2616,7 +2585,7 @@
          * Upgrade our holdcnt to a usecount.
          */
         if (vp->v_type == VCHR ||
-            !vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) {
+            !refcount_acquire_if_not_zero(&vp->v_usecount)) {
                 VI_LOCK(vp);
                 if ((vp->v_iflag & VI_OWEINACT) == 0) {
                         oweinact = 0;
@@ -2720,7 +2689,7 @@
         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 
         if (vp->v_type != VCHR &&
-            vfs_refcount_release_if_not_last(&vp->v_usecount)) {
+            refcount_release_if_not_last(&vp->v_usecount)) {
                 if (func == VPUTX_VPUT)
                         VOP_UNLOCK(vp, 0);
                 vdrop(vp);
@@ -2836,7 +2805,7 @@
         ASSERT_VI_UNLOCKED(vp, __func__);
         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
         if (!locked) {
-                if (vfs_refcount_acquire_if_not_zero(&vp->v_holdcnt)) {
+                if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) {
                         VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
                             ("_vhold: vnode with holdcnt is free"));
                         return;
@@ -2907,7 +2876,7 @@
         if ((int)vp->v_holdcnt <= 0)
                 panic("vdrop: holdcnt %d", vp->v_holdcnt);
         if (!locked) {
-                if (vfs_refcount_release_if_not_last(&vp->v_holdcnt))
+                if (refcount_release_if_not_last(&vp->v_holdcnt))
                         return;
                 VI_LOCK(vp);
         }
@@ -5438,12 +5407,12 @@
          * acquired with vhold(), but that might try to acquire the vnode
          * interlock, which would be a LOR with the mount vnode list lock.
          */
-        held = vfs_refcount_acquire_if_not_zero(&vp->v_holdcnt);
+        held = refcount_acquire_if_not_zero(&vp->v_holdcnt);
         mtx_unlock(&mp->mnt_listmtx);
         if (!held)
                 goto abort;
         VI_LOCK(vp);
-        if (!vfs_refcount_release_if_not_last(&vp->v_holdcnt)) {
+        if (!refcount_release_if_not_last(&vp->v_holdcnt)) {
                 vdropl(vp);
                 goto abort;
         }
Index: head/sys/sys/refcount.h
===================================================================
--- head/sys/sys/refcount.h
+++ head/sys/sys/refcount.h
@@ -76,4 +76,35 @@
         return (1);
 }
 
+/*
+ * A temporary hack until refcount_* APIs are sorted out.
+ */
+static __inline int
+refcount_acquire_if_not_zero(volatile u_int *count)
+{
+        u_int old;
+
+        old = *count;
+        for (;;) {
+                if (old == 0)
+                        return (0);
+                if (atomic_fcmpset_int(count, &old, old + 1))
+                        return (1);
+        }
+}
+
+static __inline int
+refcount_release_if_not_last(volatile u_int *count)
+{
+        u_int old;
+
+        old = *count;
+        for (;;) {
+                if (old == 1)
+                        return (0);
+                if (atomic_fcmpset_int(count, &old, old - 1))
+                        return (1);
+        }
+}
+
 #endif /* ! __SYS_REFCOUNT_H__ */
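
For reference, the two helpers this diff promotes into sys/sys/refcount.h are conditional reference-count operations: acquire fails once the count has already reached zero, and release fails when it would drop the last reference, letting the caller fall back to a locked slow path. Below is a minimal userspace sketch of those semantics. It is illustrative only: it assumes a C11 toolchain and substitutes atomic_compare_exchange_weak() for the kernel's atomic_fcmpset_int() (both reload the expected value on failure), and the function and variable names are invented for the example, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Sketch of refcount_acquire_if_not_zero(): retry the compare-and-swap
 * until either the count is observed to be 0 (fail) or the increment
 * lands (success). On CAS failure, "old" is reloaded with the current
 * value, so the zero check is re-evaluated each iteration.
 */
static int
acquire_if_not_zero(atomic_uint *count)
{
        unsigned int old = atomic_load(count);

        for (;;) {
                if (old == 0)
                        return (0);
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return (1);
        }
}

/* Sketch of refcount_release_if_not_last(): never drop the final reference. */
static int
release_if_not_last(atomic_uint *count)
{
        unsigned int old = atomic_load(count);

        for (;;) {
                if (old == 1)
                        return (0);
                if (atomic_compare_exchange_weak(count, &old, old - 1))
                        return (1);
        }
}

int
main(void)
{
        atomic_uint refs = 2;

        printf("release 2->1: %d\n", release_if_not_last(&refs)); /* 1 */
        printf("release last: %d\n", release_if_not_last(&refs)); /* 0: caller
                                                must take the slow path */
        printf("acquire on 1: %d\n", acquire_if_not_zero(&refs)); /* 1, refs=2 */
        return (0);
}

This is also why the vn_rele_async() rewrite above works: when the reference being dropped is not the last one, refcount_release_if_not_last() succeeds and the function can vdrop() and return without ever taking the vnode interlock; only when the drop might trigger VOP_INACTIVE() does it dispatch vrele() to the taskq, which in turn allows the vn_rele_inactive() trampoline to be deleted.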