Index: sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c =================================================================== --- sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c +++ sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c @@ -85,8 +85,7 @@ void vn_rele_async(vnode_t *vp, taskq_t *taskq) { - VERIFY(vp->v_count > 0); - if (refcount_release_if_not_last(&vp->v_usecount)) { + if (vn_ref_release_if_not_last(vp, v_usecount)) { return; } VERIFY(taskq_dispatch((taskq_t *)taskq, Index: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c =================================================================== --- sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c +++ sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c @@ -2079,7 +2079,11 @@ for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL; zp = list_next(&zfsvfs->z_all_znodes, zp)) if (zp->z_sa_hdl) { +#ifdef illumos ASSERT(ZTOV(zp)->v_count >= 0); +#else + VNPASS(vn_ref_read(ZTOV(zp), v_usecount) >= 0, ZTOV(zp)); +#endif zfs_znode_dmu_fini(zp); } mutex_exit(&zfsvfs->z_znodes_lock); Index: sys/fs/msdosfs/msdosfs_vnops.c =================================================================== --- sys/fs/msdosfs/msdosfs_vnops.c +++ sys/fs/msdosfs/msdosfs_vnops.c @@ -68,6 +68,7 @@ #include #include #include +#include #include #include Index: sys/fs/nullfs/null_subr.c =================================================================== --- sys/fs/nullfs/null_subr.c +++ sys/fs/nullfs/null_subr.c @@ -45,6 +45,7 @@ #include #include #include +#include #include Index: sys/fs/nullfs/null_vfsops.c =================================================================== --- sys/fs/nullfs/null_vfsops.c +++ sys/fs/nullfs/null_vfsops.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include Index: sys/fs/unionfs/union_vnops.c =================================================================== --- sys/fs/unionfs/union_vnops.c +++ sys/fs/unionfs/union_vnops.c @@ -51,6 +51,7 @@ #include #include 
#include +#include #include #include #include Index: sys/kern/vfs_subr.c =================================================================== --- sys/kern/vfs_subr.c +++ sys/kern/vfs_subr.c @@ -2777,8 +2777,8 @@ vp, ("%s called for an initialized vnode", __FUNCTION__)); ASSERT_VI_UNLOCKED(vp, __FUNCTION__); - refcount_init(&vp->v_holdcnt, 1); - refcount_init(&vp->v_usecount, 1); + vn_ref_init(vp, v_holdcnt, 1); + vn_ref_init(vp, v_usecount, 1); } /* @@ -2851,7 +2851,7 @@ { enum vgetstate vs; - if (refcount_acquire_if_not_zero(&vp->v_usecount)) { + if (vn_ref_acquire_if_not_zero(vp, v_usecount)) { vs = VGET_USECOUNT; } else { vhold(vp); @@ -2880,29 +2880,19 @@ /* * See the comment in vget_finish before usecount bump. */ - if (refcount_acquire_if_not_zero(&vp->v_usecount)) { -#ifdef INVARIANTS - int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); - VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old)); -#else - refcount_release(&vp->v_holdcnt); -#endif + if (vn_ref_acquire_if_not_zero(vp, v_usecount)) { + vn_ref_release(vp, v_holdcnt); return (0); } VI_LOCK(vp); - if (refcount_acquire_if_not_zero(&vp->v_usecount)) { -#ifdef INVARIANTS - int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); - VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); -#else - refcount_release(&vp->v_holdcnt); -#endif + if (vn_ref_acquire_if_not_zero(vp, v_usecount)) { + vn_ref_release(vp, v_holdcnt); VI_UNLOCK(vp); return (0); } v_incr_devcount(vp); - refcount_acquire(&vp->v_usecount); + vn_ref_acquire(vp, v_usecount); VI_UNLOCK(vp); return (0); } @@ -2910,7 +2900,7 @@ int vget_finish(struct vnode *vp, int flags, enum vgetstate vs) { - int error, old; + int error; if ((flags & LK_INTERLOCK) != 0) ASSERT_VI_LOCKED(vp, __func__); @@ -2941,16 +2931,8 @@ * the vnode around. Otherwise someone else lended their hold count and * we have to drop ours. 
*/ - old = atomic_fetchadd_int(&vp->v_usecount, 1); - VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); - if (old != 0) { -#ifdef INVARIANTS - old = atomic_fetchadd_int(&vp->v_holdcnt, -1); - VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); -#else - refcount_release(&vp->v_holdcnt); -#endif - } + if (vn_ref_acquire_ret(vp, v_usecount) != 0) + vn_ref_release(vp, v_holdcnt); return (0); } @@ -2966,7 +2948,7 @@ * See the comment in vget_finish before usecount bump. */ if (!interlock) { - if (refcount_acquire_if_not_zero(&vp->v_usecount)) { + if (vn_ref_acquire_if_not_zero(vp, v_usecount)) { VNODE_REFCOUNT_FENCE_ACQ(); VNASSERT(vp->v_holdcnt > 0, vp, ("%s: active vnode not held", __func__)); @@ -2986,7 +2968,7 @@ } } VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR)")); - if (refcount_acquire_if_not_zero(&vp->v_usecount)) { + if (vn_ref_acquire_if_not_zero(vp, v_usecount)) { VNODE_REFCOUNT_FENCE_ACQ(); VNASSERT(vp->v_holdcnt > 0, vp, ("%s: active vnode not held", __func__)); @@ -2996,7 +2978,7 @@ } vhold(vp); v_incr_devcount(vp); - refcount_acquire(&vp->v_usecount); + vn_ref_acquire(vp, v_usecount); if (!interlock) VI_UNLOCK(vp); return; @@ -3005,7 +2987,6 @@ void vref(struct vnode *vp) { - int old; CTR2(KTR_VFS, "%s: vp %p", __func__, vp); if (__predict_false(vp->v_type == VCHR)) { @@ -3013,7 +2994,7 @@ return; } - if (refcount_acquire_if_not_zero(&vp->v_usecount)) { + if (vn_ref_acquire_if_not_zero(vp, v_usecount)) { VNODE_REFCOUNT_FENCE_ACQ(); VNASSERT(vp->v_holdcnt > 0, vp, ("%s: active vnode not held", __func__)); @@ -3023,16 +3004,8 @@ /* * See the comment in vget_finish. 
*/ - old = atomic_fetchadd_int(&vp->v_usecount, 1); - VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); - if (old != 0) { -#ifdef INVARIANTS - old = atomic_fetchadd_int(&vp->v_holdcnt, -1); - VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); -#else - refcount_release(&vp->v_holdcnt); -#endif - } + if (vn_ref_acquire_ret(vp, v_usecount) != 0) + vn_ref_release(vp, v_holdcnt); } void @@ -3053,12 +3026,7 @@ { CTR2(KTR_VFS, "%s: vp %p", __func__, vp); -#ifdef INVARIANTS - int old = atomic_fetchadd_int(&vp->v_usecount, 1); - VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); -#else - refcount_acquire(&vp->v_usecount); -#endif + vn_ref_acquire_nz(vp, v_usecount); } void @@ -3066,12 +3034,7 @@ { CTR2(KTR_VFS, "%s: vp %p", __func__, vp); -#ifdef INVARIANTS - int old = atomic_fetchadd_int(&vp->v_usecount, n); - VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); -#else - atomic_add_int(&vp->v_usecount, n); -#endif + vn_ref_acquiren_nz(vp, v_usecount, n); } /* @@ -3307,10 +3270,10 @@ vrele_vchr(struct vnode *vp) { - if (refcount_release_if_not_last(&vp->v_usecount)) + if (vn_ref_release_if_not_last(vp, v_usecount)) return; VI_LOCK(vp); - if (!refcount_release(&vp->v_usecount)) { + if (!vn_ref_release(vp, v_usecount)) { VI_UNLOCK(vp); return; } @@ -3332,7 +3295,7 @@ vrele_vchr(vp); return; } - if (!refcount_release(&vp->v_usecount)) + if (!vn_ref_release(vp, v_usecount)) return; vput_final(vp, VRELE); } @@ -3347,7 +3310,7 @@ ASSERT_VOP_LOCKED(vp, __func__); ASSERT_VI_UNLOCKED(vp, __func__); - if (!refcount_release(&vp->v_usecount)) { + if (!vn_ref_release(vp, v_usecount)) { VOP_UNLOCK(vp); return; } @@ -3364,7 +3327,7 @@ ASSERT_VOP_LOCKED(vp, __func__); ASSERT_VI_UNLOCKED(vp, __func__); - if (!refcount_release(&vp->v_usecount)) + if (!vn_ref_release(vp, v_usecount)) return; vput_final(vp, VUNREF); } @@ -3373,12 +3336,9 @@ vhold(struct vnode *vp) { struct vdbatch *vd; - int old; CTR2(KTR_VFS, "%s: vp %p", __func__, 
vp); - old = atomic_fetchadd_int(&vp->v_holdcnt, 1); - VNASSERT(old >= 0, vp, ("%s: wrong hold count %d", __func__, old)); - if (old != 0) + if (vn_ref_acquire_ret(vp, v_holdcnt) != 0) return; critical_enter(); vd = DPCPU_PTR(vd); @@ -3400,12 +3360,7 @@ { CTR2(KTR_VFS, "%s: vp %p", __func__, vp); -#ifdef INVARIANTS - int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); - VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old)); -#else - atomic_add_int(&vp->v_holdcnt, 1); -#endif + vn_ref_acquire_nz(vp, v_holdcnt); } static void __noinline @@ -3562,7 +3517,7 @@ ASSERT_VI_UNLOCKED(vp, __func__); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); - if (refcount_release_if_not_last(&vp->v_holdcnt)) + if (vn_ref_release_if_not_last(vp, v_holdcnt)) return; VI_LOCK(vp); vdropl(vp); @@ -3574,7 +3529,7 @@ ASSERT_VI_LOCKED(vp, __func__); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); - if (!refcount_release(&vp->v_holdcnt)) { + if (!vn_ref_release(vp, v_holdcnt)) { VI_UNLOCK(vp); return; } @@ -4152,6 +4107,22 @@ VOP_PRINT(vp); } +#ifdef INVARIANTS +void +vn_ref_assert(void *obj, long count, const char *exp, const char *file, + int line, const char *func) +{ + struct vnode *vp; + + vp = obj; + + vn_printf(vp, "refcount assertion %s failed; count %ld at %s:%d (%s)\n", + exp, count, file, line, func); + panic("refcount assertion %s failed; count %ld at %s:%d (%s)\n", + exp, count, file, line, func); +} +#endif + #ifdef DDB /* * List all of the locked vnodes in the system. @@ -6328,7 +6299,7 @@ /* * There is nothing to do if we are the last user. */ - if (!refcount_release_if_not_last(&vp->v_holdcnt)) + if (!vn_ref_release_if_not_last(vp, v_holdcnt)) goto out_lost; mtx_lock(&mp->mnt_listmtx); return (true); Index: sys/sys/vnode.h =================================================================== --- sys/sys/vnode.h +++ sys/sys/vnode.h @@ -45,6 +45,7 @@ #include #include #include +#include /* * The vnode is the focus of all file activity in UNIX. 
There is a @@ -167,8 +168,8 @@ daddr_t v_lastw; /* v last write */ int v_clen; /* v length of cur. cluster */ - u_int v_holdcnt; /* I prevents recycling. */ - u_int v_usecount; /* I ref count of users */ + refcntint_t v_holdcnt; /* I prevents recycling. */ + refcntint_t v_usecount; /* I ref count of users */ u_int v_iflag; /* i vnode flags (see below) */ u_int v_vflag; /* v vnode flags */ u_short v_mflag; /* l mnt-specific vnode flags */ @@ -968,6 +969,39 @@ _error; \ }) +#ifdef INVARIANTS +void vn_ref_assert(void *, long, const char *, const char *, int, const char *); +#else +#define vn_ref_assert NULL +#endif + +#define vn_ref_init(vp, counter, n) \ + refcntint_init(&vp->counter, n) + +#define vn_ref_read(vp, counter) \ + refcntint_read(&vp->counter) + +#define vn_ref_acquire(vp, counter) \ + _refcntint_acquire(&vp->counter, vn_ref_assert, vp) + +#define vn_ref_acquire_ret(vp, counter) \ + _refcntint_acquire_ret(&vp->counter, vn_ref_assert, vp) + +#define vn_ref_acquire_nz(vp, counter) \ + _refcntint_acquire_nz(&vp->counter, vn_ref_assert, vp) + +#define vn_ref_acquiren_nz(vp, counter, n) \ + _refcntint_acquiren_nz(&vp->counter, n, vn_ref_assert, vp) + +#define vn_ref_release(vp, counter) \ + _refcntint_release(&vp->counter, vn_ref_assert, vp) + +#define vn_ref_acquire_if_not_zero(vp, counter) \ + _refcntint_acquire_if_gt(&vp->counter, 0, vn_ref_assert, vp) + +#define vn_ref_release_if_not_last(vp, counter) \ + _refcntint_release_if_gt(&vp->counter, 1, vn_ref_assert, vp) + #include #define VFS_VOP_VECTOR_REGISTER(vnodeops) \ Index: sys/ufs/ffs/ffs_snapshot.c =================================================================== --- sys/ufs/ffs/ffs_snapshot.c +++ sys/ufs/ffs/ffs_snapshot.c @@ -58,6 +58,7 @@ #include #include #include +#include #include Index: sys/ufs/ffs/ffs_vnops.c =================================================================== --- sys/ufs/ffs/ffs_vnops.c +++ sys/ufs/ffs/ffs_vnops.c @@ -82,6 +82,7 @@ #include #include #include +#include 
#include #include Index: sys/ufs/ufs/ufs_gjournal.c =================================================================== --- sys/ufs/ufs/ufs_gjournal.c +++ sys/ufs/ufs/ufs_gjournal.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include