Index: sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
===================================================================
--- sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
+++ sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
@@ -85,8 +85,7 @@
 void
 vn_rele_async(vnode_t *vp, taskq_t *taskq)
 {
-        VERIFY(vp->v_count > 0);
-        if (refcount_release_if_not_last(&vp->v_usecount)) {
+        if (vn_ref_release_if_not_last(vp, v_usecount)) {
                 return;
         }
         VERIFY(taskq_dispatch((taskq_t *)taskq,
Index: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c
===================================================================
--- sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c
+++ sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c
@@ -2079,7 +2079,11 @@
         for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
             zp = list_next(&zfsvfs->z_all_znodes, zp))
                 if (zp->z_sa_hdl) {
+#ifdef illumos
                         ASSERT(ZTOV(zp)->v_count >= 0);
+#else
+                        VNPASS(vn_ref_read(ZTOV(zp), v_usecount) >= 0, ZTOV(zp));
+#endif
                         zfs_znode_dmu_fini(zp);
                 }
         mutex_exit(&zfsvfs->z_znodes_lock);
Index: sys/fs/msdosfs/msdosfs_vnops.c
===================================================================
--- sys/fs/msdosfs/msdosfs_vnops.c
+++ sys/fs/msdosfs/msdosfs_vnops.c
@@ -68,6 +68,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
Index: sys/fs/nullfs/null_subr.c
===================================================================
--- sys/fs/nullfs/null_subr.c
+++ sys/fs/nullfs/null_subr.c
@@ -45,6 +45,7 @@
 #include
 #include
 #include
+#include
 #include
Index: sys/fs/nullfs/null_vfsops.c
===================================================================
--- sys/fs/nullfs/null_vfsops.c
+++ sys/fs/nullfs/null_vfsops.c
@@ -52,6 +52,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
Index: sys/fs/unionfs/union_vnops.c
===================================================================
--- sys/fs/unionfs/union_vnops.c
+++ sys/fs/unionfs/union_vnops.c
@@ -51,6 +51,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -2777,8 +2777,8 @@
             vp, ("%s called for an initialized vnode", __FUNCTION__));
         ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
 
-        refcount_init(&vp->v_holdcnt, 1);
-        refcount_init(&vp->v_usecount, 1);
+        vn_ref_init(vp, v_holdcnt, 1);
+        vn_ref_init(vp, v_usecount, 1);
 }
 
 /*
@@ -2828,7 +2828,7 @@
 {
         enum vgetstate vs;
 
-        if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
+        if (vn_ref_acquire_if_not_zero(vp, v_usecount)) {
                 vs = VGET_USECOUNT;
         } else {
                 vhold(vp);
@@ -2857,29 +2857,19 @@
 
         /*
          * See the comment in vget_finish before usecount bump.
         */
-        if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-#ifdef INVARIANTS
-                int old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
-                VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old));
-#else
-                refcount_release(&vp->v_holdcnt);
-#endif
+        if (vn_ref_acquire_if_not_zero(vp, v_usecount)) {
+                vn_ref_release(vp, v_holdcnt);
                 return (0);
         }
         VI_LOCK(vp);
-        if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-#ifdef INVARIANTS
-                int old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
-                VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
-#else
-                refcount_release(&vp->v_holdcnt);
-#endif
+        if (vn_ref_acquire_if_not_zero(vp, v_usecount)) {
+                vn_ref_release(vp, v_holdcnt);
                 VI_UNLOCK(vp);
                 return (0);
         }
         v_incr_devcount(vp);
-        refcount_acquire(&vp->v_usecount);
+        vn_ref_acquire(vp, v_usecount);
         VI_UNLOCK(vp);
         return (0);
 }
@@ -2887,7 +2877,7 @@
 int
 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
 {
-        int error, old;
+        int error;
 
         VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
             ("%s: invalid lock operation", __func__));
@@ -2926,16 +2916,8 @@
          * the vnode around. Otherwise someone else lended their hold count and
          * we have to drop ours.
*/ - old = atomic_fetchadd_int(&vp->v_usecount, 1); - VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); - if (old != 0) { -#ifdef INVARIANTS - old = atomic_fetchadd_int(&vp->v_holdcnt, -1); - VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); -#else - refcount_release(&vp->v_holdcnt); -#endif - } + if (vn_ref_acquire_ret(vp, v_usecount) != 0) + vn_ref_release(vp, v_holdcnt); return (0); } @@ -2951,7 +2933,7 @@ * See the comment in vget_finish before usecount bump. */ if (!interlock) { - if (refcount_acquire_if_not_zero(&vp->v_usecount)) { + if (vn_ref_acquire_if_not_zero(vp, v_usecount)) { VNODE_REFCOUNT_FENCE_ACQ(); VNASSERT(vp->v_holdcnt > 0, vp, ("%s: active vnode not held", __func__)); @@ -2971,7 +2953,7 @@ } } VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR)")); - if (refcount_acquire_if_not_zero(&vp->v_usecount)) { + if (vn_ref_acquire_if_not_zero(vp, v_usecount)) { VNODE_REFCOUNT_FENCE_ACQ(); VNASSERT(vp->v_holdcnt > 0, vp, ("%s: active vnode not held", __func__)); @@ -2981,7 +2963,7 @@ } vhold(vp); v_incr_devcount(vp); - refcount_acquire(&vp->v_usecount); + vn_ref_acquire(vp, v_usecount); if (!interlock) VI_UNLOCK(vp); return; @@ -2990,7 +2972,6 @@ void vref(struct vnode *vp) { - int old; CTR2(KTR_VFS, "%s: vp %p", __func__, vp); if (__predict_false(vp->v_type == VCHR)) { @@ -2998,7 +2979,7 @@ return; } - if (refcount_acquire_if_not_zero(&vp->v_usecount)) { + if (vn_ref_acquire_if_not_zero(vp, v_usecount)) { VNODE_REFCOUNT_FENCE_ACQ(); VNASSERT(vp->v_holdcnt > 0, vp, ("%s: active vnode not held", __func__)); @@ -3008,16 +2989,8 @@ /* * See the comment in vget_finish. 
*/ - old = atomic_fetchadd_int(&vp->v_usecount, 1); - VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); - if (old != 0) { -#ifdef INVARIANTS - old = atomic_fetchadd_int(&vp->v_holdcnt, -1); - VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); -#else - refcount_release(&vp->v_holdcnt); -#endif - } + if (vn_ref_acquire_ret(vp, v_usecount) != 0) + vn_ref_release(vp, v_holdcnt); } void @@ -3038,12 +3011,7 @@ { CTR2(KTR_VFS, "%s: vp %p", __func__, vp); -#ifdef INVARIANTS - int old = atomic_fetchadd_int(&vp->v_usecount, 1); - VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); -#else - refcount_acquire(&vp->v_usecount); -#endif + vn_ref_acquire_nz(vp, v_usecount); } void @@ -3051,12 +3019,7 @@ { CTR2(KTR_VFS, "%s: vp %p", __func__, vp); -#ifdef INVARIANTS - int old = atomic_fetchadd_int(&vp->v_usecount, n); - VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); -#else - atomic_add_int(&vp->v_usecount, n); -#endif + vn_ref_acquiren_nz(vp, v_usecount, n); } /* @@ -3185,8 +3148,8 @@ else if (func == VPUTX_VPUT) ASSERT_VOP_LOCKED(vp, "vput"); ASSERT_VI_UNLOCKED(vp, __func__); - VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, - ("%s: wrong ref counts", __func__)); + VNPASS(vn_ref_read(vp, v_holdcnt) > 0 && + vn_ref_read(vp, v_usecount) > 0, vp); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); @@ -3199,7 +3162,7 @@ * count which provides liveness of the vnode, in which case we * have to vdrop. 
          */
-        if (!refcount_release(&vp->v_usecount)) {
+        if (!vn_ref_release(vp, v_usecount)) {
                 if (func == VPUTX_VPUT)
                         VOP_UNLOCK(vp);
                 return;
         }
@@ -3302,12 +3265,9 @@
 vhold(struct vnode *vp)
 {
         struct vdbatch *vd;
-        int old;
 
         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-        old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
-        VNASSERT(old >= 0, vp, ("%s: wrong hold count %d", __func__, old));
-        if (old != 0)
+        if (vn_ref_acquire_ret(vp, v_holdcnt) != 0)
                 return;
         critical_enter();
         vd = DPCPU_PTR(vd);
@@ -3329,12 +3289,7 @@
 {
 
         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-#ifdef INVARIANTS
-        int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
-        VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old));
-#else
-        atomic_add_int(&vp->v_holdcnt, 1);
-#endif
+        vn_ref_acquire_nz(vp, v_holdcnt);
 }
 
 static void __noinline
@@ -3491,7 +3446,7 @@
         ASSERT_VI_UNLOCKED(vp, __func__);
         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 
-        if (refcount_release_if_not_last(&vp->v_holdcnt))
+        if (vn_ref_release_if_not_last(vp, v_holdcnt))
                 return;
         VI_LOCK(vp);
         vdropl(vp);
@@ -3503,7 +3458,7 @@
         ASSERT_VI_LOCKED(vp, __func__);
         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 
-        if (!refcount_release(&vp->v_holdcnt)) {
+        if (!vn_ref_release(vp, v_holdcnt)) {
                 VI_UNLOCK(vp);
                 return;
         }
@@ -4081,6 +4036,20 @@
         VOP_PRINT(vp);
 }
 
+#ifdef INVARIANTS
+void
+vn_ref_assert(void *obj, long count, const char *file, int line,
+    const char *func)
+{
+        struct vnode *vp;
+
+        vp = obj;
+
+        vn_printf(vp, "invalid refcount %ld at %s:%d (%s)\n", count, file, line, func);
+        panic("invalid refcount %ld at %s:%d (%s)\n", count, file, line, func);
+}
+#endif
+
 #ifdef DDB
 /*
  * List all of the locked vnodes in the system.
@@ -6256,7 +6225,7 @@
          * Since we had a period with no locks held we may be the last
          * remaining user, in which case there is nothing to do.
          */
-        if (!refcount_release_if_not_last(&vp->v_holdcnt))
+        if (!vn_ref_release_if_not_last(vp, v_holdcnt))
                 goto out_lost;
         mtx_lock(&mp->mnt_listmtx);
         return (true);
Index: sys/sys/vnode.h
===================================================================
--- sys/sys/vnode.h
+++ sys/sys/vnode.h
@@ -45,6 +45,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * The vnode is the focus of all file activity in UNIX. There is a
@@ -167,8 +168,8 @@
         daddr_t v_lastw;                        /* v last write */
         int     v_clen;                         /* v length of cur. cluster */
 
-        u_int   v_holdcnt;                      /* I prevents recycling. */
-        u_int   v_usecount;                     /* I ref count of users */
+        refcntint_t v_holdcnt;                  /* I prevents recycling. */
+        refcntint_t v_usecount;                 /* I ref count of users */
         u_int   v_iflag;                        /* i vnode flags (see below) */
         u_int   v_vflag;                        /* v vnode flags */
         u_short v_mflag;                        /* l mnt-specific vnode flags */
@@ -968,6 +969,39 @@
         _error;                                                         \
 })
 
+#ifdef INVARIANTS
+void    vn_ref_assert(void *, long, const char *, int, const char *);
+#else
+#define vn_ref_assert   NULL
+#endif
+
+#define vn_ref_init(vp, counter, n)                                     \
+        refcntint_init(&(vp)->counter, (n))
+
+#define vn_ref_read(vp, counter)                                        \
+        refcntint_read(&(vp)->counter)
+
+#define vn_ref_acquire(vp, counter)                                     \
+        _refcntint_acquire(&(vp)->counter, vn_ref_assert, (vp))
+
+#define vn_ref_acquire_ret(vp, counter)                                 \
+        _refcntint_acquire_ret(&(vp)->counter, vn_ref_assert, (vp))
+
+#define vn_ref_acquire_nz(vp, counter)                                  \
+        _refcntint_acquire_nz(&(vp)->counter, vn_ref_assert, (vp))
+
+#define vn_ref_acquiren_nz(vp, counter, n)                              \
+        _refcntint_acquiren_nz(&(vp)->counter, (n), vn_ref_assert, (vp))
+
+#define vn_ref_release(vp, counter)                                     \
+        _refcntint_release(&(vp)->counter, vn_ref_assert, (vp))
+
+#define vn_ref_acquire_if_not_zero(vp, counter)                         \
+        _refcntint_acquire_if_gt(&(vp)->counter, 0, vn_ref_assert, (vp))
+
+#define vn_ref_release_if_not_last(vp, counter)                         \
+        _refcntint_release_if_gt(&(vp)->counter, 1, vn_ref_assert, (vp))
+
 #include
 
 #define VFS_VOP_VECTOR_REGISTER(vnodeops)                               \
Index: sys/ufs/ffs/ffs_snapshot.c
===================================================================
--- sys/ufs/ffs/ffs_snapshot.c
+++ sys/ufs/ffs/ffs_snapshot.c
@@ -58,6 +58,7 @@
 #include
 #include
 #include
+#include
 #include
Index: sys/ufs/ffs/ffs_vnops.c
===================================================================
--- sys/ufs/ffs/ffs_vnops.c
+++ sys/ufs/ffs/ffs_vnops.c
@@ -82,6 +82,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
Index: sys/ufs/ufs/ufs_gjournal.c
===================================================================
--- sys/ufs/ufs/ufs_gjournal.c
+++ sys/ufs/ufs/ufs_gjournal.c
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include