Index: sys/amd64/include/atomic.h
===================================================================
--- sys/amd64/include/atomic.h
+++ sys/amd64/include/atomic.h
@@ -679,4 +679,6 @@
 
 #endif /* !WANT_FUNCTIONS */
 
+#include <sys/atomic_common2.h>
+
 #endif /* !_MACHINE_ATOMIC_H_ */
Index: sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
===================================================================
--- sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
+++ sys/cddl/contrib/opensolaris/uts/common/fs/vnode.c
@@ -86,7 +86,7 @@
 vn_rele_async(vnode_t *vp, taskq_t *taskq)
 {
 	VERIFY(vp->v_count > 0);
-	if (refcount_release_if_not_last(&vp->v_usecount)) {
+	if (atomic_dec_int_if_not_last(&vp->v_usecount)) {
 		return;
 	}
 	VERIFY(taskq_dispatch((taskq_t *)taskq,
Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -72,7 +72,6 @@
 #include
 #include
 #include
-#include <sys/refcount.h>
 #include
 #include
 #include
@@ -1161,7 +1160,7 @@
 		freevnodes--;
 		vp->v_iflag &= ~VI_FREE;
 		VNODE_REFCOUNT_FENCE_REL();
-		refcount_acquire(&vp->v_holdcnt);
+		atomic_add_int(&vp->v_holdcnt, 1);
 
 		mtx_unlock(&vnode_free_list_mtx);
 		VI_UNLOCK(vp);
@@ -2656,8 +2655,8 @@
 	    vp, ("%s called for an initialized vnode", __FUNCTION__));
 	ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
 
-	refcount_init(&vp->v_holdcnt, 1);
-	refcount_init(&vp->v_usecount, 1);
+	atomic_store_int(&vp->v_holdcnt, 1);
+	atomic_store_int(&vp->v_usecount, 1);
 }
 
 /*
@@ -2711,7 +2710,7 @@
 	enum vgetstate vs;
 
 	if (__predict_true(vp->v_type != VCHR)) {
-		if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
+		if (atomic_inc_int_if_not_zero(&vp->v_usecount)) {
 			vs = VGET_USECOUNT;
 		} else {
 			_vhold(vp, interlock);
@@ -2725,7 +2724,7 @@
 		vs = VGET_HOLDCNT;
 	} else {
 		v_incr_devcount(vp);
-		refcount_acquire(&vp->v_usecount);
+		atomic_add_int(&vp->v_usecount, 1);
 		vs = VGET_USECOUNT;
 	}
 	if (!interlock)
@@ -2793,12 +2792,12 @@
 	 * we have to drop ours.
 	 */
 	if (vp->v_type != VCHR &&
-	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	    atomic_inc_int_if_not_zero(&vp->v_usecount)) {
 #ifdef INVARIANTS
 		int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1;
 		VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
 #else
-		refcount_release(&vp->v_holdcnt);
+		atomic_subtract_int(&vp->v_holdcnt, 1);
 #endif
 		VNODE_REFCOUNT_FENCE_ACQ();
 		VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
@@ -2820,12 +2819,12 @@
 	 * ourselves in the same spot.
	 */
 	if (vp->v_type != VCHR) {
-		if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
+		if (atomic_inc_int_if_not_zero(&vp->v_usecount)) {
 #ifdef INVARIANTS
 			int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1;
 			VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
 #else
-			refcount_release(&vp->v_holdcnt);
+			atomic_subtract_int(&vp->v_holdcnt, 1);
 #endif
 			VNODE_REFCOUNT_FENCE_ACQ();
 			VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
@@ -2836,7 +2835,7 @@
 		}
 	} else {
 		if (vp->v_usecount > 0)
-			refcount_release(&vp->v_holdcnt);
+			atomic_subtract_int(&vp->v_holdcnt, 1);
 	}
 	if ((vp->v_iflag & VI_OWEINACT) == 0) {
 		oweinact = 0;
@@ -2846,7 +2845,7 @@
 		VNODE_REFCOUNT_FENCE_REL();
 	}
 	v_incr_devcount(vp);
-	refcount_acquire(&vp->v_usecount);
+	atomic_add_int(&vp->v_usecount, 1);
 	if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE &&
 	    (flags & LK_NOWAIT) == 0)
 		vinactive(vp, curthread);
@@ -2865,7 +2864,7 @@
 	ASSERT_VI_UNLOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 	if (vp->v_type != VCHR &&
-	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	    atomic_inc_int_if_not_zero(&vp->v_usecount)) {
 		VNODE_REFCOUNT_FENCE_ACQ();
 		VNASSERT(vp->v_holdcnt > 0, vp,
 		    ("%s: active vnode not held", __func__));
@@ -2885,7 +2884,7 @@
 	ASSERT_VI_LOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 	if (vp->v_type != VCHR &&
-	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	    atomic_inc_int_if_not_zero(&vp->v_usecount)) {
 		VNODE_REFCOUNT_FENCE_ACQ();
 		VNASSERT(vp->v_holdcnt > 0, vp,
 		    ("%s: active vnode not held", __func__));
@@ -2900,7 +2899,7 @@
 		VNODE_REFCOUNT_FENCE_REL();
 	}
 	v_incr_devcount(vp);
-	refcount_acquire(&vp->v_usecount);
+	atomic_add_int(&vp->v_usecount, 1);
 }
 
 void
@@ -2918,7 +2917,7 @@
 	int old = atomic_fetchadd_int(&vp->v_usecount, 1);
 	VNASSERT(old > 0, vp, ("%s: wrong use count", __func__));
 #else
-	refcount_acquire(&vp->v_usecount);
+	atomic_add_int(&vp->v_usecount, 1);
 #endif
 }
 
@@ -2989,7 +2988,7 @@
 	 * count which provides liveness of the vnode, in which case we
 	 * have to vdrop.
	 */
-	if (!refcount_release(&vp->v_usecount))
+	if (atomic_fetchadd_int(&vp->v_usecount, -1) - 1 > 0)
 		return;
 	VI_LOCK(vp);
 	/*
@@ -3003,7 +3002,7 @@
 	} else {
 		VI_LOCK(vp);
 		v_decr_devcount(vp);
-		if (!refcount_release(&vp->v_usecount)) {
+		if (atomic_fetchadd_int(&vp->v_usecount, -1) - 1 > 0) {
 			VI_UNLOCK(vp);
 			return;
 		}
@@ -3114,7 +3113,7 @@
 	ASSERT_VI_UNLOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 	if (!locked) {
-		if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) {
+		if (atomic_inc_int_if_not_zero(&vp->v_holdcnt)) {
 			VNODE_REFCOUNT_FENCE_ACQ();
 			VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
 			    ("_vhold: vnode with holdcnt is free"));
@@ -3123,7 +3122,7 @@
 		VI_LOCK(vp);
 	}
 	if ((vp->v_iflag & VI_FREE) == 0) {
-		refcount_acquire(&vp->v_holdcnt);
+		atomic_add_int(&vp->v_holdcnt, 1);
 		if (!locked)
 			VI_UNLOCK(vp);
 		return;
@@ -3157,7 +3156,7 @@
 	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
 	mp->mnt_activevnodelistsize++;
 	mtx_unlock(&mp->mnt_listmtx);
-	refcount_acquire(&vp->v_holdcnt);
+	atomic_add_int(&vp->v_holdcnt, 1);
 	if (!locked)
 		VI_UNLOCK(vp);
 }
@@ -3201,11 +3200,11 @@
 		panic("vdrop: wrong holdcnt");
 	}
 	if (!locked) {
-		if (refcount_release_if_not_last(&vp->v_holdcnt))
+		if (atomic_dec_int_if_not_last(&vp->v_holdcnt))
 			return;
 		VI_LOCK(vp);
 	}
-	if (refcount_release(&vp->v_holdcnt) == 0) {
+	if (atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1 > 0) {
 		VI_UNLOCK(vp);
 		return;
 	}
@@ -5846,12 +5845,12 @@
 	 * acquired with vhold(), but that might try to acquire the vnode
 	 * interlock, which would be a LOR with the mount vnode list lock.
 	 */
-	held = refcount_acquire_if_not_zero(&vp->v_holdcnt);
+	held = atomic_inc_int_if_not_zero(&vp->v_holdcnt);
 	mtx_unlock(&mp->mnt_listmtx);
 	if (!held)
 		goto abort;
 	VI_LOCK(vp);
-	if (!refcount_release_if_not_last(&vp->v_holdcnt)) {
+	if (!atomic_dec_int_if_not_last(&vp->v_holdcnt)) {
 		vdropl(vp);
 		goto abort;
 	}
Index: sys/sys/atomic_common2.h
===================================================================
--- /dev/null
+++ sys/sys/atomic_common2.h
@@ -0,0 +1,41 @@
+#ifndef _SYS_ATOMIC_COMMON2_H_
+#define _SYS_ATOMIC_COMMON2_H_
+
+#ifndef _MACHINE_ATOMIC_H_
+#error do not include this header, use machine/atomic.h
+#endif
+
+#define ATOMIC_MODIFY_UNLESS(TYPE) \
+static __inline __result_use_check bool \
+atomic_inc_##TYPE##_if_not_zero(volatile u_##TYPE *var) \
+{ \
+	u_##TYPE old; \
+ \
+	old = *var; \
+	for (;;) { \
+		if (__predict_false(old == 0)) \
+			return (false); \
+		if (atomic_fcmpset_##TYPE(var, &old, old + 1)) \
+			return (true); \
+	} \
+} \
+static __inline __result_use_check bool \
+atomic_dec_##TYPE##_if_not_last(volatile u_##TYPE *var) \
+{ \
+	u_##TYPE old; \
+ \
+	atomic_thread_fence_rel(); \
+	old = atomic_load_##TYPE(var); \
+	for (;;) { \
+		if (__predict_false(old == 1)) \
+			return (false); \
+		if (atomic_fcmpset_##TYPE(var, &old, old - 1)) \
+			return (true); \
+	} \
+}
+
+ATOMIC_MODIFY_UNLESS(short);
+ATOMIC_MODIFY_UNLESS(int);
+ATOMIC_MODIFY_UNLESS(long);
+
+#endif
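
Notes:

The new header generates the classic fcmpset-based "modify unless" loops. As a sanity check of the intended semantics, below is a minimal userspace sketch that mirrors atomic_inc_int_if_not_zero() and atomic_dec_int_if_not_last() using C11 <stdatomic.h> rather than machine/atomic.h; the function names and the main() harness are illustrative only and are not part of this patch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace analogue of the patch's loops.  Like the kernel's
 * atomic_fcmpset_int(), atomic_compare_exchange_weak() reloads "old"
 * on failure, so the loop re-reads the counter for free.
 */
static bool
inc_if_not_zero(atomic_uint *var)
{
	unsigned int old;

	old = atomic_load_explicit(var, memory_order_relaxed);
	for (;;) {
		if (old == 0)		/* count already dropped to zero */
			return (false);
		if (atomic_compare_exchange_weak(var, &old, old + 1))
			return (true);
	}
}

static bool
dec_if_not_last(atomic_uint *var)
{
	unsigned int old;

	/* Release fence: make prior writes visible before the drop. */
	atomic_thread_fence(memory_order_release);
	old = atomic_load_explicit(var, memory_order_relaxed);
	for (;;) {
		if (old == 1)		/* last reference; refuse to drop */
			return (false);
		if (atomic_compare_exchange_weak(var, &old, old - 1))
			return (true);
	}
}

int
main(void)
{
	atomic_uint cnt = 2;

	printf("%d\n", inc_if_not_zero(&cnt));	/* 1: 2 -> 3 */
	printf("%d\n", dec_if_not_last(&cnt));	/* 1: 3 -> 2 */
	printf("%d\n", dec_if_not_last(&cnt));	/* 1: 2 -> 1 */
	printf("%d\n", dec_if_not_last(&cnt));	/* 0: refuses at 1 */
	return (0);
}

The refusal at a count of 1 is the point of the vputx()/vdrop() fast paths above: the 1->0 transition is never performed lock-free, so the caller falls back to taking the vnode interlock and does the final release under it.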