Index: head/sys/fs/devfs/devfs_vnops.c
===================================================================
--- head/sys/fs/devfs/devfs_vnops.c
+++ head/sys/fs/devfs/devfs_vnops.c
@@ -481,7 +481,7 @@
 	vp->v_rdev = dev;
 	KASSERT(vp->v_usecount == 1,
 	    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
-	dev->si_usecount += vp->v_usecount;
+	dev->si_usecount++;
 	/* Special casing of ttys for deadfs.  Probably redundant. */
 	dsw = dev->si_devsw;
 	if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
@@ -581,7 +581,7 @@
 	 * if the reference count is 2 (this last descriptor
 	 * plus the session), release the reference from the session.
 	 */
-	if (td != NULL) {
+	if (vp->v_usecount == 2 && td != NULL) {
 		p = td->td_proc;
 		PROC_LOCK(p);
 		if (vp == p->p_session->s_ttyvp) {
@@ -591,7 +591,7 @@
 		if (vp == p->p_session->s_ttyvp) {
 			SESS_LOCK(p->p_session);
 			VI_LOCK(vp);
-			if (count_dev(dev) == 2 &&
+			if (vp->v_usecount == 2 && vcount(vp) == 1 &&
 			    (vp->v_iflag & VI_DOOMED) == 0) {
 				p->p_session->s_ttyvp = NULL;
 				p->p_session->s_ttydp = NULL;
@@ -620,19 +620,19 @@
 		return (ENXIO);
 	dflags = 0;
 	VI_LOCK(vp);
+	if (vp->v_usecount == 1 && vcount(vp) == 1)
+		dflags |= FLASTCLOSE;
 	if (vp->v_iflag & VI_DOOMED) {
 		/* Forced close. */
 		dflags |= FREVOKE | FNONBLOCK;
 	} else if (dsw->d_flags & D_TRACKCLOSE) {
 		/* Keep device updated on status. */
-	} else if (count_dev(dev) > 1) {
+	} else if ((dflags & FLASTCLOSE) == 0) {
 		VI_UNLOCK(vp);
 		dev_relthread(dev, ref);
 		return (0);
 	}
-	if (count_dev(dev) == 1)
-		dflags |= FLASTCLOSE;
-	vholdl(vp);
+	vholdnz(vp);
 	VI_UNLOCK(vp);
 	vp_locked = VOP_ISLOCKED(vp);
 	VOP_UNLOCK(vp, 0);
@@ -1425,7 +1425,7 @@
 	dev = vp->v_rdev;
 	vp->v_rdev = NULL;
 	if (dev != NULL)
-		dev->si_usecount -= vp->v_usecount;
+		dev->si_usecount -= (vp->v_usecount > 0);
 	dev_unlock();
 	VI_UNLOCK(vp);
 	if (dev != NULL)
Index: head/sys/kern/vfs_subr.c
===================================================================
--- head/sys/kern/vfs_subr.c
+++ head/sys/kern/vfs_subr.c
@@ -2714,26 +2714,11 @@
 {
 	enum vgetstate vs;
 
-	if (__predict_true(vp->v_type != VCHR)) {
-		if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-			vs = VGET_USECOUNT;
-		} else {
-			_vhold(vp, interlock);
-			vs = VGET_HOLDCNT;
-		}
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
+		vs = VGET_USECOUNT;
 	} else {
-		if (!interlock)
-			VI_LOCK(vp);
-		if (vp->v_usecount == 0) {
-			vholdl(vp);
-			vs = VGET_HOLDCNT;
-		} else {
-			v_incr_devcount(vp);
-			refcount_acquire(&vp->v_usecount);
-			vs = VGET_USECOUNT;
-		}
-		if (!interlock)
-			VI_UNLOCK(vp);
+		_vhold(vp, interlock);
+		vs = VGET_HOLDCNT;
 	}
 	return (vs);
 }
@@ -2796,8 +2781,7 @@
 	 * the vnode around. Otherwise someone else lended their hold count and
 	 * we have to drop ours.
 	 */
-	if (vp->v_type != VCHR &&
-	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 #ifdef INVARIANTS
 		int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1;
 		VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
@@ -2823,24 +2807,19 @@
 	 * See the previous section. By the time we get here we may find
 	 * ourselves in the same spot.
 	 */
-	if (vp->v_type != VCHR) {
-		if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 #ifdef INVARIANTS
-			int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1;
-			VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
+		int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1;
+		VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
 #else
-			refcount_release(&vp->v_holdcnt);
+		refcount_release(&vp->v_holdcnt);
 #endif
-			VNODE_REFCOUNT_FENCE_ACQ();
-			VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
-			    ("%s: vnode with usecount and VI_OWEINACT set",
-			    __func__));
-			VI_UNLOCK(vp);
-			return (0);
-		}
-	} else {
-		if (vp->v_usecount > 0)
-			refcount_release(&vp->v_holdcnt);
+		VNODE_REFCOUNT_FENCE_ACQ();
+		VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
+		    ("%s: vnode with usecount and VI_OWEINACT set",
+		    __func__));
+		VI_UNLOCK(vp);
+		return (0);
 	}
 	if ((vp->v_iflag & VI_OWEINACT) == 0) {
 		oweinact = 0;
@@ -2868,8 +2847,7 @@
 	ASSERT_VI_UNLOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 
-	if (vp->v_type != VCHR &&
-	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 		VNODE_REFCOUNT_FENCE_ACQ();
 		VNASSERT(vp->v_holdcnt > 0, vp,
 		    ("%s: active vnode not held", __func__));
@@ -2888,8 +2866,7 @@
 	ASSERT_VI_LOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 
-	if (vp->v_type != VCHR &&
-	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 		VNODE_REFCOUNT_FENCE_ACQ();
 		VNASSERT(vp->v_holdcnt > 0, vp,
 		    ("%s: active vnode not held", __func__));
@@ -2897,8 +2874,7 @@
 		    ("%s: vnode with usecount and VI_OWEINACT set", __func__));
 		return;
 	}
-	if (vp->v_usecount == 0)
-		vholdl(vp);
+	vholdl(vp);
 	if ((vp->v_iflag & VI_OWEINACT) != 0) {
 		vp->v_iflag &= ~VI_OWEINACT;
 		VNODE_REFCOUNT_FENCE_REL();
@@ -2912,12 +2888,6 @@
 {
 
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (__predict_false(vp->v_type == VCHR)) {
-		VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp,
-		    ("%s: wrong ref counts", __func__));
-		vref(vp);
-		return;
-	}
#ifdef INVARIANTS
 	int old = atomic_fetchadd_int(&vp->v_usecount, 1);
 	VNASSERT(old > 0, vp, ("%s: wrong use count", __func__));
@@ -2986,31 +2956,22 @@
 	 * We want to hold the vnode until the inactive finishes to
 	 * prevent vgone() races. We drop the use count here and the
 	 * hold count below when we're done.
+	 *
+	 * If we release the last usecount we take ownership of the hold
+	 * count which provides liveness of the vnode, in which case we
+	 * have to vdrop.
 	 */
-	if (vp->v_type != VCHR) {
-		/*
-		 * If we release the last usecount we take ownership of the hold
-		 * count which provides liveness of the vnode, in which case we
-		 * have to vdrop.
-		 */
-		if (!refcount_release(&vp->v_usecount))
-			return;
-		VI_LOCK(vp);
-		/*
-		 * By the time we got here someone else might have transitioned
-		 * the count back to > 0.
-		 */
-		if (vp->v_usecount > 0) {
-			vdropl(vp);
-			return;
-		}
-	} else {
-		VI_LOCK(vp);
-		v_decr_devcount(vp);
-		if (!refcount_release(&vp->v_usecount)) {
-			VI_UNLOCK(vp);
-			return;
-		}
+	if (!refcount_release(&vp->v_usecount))
+		return;
+	VI_LOCK(vp);
+	v_decr_devcount(vp);
+	/*
+	 * By the time we got here someone else might have transitioned
+	 * the count back to > 0.
+	 */
+	if (vp->v_usecount > 0) {
+		vdropl(vp);
+		return;
 	}
 	if (vp->v_iflag & VI_DOINGINACT) {
 		vdropl(vp);
@@ -3737,20 +3698,6 @@
 	count = vp->v_rdev->si_usecount;
 	dev_unlock();
 	return (count);
-}
-
-/*
- * Same as above, but using the struct cdev *as argument
- */
-int
-count_dev(struct cdev *dev)
-{
-	int count;
-
-	dev_lock();
-	count = dev->si_usecount;
-	dev_unlock();
-	return(count);
 }
 
 /*
Index: head/sys/kern/vfs_syscalls.c
===================================================================
--- head/sys/kern/vfs_syscalls.c
+++ head/sys/kern/vfs_syscalls.c
@@ -4148,7 +4148,7 @@
 		if (error != 0)
 			goto out;
 	}
-	if (vcount(vp) > 1)
+	if (vp->v_usecount > 1 || vcount(vp) > 1)
 		VOP_REVOKE(vp, REVOKEALL);
out:
 	vput(vp);
Index: head/sys/sys/conf.h
===================================================================
--- head/sys/sys/conf.h
+++ head/sys/sys/conf.h
@@ -256,7 +256,6 @@
 
 #define	make_dev_args_init(a) \
 	make_dev_args_init_impl((a), sizeof(struct make_dev_args))
-int	count_dev(struct cdev *_dev);
 void	delist_dev(struct cdev *_dev);
 void	destroy_dev(struct cdev *_dev);
 int	destroy_dev_sched(struct cdev *dev);