Index: sys/fs/devfs/devfs.h
===================================================================
--- sys/fs/devfs/devfs.h
+++ sys/fs/devfs/devfs.h
@@ -153,6 +153,7 @@
         struct timespec de_ctime;
         struct vnode *de_vnode;
         char *de_symlink;
+        int de_usecount;
 };
 
 struct devfs_mount {
@@ -202,6 +203,9 @@
 struct devfs_dirent *devfs_find(struct devfs_dirent *, const char *, int,
     int);
 
+void devfs_ctty_ref(struct vnode *);
+void devfs_ctty_unref(struct vnode *);
+
 #endif /* _KERNEL */
 
 #endif /* !_FS_DEVFS_DEVFS_H_ */
Index: sys/fs/devfs/devfs_vnops.c
===================================================================
--- sys/fs/devfs/devfs_vnops.c
+++ sys/fs/devfs/devfs_vnops.c
@@ -222,6 +222,88 @@
         devfs_fpdrop(fp);
 }
 
+static void
+devfs_usecount_add(struct vnode *vp)
+{
+        struct devfs_dirent *de;
+        struct cdev *dev;
+
+        VI_LOCK(vp);
+        if (VN_IS_DOOMED(vp)) {
+                VI_UNLOCK(vp);
+                return;
+        }
+
+        de = vp->v_data;
+        dev = vp->v_rdev;
+        MPASS(de != NULL);
+        MPASS(dev != NULL);
+        dev_lock();
+        dev->si_usecount++;
+        de->de_usecount++;
+        dev_unlock();
+        VI_UNLOCK(vp);
+}
+
+static void
+devfs_usecount_sub_locked(struct vnode *vp)
+{
+        struct devfs_dirent *de;
+        struct cdev *dev;
+
+        ASSERT_VI_LOCKED(vp, __func__);
+
+        de = vp->v_data;
+        dev = vp->v_rdev;
+        if (de == NULL)
+                return;
+        if (dev == NULL) {
+                MPASS(de->de_usecount == 0);
+                return;
+        }
+        dev_lock();
+        if (dev->si_usecount < de->de_usecount)
+                panic("%s: si_usecount underflow for dev %p "
+                    "(has %ld, dirent has %d)\n",
+                    __func__, dev, dev->si_usecount, de->de_usecount);
+        if (VN_IS_DOOMED(vp)) {
+                dev->si_usecount -= de->de_usecount;
+                de->de_usecount = 0;
+        } else {
+                if (de->de_usecount == 0)
+                        panic("%s: de_usecount underflow for dev %p\n",
+                            __func__, dev);
+                dev->si_usecount--;
+                de->de_usecount--;
+        }
+        dev_unlock();
+}
+
+static void
+devfs_usecount_sub(struct vnode *vp)
+{
+
+        VI_LOCK(vp);
+        devfs_usecount_sub_locked(vp);
+        VI_UNLOCK(vp);
+}
+
+void
+devfs_ctty_ref(struct vnode *vp)
+{
+
+        vrefact(vp);
+        devfs_usecount_add(vp);
+}
+
+void
+devfs_ctty_unref(struct vnode *vp)
+{
+
+        devfs_usecount_sub(vp);
+        vrele(vp);
+}
+
 /*
  * On success devfs_populate_vp() returns with dmp->dm_lock held.
  */
@@ -480,7 +562,6 @@
         /* XXX: v_rdev should be protect by vnode lock */
         vp->v_rdev = dev;
         VNPASS(vp->v_usecount == 1, vp);
-        dev->si_usecount++;
         /* Special casing of ttys for deadfs. Probably redundant. */
         dsw = dev->si_devsw;
         if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
@@ -562,6 +643,7 @@
         struct proc *p;
         struct cdev *dev = vp->v_rdev;
         struct cdevsw *dsw;
+        struct devfs_dirent *de = vp->v_data;
         int dflags, error, ref, vp_locked;
 
         /*
@@ -580,7 +662,7 @@
          * if the reference count is 2 (this last descriptor
          * plus the session), release the reference from the session.
          */
-        if (vp->v_usecount == 2 && td != NULL) {
+        if (de->de_usecount == 2 && td != NULL) {
                 p = td->td_proc;
                 PROC_LOCK(p);
                 if (vp == p->p_session->s_ttyvp) {
@@ -590,7 +672,7 @@
                         if (vp == p->p_session->s_ttyvp) {
                                 SESS_LOCK(p->p_session);
                                 VI_LOCK(vp);
-                                if (vp->v_usecount == 2 && vcount(vp) == 1 &&
+                                if (de->de_usecount == 2 && vcount(vp) == 2 &&
                                     !VN_IS_DOOMED(vp)) {
                                         p->p_session->s_ttyvp = NULL;
                                         p->p_session->s_ttydp = NULL;
@@ -601,7 +683,7 @@
                         }
                         sx_xunlock(&proctree_lock);
                         if (oldvp != NULL)
-                                vrele(oldvp);
+                                devfs_ctty_unref(oldvp);
                 } else
                         PROC_UNLOCK(p);
         }
@@ -619,8 +701,9 @@
                 return (ENXIO);
         dflags = 0;
         VI_LOCK(vp);
-        if (vp->v_usecount == 1 && vcount(vp) == 1)
+        if (de->de_usecount == 1 && vcount(vp) == 1)
                 dflags |= FLASTCLOSE;
+        devfs_usecount_sub_locked(vp);
         if (VN_IS_DOOMED(vp)) {
                 /* Forced close. */
                 dflags |= FREVOKE | FNONBLOCK;
@@ -843,7 +926,7 @@
                 return (0);
         }
 
-        vrefact(vp);
+        devfs_ctty_ref(vp);
         SESS_LOCK(sess);
         vpold = sess->s_ttyvp;
         sess->s_ttyvp = vp;
@@ -1152,6 +1235,8 @@
                 return (ENXIO);
         }
 
+        devfs_usecount_add(vp);
+
         vlocked = VOP_ISLOCKED(vp);
         VOP_UNLOCK(vp);
 
@@ -1171,6 +1256,9 @@
         td->td_fpop = fpop;
         vn_lock(vp, vlocked | LK_RETRY);
 
+        if (error != 0)
+                devfs_usecount_sub(vp);
+
         dev_relthread(dev, ref);
         if (error != 0) {
                 if (error == ERESTART)
@@ -1406,6 +1494,7 @@
         struct devfs_dirent *de;
 
         vp = ap->a_vp;
+        devfs_usecount_sub(vp);
         mtx_lock(&devfs_de_interlock);
         de = vp->v_data;
         if (de != NULL) {
@@ -1431,8 +1520,6 @@
         dev_lock();
         dev = vp->v_rdev;
         vp->v_rdev = NULL;
-        if (dev != NULL)
-                dev->si_usecount -= (vp->v_usecount > 0);
         dev_unlock();
         VI_UNLOCK(vp);
         if (dev != NULL)
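The devfs changes above redefine si_usecount: it now counts opens of the device rather than vnodes referencing the cdev. Each devfs dirent mirrors its share in the new de_usecount field, devfs_open() charges the count before calling the driver's open routine (and backs it out on failure), devfs_close() releases it, and reclaim returns a doomed vnode's whole contribution in bulk. As a minimal sketch of what the last-close test in devfs_close() now means, consider a hypothetical helper (the helper name and bool return are mine; the expression is taken verbatim from the FLASTCLOSE hunk above):

/*
 * Hypothetical helper, not part of the patch; devfs_close() above
 * open-codes this test.  With si_usecount counting opens, this vnode
 * performs the last close when it holds the only open recorded in its
 * dirent (de_usecount == 1) and no other alias of the cdev contributes
 * one (vcount() returns si_usecount, which must then also be 1).
 */
static bool
devfs_is_last_close(struct vnode *vp)
{
        struct devfs_dirent *de;

        ASSERT_VI_LOCKED(vp, __func__);
        de = vp->v_data;
        return (de->de_usecount == 1 && vcount(vp) == 1);
}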
Index: sys/kern/kern_proc.c
===================================================================
--- sys/kern/kern_proc.c
+++ sys/kern/kern_proc.c
@@ -88,6 +88,8 @@
 #include <vm/vm_page.h>
 #include <vm/uma.h>
 
+#include <fs/devfs/devfs.h>
+
 #ifdef COMPAT_FREEBSD32
 #include <compat/freebsd32/freebsd32.h>
 #include <compat/freebsd32/freebsd32_util.h>
@@ -858,7 +860,7 @@
                         VOP_REVOKE(ttyvp, REVOKEALL);
                         VOP_UNLOCK(ttyvp);
                 }
-                vrele(ttyvp);
+                devfs_ctty_unref(ttyvp);
                 sx_xlock(&proctree_lock);
         }
 }
Index: sys/kern/tty.c
===================================================================
--- sys/kern/tty.c
+++ sys/kern/tty.c
@@ -67,6 +67,8 @@
 #include <sys/ucred.h>
 #include <sys/vnode.h>
 
+#include <fs/devfs/devfs.h>
+
 #include <machine/stdarg.h>
 
 static MALLOC_DEFINE(M_TTY, "tty", "tty device");
@@ -1256,7 +1258,7 @@
          * is either changed or released.
          */
         if (vp != NULL)
-                vrele(vp);
+                devfs_ctty_unref(vp);
         return (0);
 }
 
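The two consumers above hold a controlling-terminal vnode reference for an extended period: killjobc() in kern_proc.c and the ctty drop path in tty.c. Both now pair devfs_ctty_unref() with the devfs_ctty_ref() taken when the terminal is installed (the TIOCSCTTY hunk in devfs_vnops.c), so the device's open count follows the session's reference. A sketch of the discipline with an illustrative consumer (struct session and its s_ttyvp field are real; the two functions are hypothetical):

/*
 * Illustrative consumer, not patch code: the store side mirrors the
 * TIOCSCTTY hunk, the drop side mirrors tty.c and killjobc().  The
 * pair replaces bare vrefact()/vrele() so that si_usecount keeps
 * counting the session's implicit open.
 */
static void
ctty_store(struct session *sess, struct vnode *vp)
{

        devfs_ctty_ref(vp);             /* vrefact() + devfs_usecount_add() */
        SESS_LOCK(sess);
        sess->s_ttyvp = vp;
        SESS_UNLOCK(sess);
}

static void
ctty_drop(struct session *sess)
{
        struct vnode *vp;

        SESS_LOCK(sess);
        vp = sess->s_ttyvp;
        sess->s_ttyvp = NULL;
        SESS_UNLOCK(sess);
        if (vp != NULL)
                devfs_ctty_unref(vp);   /* devfs_usecount_sub() + vrele() */
}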
Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -108,8 +108,6 @@
 static void syncer_shutdown(void *arg, int howto);
 static int vtryrecycle(struct vnode *vp);
 static void v_init_counters(struct vnode *);
-static void v_incr_devcount(struct vnode *);
-static void v_decr_devcount(struct vnode *);
 static void vgonel(struct vnode *);
 static void vfs_knllock(void *arg);
 static void vfs_knlunlock(void *arg);
@@ -2788,59 +2786,6 @@
         refcount_init(&vp->v_usecount, 1);
 }
 
-/*
- * Increment si_usecount of the associated device, if any.
- */
-static void
-v_incr_devcount(struct vnode *vp)
-{
-
-        ASSERT_VI_LOCKED(vp, __FUNCTION__);
-        if (vp->v_type == VCHR && vp->v_rdev != NULL) {
-                dev_lock();
-                vp->v_rdev->si_usecount++;
-                dev_unlock();
-        }
-}
-
-/*
- * Decrement si_usecount of the associated device, if any.
- *
- * The caller is required to hold the interlock when transitioning a VCHR use
- * count to zero. This prevents a race with devfs_reclaim_vchr() that would
- * leak a si_usecount reference. The vnode lock will also prevent this race
- * if it is held while dropping the last ref.
- *
- * The race is:
- *
- * CPU1                         CPU2
- *                      devfs_reclaim_vchr
- * make v_usecount == 0
- *                        VI_LOCK
- *                        sees v_usecount == 0, no updates
- *                        vp->v_rdev = NULL;
- *                        ...
- *                        VI_UNLOCK
- * VI_LOCK
- * v_decr_devcount
- *   sees v_rdev == NULL, no updates
- *
- * In this scenario si_devcount decrement is not performed.
- */
-static void
-v_decr_devcount(struct vnode *vp)
-{
-
-        ASSERT_VOP_LOCKED(vp, __func__);
-        ASSERT_VI_LOCKED(vp, __FUNCTION__);
-        if (vp->v_type == VCHR && vp->v_rdev != NULL) {
-                dev_lock();
-                VNPASS(vp->v_rdev->si_usecount > 0, vp);
-                vp->v_rdev->si_usecount--;
-                dev_unlock();
-        }
-}
-
 /*
  * Grab a particular vnode from the free list, increment its
  * reference count and lock it.  VIRF_DOOMED is set if the vnode
@@ -2900,42 +2845,6 @@
         return (vget_finish(vp, flags, vs));
 }
 
-static int __noinline
-vget_finish_vchr(struct vnode *vp)
-{
-
-        VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR)"));
-
-        /*
-         * See the comment in vget_finish before usecount bump.
-         */
-        if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-#ifdef INVARIANTS
-                int old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
-                VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old));
-#else
-                refcount_release(&vp->v_holdcnt);
-#endif
-                return (0);
-        }
-
-        VI_LOCK(vp);
-        if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-#ifdef INVARIANTS
-                int old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
-                VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
-#else
-                refcount_release(&vp->v_holdcnt);
-#endif
-                VI_UNLOCK(vp);
-                return (0);
-        }
-        v_incr_devcount(vp);
-        refcount_acquire(&vp->v_usecount);
-        VI_UNLOCK(vp);
-        return (0);
-}
-
 int
 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
 {
@@ -2963,9 +2872,6 @@
         if (vs == VGET_USECOUNT)
                 return (0);
 
-        if (__predict_false(vp->v_type == VCHR))
-                return (vget_finish_vchr(vp));
-
         /*
          * We hold the vnode. If the usecount is 0 it will be utilized to keep
          * the vnode around. Otherwise someone else lended their hold count and
@@ -2988,61 +2894,12 @@
  * Increase the reference (use) and hold count of a vnode.
  * This will also remove the vnode from the free list if it is presently free.
  */
-static void __noinline
-vref_vchr(struct vnode *vp, bool interlock)
-{
-
-        /*
-         * See the comment in vget_finish before usecount bump.
-         */
-        if (!interlock) {
-                if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-                        VNODE_REFCOUNT_FENCE_ACQ();
-                        VNASSERT(vp->v_holdcnt > 0, vp,
-                            ("%s: active vnode not held", __func__));
-                        return;
-                }
-                VI_LOCK(vp);
-                /*
-                 * By the time we get here the vnode might have been doomed, at
-                 * which point the 0->1 use count transition is no longer
-                 * protected by the interlock. Since it can't bounce back to
-                 * VCHR and requires vref semantics, punt it back
-                 */
-                if (__predict_false(vp->v_type == VBAD)) {
-                        VI_UNLOCK(vp);
-                        vref(vp);
-                        return;
-                }
-        }
-        VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR)"));
-        if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-                VNODE_REFCOUNT_FENCE_ACQ();
-                VNASSERT(vp->v_holdcnt > 0, vp,
-                    ("%s: active vnode not held", __func__));
-                if (!interlock)
-                        VI_UNLOCK(vp);
-                return;
-        }
-        vhold(vp);
-        v_incr_devcount(vp);
-        refcount_acquire(&vp->v_usecount);
-        if (!interlock)
-                VI_UNLOCK(vp);
-        return;
-}
-
 void
 vref(struct vnode *vp)
 {
         int old;
 
         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-        if (__predict_false(vp->v_type == VCHR)) {
-                vref_vchr(vp, false);
-                return;
-        }
-
         if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
                 VNODE_REFCOUNT_FENCE_ACQ();
                 VNASSERT(vp->v_holdcnt > 0, vp,
@@ -3071,10 +2928,6 @@
 
         ASSERT_VI_LOCKED(vp, __func__);
         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-        if (__predict_false(vp->v_type == VCHR)) {
-                vref_vchr(vp, true);
-                return;
-        }
         vref(vp);
 }
 
@@ -3215,9 +3068,6 @@
  * By releasing the last usecount we take ownership of the hold count which
  * provides liveness of the vnode, meaning we have to vdrop.
  *
- * If the vnode is of type VCHR we may need to decrement si_usecount, see
- * v_decr_devcount for details.
- *
 * For all vnodes we may need to perform inactive processing. It requires an
 * exclusive lock on the vnode, while it is legal to call here with only a
 * shared lock (or no locks). If locking the vnode in an expected manner fails,
@@ -3238,8 +3088,6 @@
         VNPASS(vp->v_holdcnt > 0, vp);
 
         VI_LOCK(vp);
-        if (__predict_false(vp->v_type == VCHR && func != VRELE))
-                v_decr_devcount(vp);
 
         /*
          * By the time we got here someone else might have transitioned
@@ -3327,28 +3175,9 @@
  * Releasing the last use count requires additional processing, see vput_final
  * above for details.
  *
- * Note that releasing use count without the vnode lock requires special casing
- * for VCHR, see v_decr_devcount for details.
- *
 * Comment above each variant denotes lock state on entry and exit.
 */
-static void __noinline
-vrele_vchr(struct vnode *vp)
-{
-
-        if (refcount_release_if_not_last(&vp->v_usecount))
-                return;
-        VI_LOCK(vp);
-        if (!refcount_release(&vp->v_usecount)) {
-                VI_UNLOCK(vp);
-                return;
-        }
-        v_decr_devcount(vp);
-        VI_UNLOCK(vp);
-        vput_final(vp, VRELE);
-}
-
 /*
  * in: any
  * out: same as passed in
@@ -3358,10 +3187,6 @@
 {
 
         ASSERT_VI_UNLOCKED(vp, __func__);
-        if (__predict_false(vp->v_type == VCHR)) {
-                vrele_vchr(vp);
-                return;
-        }
         if (!refcount_release(&vp->v_usecount))
                 return;
         vput_final(vp, VRELE);
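The vfs_subr.c side is pure removal: with devfs charging the count at open and close, vget_finish_vchr(), vref_vchr() and vrele_vchr() disappear, and with them the rule that the interlock (or the vnode lock) had to be held when a VCHR use count dropped to zero, along with the devfs_reclaim_vchr() race that rule guarded against. A condensed sketch of the now-uniform fast path (hypothetical name; not a verbatim copy of the post-patch vref(), which additionally resolves two threads racing through the 0->1 use count transition):

/*
 * Condensed sketch, not the real vref(): see vfs_subr.c for the full
 * 0->1 handling.  The point is that no vnode type is special-cased
 * any longer and no interlock is required on any path.
 */
static void
vref_sketch(struct vnode *vp)
{

        if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
                VNODE_REFCOUNT_FENCE_ACQ();
                VNASSERT(vp->v_holdcnt > 0, vp,
                    ("%s: active vnode not held", __func__));
                return;
        }
        /* 0->1 transition: lend a hold count to keep the vnode alive. */
        vhold(vp);
        refcount_acquire(&vp->v_usecount);
}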
Index: sys/kern/vfs_syscalls.c
===================================================================
--- sys/kern/vfs_syscalls.c
+++ sys/kern/vfs_syscalls.c
@@ -4197,7 +4197,7 @@
                 if (error != 0)
                         goto out;
         }
-        if (vp->v_usecount > 1 || vcount(vp) > 1)
+        if (vcount(vp) > 0)
                 VOP_REVOKE(vp, REVOKEALL);
 out:
         vput(vp);
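The revoke(2) test also becomes exact. The old predicate had to guess from reference counts: vp->v_usecount > 1 meant something besides revoke's own reference held the vnode (not necessarily an open), and vcount(vp) > 1 meant another vnode aliased the cdev. Since vcount() now reports outstanding opens directly, any non-zero value is a definite signal. Side by side, as hypothetical helpers (the patch inlines the new test in the syscall):

/*
 * Hypothetical helpers contrasting the two predicates; not patch code.
 */
static bool
revoke_needed_old(struct vnode *vp)
{

        /* Heuristic: extra vnode references or a second aliasing vnode. */
        return (vp->v_usecount > 1 || vcount(vp) > 1);
}

static bool
revoke_needed_new(struct vnode *vp)
{

        /* Exact: si_usecount now counts opens across all aliases. */
        return (vcount(vp) > 0);
}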