diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c
--- a/sys/kern/vfs_mount.c
+++ b/sys/kern/vfs_mount.c
@@ -1814,7 +1814,7 @@
 }
 
 static void
-dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
+dounmount_cleanup(struct mount *mp, int mntkflags)
 {
 
 	mtx_assert(MNT_MTX(mp), MA_OWNED);
@@ -1825,10 +1825,6 @@
 	}
 	vfs_op_exit_locked(mp);
 	MNT_IUNLOCK(mp);
-	if (coveredvp != NULL) {
-		VOP_UNLOCK(coveredvp);
-		vdrop(coveredvp);
-	}
 	vn_finished_write(mp);
 	vfs_rel(mp);
 }
@@ -2125,7 +2121,6 @@
 	struct vnode *coveredvp, *rootvp;
 	int error;
 	uint64_t async_flag;
-	int mnt_gen_r;
 	unsigned int retries;
 
 	KASSERT((flags & MNT_DEFERRED) == 0 ||
@@ -2235,24 +2230,6 @@
 
 	if ((flags & MNT_DEFERRED) != 0)
 		vfs_ref(mp);
 
-	if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
-		mnt_gen_r = mp->mnt_gen;
-		VI_LOCK(coveredvp);
-		vholdl(coveredvp);
-		vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
-		/*
-		 * Check for mp being unmounted while waiting for the
-		 * covered vnode lock.
-		 */
-		if (coveredvp->v_mountedhere != mp ||
-		    coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
-			VOP_UNLOCK(coveredvp);
-			vdrop(coveredvp);
-			vfs_rel(mp);
-			return (EBUSY);
-		}
-	}
-
 	vfs_op_enter(mp);
 	vn_start_write(NULL, &mp, V_WAIT);
@@ -2260,12 +2237,12 @@
 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
 	    (mp->mnt_flag & MNT_UPDATE) != 0 ||
 	    !TAILQ_EMPTY(&mp->mnt_uppers)) {
-		dounmount_cleanup(mp, coveredvp, 0);
+		dounmount_cleanup(mp, 0);
 		return (EBUSY);
 	}
 	mp->mnt_kern_flag |= MNTK_UNMOUNT;
 	rootvp = vfs_cache_root_clear(mp);
-	if (coveredvp != NULL)
+	if ((coveredvp = mp->mnt_vnodecovered) != NULL)
 		vn_seqc_write_begin(coveredvp);
 	if (flags & MNT_NONBUSY) {
 		MNT_IUNLOCK(mp);
@@ -2273,7 +2250,7 @@
 		MNT_ILOCK(mp);
 		if (error != 0) {
 			vn_seqc_write_end(coveredvp);
-			dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT);
+			dounmount_cleanup(mp, MNTK_UNMOUNT);
 			if (rootvp != NULL) {
 				vn_seqc_write_end(rootvp);
 				vrele(rootvp);
@@ -2297,6 +2274,9 @@
 		mp->mnt_kern_flag |= MNTK_DRAINING;
 		error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
 		    "mount drain", 0);
+		KASSERT((mp->mnt_kern_flag & MNTK_DRAINING) == 0,
+		    ("%s: MNTK_DRAINING not cleared on mp %p @ %s:%d",
+		    __func__, mp, __FILE__, __LINE__));
 	}
 	MNT_IUNLOCK(mp);
 	KASSERT(mp->mnt_lockref == 0,
@@ -2306,6 +2286,16 @@
 	    ("%s: invalid return value for msleep in the drain path @ %s:%d",
 	    __func__, __FILE__, __LINE__));
 
+	if (coveredvp != NULL) {
+		vn_lock(coveredvp, LK_EXCLUSIVE | LK_RETRY);
+		KASSERT(coveredvp->v_mountedhere == mp,
+		    ("%s: coveredvp->v_mountedhere(%p) != mp(%p) with MNTK_UNMOUNT set",
+		    __func__, coveredvp->v_mountedhere, mp));
+		KASSERT(mp->mnt_vnodecovered == coveredvp,
+		    ("%s: mp->mnt_vnodecovered(%p) != coveredvp(%p) with MNTK_UNMOUNT set",
+		    __func__, mp->mnt_vnodecovered, coveredvp));
+	}
+
 	/*
 	 * We want to keep the vnode around so that we can vn_seqc_write_end
 	 * after we are done with unmount. Downgrade our reference to a mere
@@ -2356,7 +2346,6 @@
 		if (coveredvp) {
 			vn_seqc_write_end(coveredvp);
 			VOP_UNLOCK(coveredvp);
-			vdrop(coveredvp);
 		}
 		if (rootvp != NULL) {
 			vn_seqc_write_end(rootvp);
@@ -2376,7 +2365,6 @@
 		vn_seqc_write_end_locked(coveredvp);
 		VI_UNLOCK(coveredvp);
 		VOP_UNLOCK(coveredvp);
-		vdrop(coveredvp);
 	}
 	mount_devctl_event("UNMOUNT", mp, false);
 	if (rootvp != NULL) {