Index: sys/cddl/compat/opensolaris/kern/opensolaris_vfs.c
===================================================================
--- sys/cddl/compat/opensolaris/kern/opensolaris_vfs.c
+++ sys/cddl/compat/opensolaris/kern/opensolaris_vfs.c
@@ -242,6 +242,7 @@
 	if (VFS_ROOT(mp, LK_EXCLUSIVE, &mvp))
 		panic("mount: lost mount");
 	VOP_UNLOCK(vp, 0);
+	vfs_fastpath_enable_mp(mp);
 	vfs_unbusy(mp);
 	*vpp = mvp;
 	return (0);
Index: sys/kern/vfs_default.c
===================================================================
--- sys/kern/vfs_default.c
+++ sys/kern/vfs_default.c
@@ -51,6 +51,7 @@
 #include
 #include
 #include
+#include <sys/rmlock.h>
 #include
 #include
 #include
@@ -587,6 +588,7 @@
 		struct mount **a_mpp;
 	} */ *ap;
 {
+	struct rm_priotracker tracker;
 	struct mount *mp;
 	struct vnode *vp;
 
@@ -603,6 +605,17 @@
 	mp = vp->v_mount;
 	if (mp == NULL)
 		goto out;
+	if (vfs_fastpath_enter_mp(mp, &tracker)) {
+		if (mp != vp->v_mount) {
+			vfs_fastpath_exit_mp(mp, &tracker);
+			mp = NULL;
+			goto out;
+		}
+		MNT_REF_FASTPATH(mp);
+		vfs_fastpath_exit_mp(mp, &tracker);
+		goto out;
+	}
+
 	MNT_ILOCK(mp);
 	if (mp != vp->v_mount) {
 		MNT_IUNLOCK(mp);
Index: sys/kern/vfs_mount.c
===================================================================
--- sys/kern/vfs_mount.c
+++ sys/kern/vfs_mount.c
@@ -58,6 +58,7 @@
 #include
 #include
 #include
+#include <sys/rmlock.h>
 #include
 #include
 #include
@@ -123,6 +124,9 @@
 	mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
 	mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF);
 	lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
+	rm_init(&mp->mnt_rmlock, "struct mount rmlock");
+	mp->mnt_ref = 0;
+	mp->mnt_fastpathdisabled = 1;
 	return (0);
 }
 
@@ -132,6 +136,7 @@
 	struct mount *mp;
 
 	mp = (struct mount *)mem;
+	rm_destroy(&mp->mnt_rmlock);
 	lockdestroy(&mp->mnt_explock);
 	mtx_destroy(&mp->mnt_listmtx);
 	mtx_destroy(&mp->mnt_mtx);
@@ -443,8 +448,15 @@
 void
 vfs_ref(struct mount *mp)
 {
+	struct rm_priotracker tracker;
 
 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
+	if (vfs_fastpath_enter_mp(mp, &tracker)) {
+		MNT_REF_FASTPATH(mp);
+		vfs_fastpath_exit_mp(mp, &tracker);
+		return;
+	}
+
 	MNT_ILOCK(mp);
 	MNT_REF(mp);
 	MNT_IUNLOCK(mp);
@@ -453,8 +465,15 @@
 void
 vfs_rel(struct mount *mp)
 {
+	struct rm_priotracker tracker;
 
 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
+	if (vfs_fastpath_enter_mp(mp, &tracker)) {
+		MNT_REL_FASTPATH(mp);
+		vfs_fastpath_exit_mp(mp, &tracker);
+		return;
+	}
+
 	MNT_ILOCK(mp);
 	MNT_REL(mp);
 	MNT_IUNLOCK(mp);
@@ -478,7 +497,12 @@
 	mp->mnt_activevnodelistsize = 0;
 	TAILQ_INIT(&mp->mnt_tmpfreevnodelist);
 	mp->mnt_tmpfreevnodelistsize = 0;
-	mp->mnt_ref = 0;
+	if (mp->mnt_ref != 0 || mp->mnt_lockref != 0 ||
+	    mp->mnt_writeopcount != 0)
+		panic("%s: non-zero counters on new mp %p\n", __func__, mp);
+	if (mp->mnt_fastpathdisabled != 1)
+		panic("%s:fastpathdisabled should be 1 but %d found\n", __func__,
+		    mp->mnt_fastpathdisabled);
 	(void) vfs_busy(mp, MBF_NOWAIT);
 	atomic_add_acq_int(&vfsp->vfc_refcount, 1);
 	mp->mnt_op = vfsp->vfc_vfsops;
@@ -507,6 +531,9 @@
 vfs_mount_destroy(struct mount *mp)
 {
 
+	if (mp->mnt_fastpathdisabled == 0)
+		panic("%s: entered with fast path enabled\n", __func__);
+
 	MNT_ILOCK(mp);
 	mp->mnt_kern_flag |= MNTK_REFEXPIRE;
 	if (mp->mnt_kern_flag & MNTK_MWAIT) {
@@ -540,6 +567,11 @@
 	if (mp->mnt_lockref != 0)
 		panic("vfs_mount_destroy: nonzero lock refcount");
 	MNT_IUNLOCK(mp);
+
+	if (mp->mnt_fastpathdisabled != 1)
+		panic("%s:fastpathdisabled should be 1 but %d found\n", __func__,
+		    mp->mnt_fastpathdisabled);
+
 	if (mp->mnt_vnodecovered != NULL)
 		vrele(mp->mnt_vnodecovered);
 #ifdef MAC
@@ -951,6 +983,7 @@
 		vrele(newdp);
 	if ((mp->mnt_flag & MNT_RDONLY) == 0)
 		vfs_allocate_syncvnode(mp);
+	vfs_fastpath_enable_mp(mp);
 	vfs_unbusy(mp);
 	return (0);
 }
@@ -1019,6 +1052,8 @@
 	VI_UNLOCK(vp);
 	VOP_UNLOCK(vp, 0);
 
+	vfs_fastpath_disable_mp(mp);
+
 	MNT_ILOCK(mp);
 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
 		MNT_IUNLOCK(mp);
@@ -1100,6 +1135,7 @@
 	else
 		vfs_deallocate_syncvnode(mp);
 end:
+	vfs_fastpath_enable_mp(mp);
 	vfs_unbusy(mp);
 	VI_LOCK(vp);
 	vp->v_iflag &= ~VI_MOUNT;
@@ -1328,6 +1364,7 @@
 		mp->mnt_kern_flag &= ~MNTK_MWAIT;
 		wakeup(mp);
 	}
+	vfs_fastpath_enable_mp_locked(mp);
 	MNT_IUNLOCK(mp);
 	if (coveredvp != NULL) {
 		VOP_UNLOCK(coveredvp, 0);
@@ -1336,6 +1373,38 @@
 	vn_finished_write(mp);
 }
 
+void
+vfs_fastpath_disable_mp(struct mount *mp)
+{
+
+	rm_wlock(&mp->mnt_rmlock);
+	MNT_ILOCK(mp);
+	mp->mnt_fastpathdisabled++;
+	MNT_IUNLOCK(mp);
+	rm_wunlock(&mp->mnt_rmlock);
+}
+
+void
+vfs_fastpath_enable_mp_locked(struct mount *mp)
+{
+
+	mtx_assert(MNT_MTX(mp), MA_OWNED);
+
+	if (mp->mnt_fastpathdisabled <= 0)
+		panic("%s: invalid fastpathdisabled count %d for mp %p\n",
+		    __func__, mp->mnt_fastpathdisabled, mp);
+	mp->mnt_fastpathdisabled--;
+}
+
+void
+vfs_fastpath_enable_mp(struct mount *mp)
+{
+
+	MNT_ILOCK(mp);
+	vfs_fastpath_enable_mp_locked(mp);
+	MNT_IUNLOCK(mp);
+}
+
 /*
  * Do the actual filesystem unmount.
  */
@@ -1379,6 +1448,8 @@
 		return (error);
 	}
 
+	vfs_fastpath_disable_mp(mp);
+
 	vn_start_write(NULL, &mp, V_WAIT | V_MNTREF);
 	MNT_ILOCK(mp);
 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
@@ -1469,6 +1540,7 @@
 		mp->mnt_kern_flag &= ~MNTK_MWAIT;
 		wakeup(mp);
 	}
+	vfs_fastpath_enable_mp_locked(mp);
 	MNT_IUNLOCK(mp);
 	if (coveredvp)
 		VOP_UNLOCK(coveredvp, 0);
Index: sys/kern/vfs_mountroot.c
===================================================================
--- sys/kern/vfs_mountroot.c
+++ sys/kern/vfs_mountroot.c
@@ -273,6 +273,7 @@
 
 		*mpp = mp;
 		rootdevmp = mp;
+		vfs_fastpath_enable_mp(mp);
 	}
 
 	set_rootvnode();
Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -3940,6 +3940,7 @@
 	    mp->mnt_secondary_accwrites);
 	db_printf("    mnt_gjprovider = %s\n", mp->mnt_gjprovider != NULL ?
 	    mp->mnt_gjprovider : "NULL");
+	db_printf("    mnt_fastpathdisabled = %d\n", mp->mnt_fastpathdisabled);
 
 	db_printf("\n\nList of active vnodes\n");
 	TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) {
Index: sys/sys/mount.h
===================================================================
--- sys/sys/mount.h
+++ sys/sys/mount.h
@@ -41,8 +41,10 @@
 #include
 #include
 #include
+#include <sys/_rmlock.h>
 #include
 #include
+#include <sys/rmlock.h>
 #endif
 
 /*
@@ -226,6 +228,8 @@
 	struct lock	mnt_explock;		/* vfs_export walkers lock */
 	TAILQ_ENTRY(mount) mnt_upper_link;	/* (m) we in the all uppers */
 	TAILQ_HEAD(, mount) mnt_uppers;		/* (m) upper mounts over us*/
+	struct rmlock	mnt_rmlock;
+	int		mnt_fastpathdisabled;	/* (i) is fast path disabled */
 };
 
 /*
@@ -265,15 +269,26 @@
 #define	MNT_ITRYLOCK(mp) mtx_trylock(&(mp)->mnt_mtx)
 #define	MNT_IUNLOCK(mp)	mtx_unlock(&(mp)->mnt_mtx)
 #define	MNT_MTX(mp)	(&(mp)->mnt_mtx)
+
+#define	MNT_REF_FASTPATH(mp)	do {					\
+	atomic_add_int(&(mp)->mnt_ref, 1);				\
+} while (0)
+#define	MNT_REL_FASTPATH(mp)	do {					\
+	int _c;								\
+	_c = atomic_fetchadd_int(&(mp)->mnt_ref, -1) - 1;		\
+	KASSERT(_c >= 0, ("negative mnt_ref %d", _c));			\
+} while (0)
+
 #define	MNT_REF(mp)	do {						\
 	mtx_assert(MNT_MTX(mp), MA_OWNED);				\
-	(mp)->mnt_ref++;						\
+	atomic_add_int(&(mp)->mnt_ref, 1);				\
 } while (0)
 #define	MNT_REL(mp)	do {						\
+	int _c;								\
 	mtx_assert(MNT_MTX(mp), MA_OWNED);				\
-	KASSERT((mp)->mnt_ref > 0, ("negative mnt_ref"));		\
-	(mp)->mnt_ref--;						\
-	if ((mp)->mnt_ref == 0)						\
+	_c = atomic_fetchadd_int(&(mp)->mnt_ref, -1) - 1;		\
+	KASSERT(_c >= 0, ("negative mnt_ref %d", _c));			\
+	if (_c == 0)							\
 		wakeup((mp));						\
 } while (0)
 
@@ -929,6 +944,25 @@
 void	syncer_suspend(void);
 void	syncer_resume(void);
 
+void	vfs_fastpath_disable_mp(struct mount *);
+void	vfs_fastpath_enable_mp_locked(struct mount *);
+void	vfs_fastpath_enable_mp(struct mount *);
+
+#define	vfs_fastpath_enter_mp(mp, tracker)	({			\
+	struct mount *_mp = (mp);					\
+	int _retval;							\
+	_retval = rm_try_rlock(&(_mp)->mnt_rmlock, tracker);		\
+	if (__predict_true(_retval != 0)) {				\
+		if (__predict_false(_mp->mnt_fastpathdisabled)) {	\
+			vfs_fastpath_exit_mp(_mp, tracker);		\
+			_retval = 0;					\
+		}							\
+	}								\
+	_retval;							\
+})
+
+#define	vfs_fastpath_exit_mp(mp, tracker)	rm_runlock(&(mp)->mnt_rmlock, tracker)
+
 #else /* !_KERNEL */
 
 #include