Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/vfs_subr.c
Show First 20 Lines • Show All 67 Lines • ▼ Show 20 Lines | |||||
#include <sys/lockf.h> | #include <sys/lockf.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/mount.h> | #include <sys/mount.h> | ||||
#include <sys/namei.h> | #include <sys/namei.h> | ||||
#include <sys/pctrie.h> | #include <sys/pctrie.h> | ||||
#include <sys/priv.h> | #include <sys/priv.h> | ||||
#include <sys/reboot.h> | #include <sys/reboot.h> | ||||
#include <sys/refcount.h> | #include <sys/refcount.h> | ||||
#include <sys/rmlock.h> | |||||
#include <sys/rwlock.h> | #include <sys/rwlock.h> | ||||
#include <sys/sched.h> | #include <sys/sched.h> | ||||
#include <sys/sleepqueue.h> | #include <sys/sleepqueue.h> | ||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <sys/stat.h> | #include <sys/stat.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <sys/syslog.h> | #include <sys/syslog.h> | ||||
#include <sys/vmmeter.h> | #include <sys/vmmeter.h> | ||||
▲ Show 20 Lines • Show All 548 Lines • ▼ Show 20 Lines | |||||
* Attempt to lock A (instead of vp_crossmp) while D is held would | * Attempt to lock A (instead of vp_crossmp) while D is held would | ||||
* violate the global order, causing deadlocks. | * violate the global order, causing deadlocks. | ||||
* | * | ||||
* dounmount() locks B while F is drained. | * dounmount() locks B while F is drained. | ||||
*/ | */ | ||||
int | int | ||||
vfs_busy(struct mount *mp, int flags) | vfs_busy(struct mount *mp, int flags) | ||||
{ | { | ||||
struct rm_priotracker tracker; | |||||
MPASS((flags & ~MBF_MASK) == 0); | MPASS((flags & ~MBF_MASK) == 0); | ||||
CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); | CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); | ||||
if (vfs_op_thread_enter(mp, &tracker)) { | |||||
MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); | |||||
MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); | |||||
MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); | |||||
MNT_REF_FASTPATH(mp); | |||||
atomic_add_int(&mp->mnt_lockref, 1); | |||||
if (flags & MBF_MNTLSTLOCK) | |||||
kib (inline comment on the `flags & MBF_MNTLSTLOCK` test): style — compare explicitly, i.e. `(flags & MBF_MNTLSTLOCK) != 0`.
mjg (author reply, marked Done): This func already does not do it, so I did not change it.
mtx_unlock(&mountlist_mtx); | |||||
vfs_op_thread_exit(mp, &tracker); | |||||
return (0); | |||||
} | |||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
MNT_REF(mp); | MNT_REF(mp); | ||||
/* | /* | ||||
* If mount point is currently being unmounted, sleep until the | * If mount point is currently being unmounted, sleep until the | ||||
* mount point fate is decided. If thread doing the unmounting fails, | * mount point fate is decided. If thread doing the unmounting fails, | ||||
* it will clear MNTK_UNMOUNT flag before waking us up, indicating | * it will clear MNTK_UNMOUNT flag before waking us up, indicating | ||||
* that this mount point has survived the unmount attempt and vfs_busy | * that this mount point has survived the unmount attempt and vfs_busy | ||||
* should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE | * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE | ||||
Show All 16 Lines | while (mp->mnt_kern_flag & MNTK_UNMOUNT) { | ||||
mp->mnt_kern_flag |= MNTK_MWAIT; | mp->mnt_kern_flag |= MNTK_MWAIT; | ||||
msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); | msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); | ||||
if (flags & MBF_MNTLSTLOCK) | if (flags & MBF_MNTLSTLOCK) | ||||
mtx_lock(&mountlist_mtx); | mtx_lock(&mountlist_mtx); | ||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
} | } | ||||
if (flags & MBF_MNTLSTLOCK) | if (flags & MBF_MNTLSTLOCK) | ||||
mtx_unlock(&mountlist_mtx); | mtx_unlock(&mountlist_mtx); | ||||
mp->mnt_lockref++; | atomic_add_int(&mp->mnt_lockref, 1); | ||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* Free a busy filesystem. | * Free a busy filesystem. | ||||
*/ | */ | ||||
void | void | ||||
vfs_unbusy(struct mount *mp) | vfs_unbusy(struct mount *mp) | ||||
{ | { | ||||
struct rm_priotracker tracker; | |||||
int c; | |||||
CTR2(KTR_VFS, "%s: mp %p", __func__, mp); | CTR2(KTR_VFS, "%s: mp %p", __func__, mp); | ||||
if (vfs_op_thread_enter(mp, &tracker)) { | |||||
MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); | |||||
c = atomic_fetchadd_int(&mp->mnt_lockref, -1) - 1; | |||||
KASSERT(c >= 0, ("%s: negative mnt_lockref %d\n", __func__, c)); | |||||
MNT_REL_FASTPATH(mp); | |||||
vfs_op_thread_exit(mp, &tracker); | |||||
return; | |||||
} | |||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
MNT_REL(mp); | MNT_REL(mp); | ||||
KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref")); | c = atomic_fetchadd_int(&mp->mnt_lockref, -1) - 1; | ||||
mp->mnt_lockref--; | KASSERT(c >= 0, ("%s: negative mnt_lockref %d\n", __func__, c)); | ||||
if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { | if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { | ||||
MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); | MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); | ||||
CTR1(KTR_VFS, "%s: waking up waiters", __func__); | CTR1(KTR_VFS, "%s: waking up waiters", __func__); | ||||
mp->mnt_kern_flag &= ~MNTK_DRAINING; | mp->mnt_kern_flag &= ~MNTK_DRAINING; | ||||
wakeup(&mp->mnt_lockref); | wakeup(&mp->mnt_lockref); | ||||
} | } | ||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 5,133 Lines • Show Last 20 Lines |
!= 0   [stray duplicate of kib's inline comment above — page-rendering artifact]