Changeset View
Standalone View
sys/kern/vfs_mount.c
Show First 20 Lines • Show All 59 Lines • ▼ Show 20 Lines | |||||
#include <sys/reboot.h> | #include <sys/reboot.h> | ||||
#include <sys/sbuf.h> | #include <sys/sbuf.h> | ||||
#include <sys/syscallsubr.h> | #include <sys/syscallsubr.h> | ||||
#include <sys/sysproto.h> | #include <sys/sysproto.h> | ||||
#include <sys/sx.h> | #include <sys/sx.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <sys/sysent.h> | #include <sys/sysent.h> | ||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#include <sys/taskqueue.h> | |||||
#include <sys/vnode.h> | #include <sys/vnode.h> | ||||
#include <vm/uma.h> | #include <vm/uma.h> | ||||
#include <geom/geom.h> | #include <geom/geom.h> | ||||
#include <machine/stdarg.h> | #include <machine/stdarg.h> | ||||
#include <security/audit/audit.h> | #include <security/audit/audit.h> | ||||
#include <security/mac/mac_framework.h> | #include <security/mac/mac_framework.h> | ||||
#define VFS_MOUNTARG_SIZE_MAX (1024 * 64) | #define VFS_MOUNTARG_SIZE_MAX (1024 * 64) | ||||
static int vfs_domount(struct thread *td, const char *fstype, char *fspath, | static int vfs_domount(struct thread *td, const char *fstype, char *fspath, | ||||
uint64_t fsflags, struct vfsoptlist **optlist); | uint64_t fsflags, struct vfsoptlist **optlist); | ||||
static void free_mntarg(struct mntarg *ma); | static void free_mntarg(struct mntarg *ma); | ||||
static int usermount = 0; | static int usermount = 0; | ||||
SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, | SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, | ||||
"Unprivileged users may mount and unmount file systems"); | "Unprivileged users may mount and unmount file systems"); | ||||
static bool default_autoro = false; | static bool default_autoro = false; | ||||
SYSCTL_BOOL(_vfs, OID_AUTO, default_autoro, CTLFLAG_RW, &default_autoro, 0, | SYSCTL_BOOL(_vfs, OID_AUTO, default_autoro, CTLFLAG_RW, &default_autoro, 0, | ||||
"Retry failed r/w mount as r/o if no explicit ro/rw option is specified"); | "Retry failed r/w mount as r/o if no explicit ro/rw option is specified"); | ||||
static bool recursive_forced_unmount = false; | |||||
SYSCTL_BOOL(_vfs, OID_AUTO, recursive_forced_unmount, CTLFLAG_RW, | |||||
&recursive_forced_unmount, 0, "Recursively unmount stacked upper mounts" | |||||
" when a file system is forcibly unmounted"); | |||||
MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure"); | MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure"); | ||||
MALLOC_DEFINE(M_STATFS, "statfs", "statfs structure"); | MALLOC_DEFINE(M_STATFS, "statfs", "statfs structure"); | ||||
static uma_zone_t mount_zone; | static uma_zone_t mount_zone; | ||||
/* List of mounted filesystems. */ | /* List of mounted filesystems. */ | ||||
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); | struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); | ||||
/* For any iteration/modification of mountlist */ | /* For any iteration/modification of mountlist */ | ||||
struct mtx_padalign __exclusive_cache_line mountlist_mtx; | struct mtx_padalign __exclusive_cache_line mountlist_mtx; | ||||
MTX_SYSINIT(mountlist, &mountlist_mtx, "mountlist", MTX_DEF); | MTX_SYSINIT(mountlist, &mountlist_mtx, "mountlist", MTX_DEF); | ||||
EVENTHANDLER_LIST_DEFINE(vfs_mounted); | EVENTHANDLER_LIST_DEFINE(vfs_mounted); | ||||
EVENTHANDLER_LIST_DEFINE(vfs_unmounted); | EVENTHANDLER_LIST_DEFINE(vfs_unmounted); | ||||
static void vfs_deferred_unmount(void *arg, int pending); | |||||
static struct task deferred_unmount_task = | |||||
TASK_INITIALIZER(0, vfs_deferred_unmount, NULL); | |||||
static struct mtx deferred_unmount_lock; | |||||
MTX_SYSINIT(deferred_unmount, &deferred_unmount_lock, "deferred_unmount", | |||||
MTX_DEF); | |||||
static STAILQ_HEAD(, mount) deferred_unmount_list = | |||||
STAILQ_HEAD_INITIALIZER(deferred_unmount_list); | |||||
TASKQUEUE_DEFINE_THREAD(deferred_unmount); | |||||
static void mount_devctl_event(const char *type, struct mount *mp, bool donew); | static void mount_devctl_event(const char *type, struct mount *mp, bool donew); | ||||
/* | /* | ||||
* Global opts, taken by all filesystems | * Global opts, taken by all filesystems | ||||
*/ | */ | ||||
static const char *global_opts[] = { | static const char *global_opts[] = { | ||||
"errmsg", | "errmsg", | ||||
"fstype", | "fstype", | ||||
▲ Show 20 Lines • Show All 386 Lines • ▼ Show 20 Lines | if (vfs_op_thread_enter(mp, mpcpu)) { | ||||
return; | return; | ||||
} | } | ||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
MNT_REF(mp); | MNT_REF(mp); | ||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
} | } | ||||
/* | |||||
* Register ump as an upper mount of the mount associated with | |||||
* vnode vp. This registration will be tracked through | |||||
* mount_upper_node upper, which should be allocated by the | |||||
* caller and stored in per-mount data associated with mp. | |||||
* | |||||
* If successful, this function will return the mount associated | |||||
* with vp, and will ensure that it cannot be unmounted until | |||||
* ump has been unregistered as one of its upper mounts. | |||||
* | |||||
* Upon failure this function will return NULL. | |||||
*/ | |||||
struct mount * | struct mount * | ||||
vfs_pin_from_vp(struct vnode *vp) | vfs_register_upper_from_vp(struct vnode *vp, struct mount *ump, | ||||
struct mount_upper_node *upper) | |||||
{ | { | ||||
struct mount *mp; | struct mount *mp; | ||||
mp = atomic_load_ptr(&vp->v_mount); | mp = atomic_load_ptr(&vp->v_mount); | ||||
if (mp == NULL) | if (mp == NULL) | ||||
return (NULL); | return (NULL); | ||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
if (mp != vp->v_mount || (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) { | if (mp != vp->v_mount || | ||||
((mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_RECURSE)) != 0)) { | |||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
KASSERT(ump != mp, ("upper and lower mounts are identical")); | |||||
upper->mp = ump; | |||||
MNT_REF(mp); | MNT_REF(mp); | ||||
KASSERT(mp->mnt_pinned_count < INT_MAX, | TAILQ_INSERT_TAIL(&mp->mnt_uppers, upper, mnt_upper_link); | ||||
("mount pinned count overflow")); | |||||
++mp->mnt_pinned_count; | |||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
return (mp); | return (mp); | ||||
} | } | ||||
/* | |||||
* Register upper mount ump to receive vnode unlink/reclaim | |||||
* notifications from lower mount mp. This registration will | |||||
* be tracked through mount_upper_node upper, which should be | |||||
* allocated by the caller and stored in per-mount data | |||||
* associated with mp. | |||||
* | |||||
* ump must already be registered as an upper mount of mp | |||||
* through a call to vfs_register_upper_from_vp(). | |||||
*/ | |||||
void | void | ||||
vfs_unpin(struct mount *mp) | vfs_register_for_notification(struct mount *mp, struct mount *ump, | ||||
struct mount_upper_node *upper) | |||||
{ | { | ||||
upper->mp = ump; | |||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
KASSERT(mp->mnt_pinned_count > 0, ("mount pinned count underflow")); | TAILQ_INSERT_TAIL(&mp->mnt_notify, upper, mnt_upper_link); | ||||
MNT_IUNLOCK(mp); | |||||
} | |||||
static void | |||||
vfs_drain_upper_locked(struct mount *mp) | |||||
{ | |||||
mtx_assert(MNT_MTX(mp), MA_OWNED); | |||||
while (mp->mnt_upper_pending != 0) { | |||||
mp->mnt_kern_flag |= MNTK_UPPER_WAITER; | |||||
msleep(&mp->mnt_uppers, MNT_MTX(mp), 0, "mntupw", 0); | |||||
} | |||||
} | |||||
/* | |||||
* Undo a previous call to vfs_register_for_notification(). | |||||
* The mount represented by upper must be currently registered | |||||
* as an upper mount for mp. | |||||
*/ | |||||
void | |||||
vfs_unregister_for_notification(struct mount *mp, | |||||
struct mount_upper_node *upper) | |||||
{ | |||||
MNT_ILOCK(mp); | |||||
vfs_drain_upper_locked(mp); | |||||
TAILQ_REMOVE(&mp->mnt_notify, upper, mnt_upper_link); | |||||
MNT_IUNLOCK(mp); | |||||
} | |||||
/* | |||||
* Undo a previous call to vfs_register_upper_from_vp(). | |||||
* This must be done before mp can be unmounted. | |||||
*/ | |||||
void | |||||
vfs_unregister_upper(struct mount *mp, struct mount_upper_node *upper) | |||||
{ | |||||
MNT_ILOCK(mp); | |||||
KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0, | KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0, | ||||
("mount pinned with pending unmount")); | ("registered upper with pending unmount")); | ||||
--mp->mnt_pinned_count; | vfs_drain_upper_locked(mp); | ||||
TAILQ_REMOVE(&mp->mnt_uppers, upper, mnt_upper_link); | |||||
if ((mp->mnt_kern_flag & MNTK_TASKQUEUE_WAITER) != 0 && | |||||
TAILQ_EMPTY(&mp->mnt_uppers)) { | |||||
mp->mnt_kern_flag &= ~MNTK_TASKQUEUE_WAITER; | |||||
wakeup(&mp->taskqueue_link); | |||||
} | |||||
MNT_REL(mp); | MNT_REL(mp); | ||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
} | } | ||||
void | void | ||||
vfs_rel(struct mount *mp) | vfs_rel(struct mount *mp) | ||||
{ | { | ||||
struct mount_pcpu *mpcpu; | struct mount_pcpu *mpcpu; | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath, | ||||
mp->mnt_stat.f_owner = cred->cr_uid; | mp->mnt_stat.f_owner = cred->cr_uid; | ||||
strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN); | strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN); | ||||
mp->mnt_iosize_max = DFLTPHYS; | mp->mnt_iosize_max = DFLTPHYS; | ||||
#ifdef MAC | #ifdef MAC | ||||
mac_mount_init(mp); | mac_mount_init(mp); | ||||
mac_mount_create(cred, mp); | mac_mount_create(cred, mp); | ||||
#endif | #endif | ||||
arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0); | arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0); | ||||
mp->mnt_upper_pending = 0; | |||||
TAILQ_INIT(&mp->mnt_uppers); | TAILQ_INIT(&mp->mnt_uppers); | ||||
mp->mnt_pinned_count = 0; | TAILQ_INIT(&mp->mnt_notify); | ||||
mp->taskqueue_flags = 0; | |||||
return (mp); | return (mp); | ||||
} | } | ||||
/* | /* | ||||
* Destroy the mount struct previously allocated by vfs_mount_alloc(). | * Destroy the mount struct previously allocated by vfs_mount_alloc(). | ||||
*/ | */ | ||||
void | void | ||||
vfs_mount_destroy(struct mount *mp) | vfs_mount_destroy(struct mount *mp) | ||||
Show All 22 Lines | vfs_mount_destroy(struct mount *mp) | ||||
atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1); | atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1); | ||||
if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) { | if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) { | ||||
struct vnode *vp; | struct vnode *vp; | ||||
TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) | TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) | ||||
vn_printf(vp, "dangling vnode "); | vn_printf(vp, "dangling vnode "); | ||||
panic("unmount: dangling vnode"); | panic("unmount: dangling vnode"); | ||||
} | } | ||||
KASSERT(mp->mnt_pinned_count == 0, | KASSERT(mp->mnt_upper_pending == 0, ("mnt_upper_pending")); | ||||
("mnt_pinned_count = %d", mp->mnt_pinned_count)); | |||||
KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers")); | KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers")); | ||||
KASSERT(TAILQ_EMPTY(&mp->mnt_notify), ("mnt_notify")); | |||||
if (mp->mnt_nvnodelistsize != 0) | if (mp->mnt_nvnodelistsize != 0) | ||||
panic("vfs_mount_destroy: nonzero nvnodelistsize"); | panic("vfs_mount_destroy: nonzero nvnodelistsize"); | ||||
if (mp->mnt_lazyvnodelistsize != 0) | if (mp->mnt_lazyvnodelistsize != 0) | ||||
panic("vfs_mount_destroy: nonzero lazyvnodelistsize"); | panic("vfs_mount_destroy: nonzero lazyvnodelistsize"); | ||||
if (mp->mnt_lockref != 0) | if (mp->mnt_lockref != 0) | ||||
panic("vfs_mount_destroy: nonzero lock refcount"); | panic("vfs_mount_destroy: nonzero lock refcount"); | ||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
▲ Show 20 Lines • Show All 1,140 Lines • ▼ Show 20 Lines | CPU_FOREACH(cpu) { | ||||
case MNT_COUNT_WRITEOPCOUNT: | case MNT_COUNT_WRITEOPCOUNT: | ||||
sum += mpcpu->mntp_writeopcount; | sum += mpcpu->mntp_writeopcount; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
return (sum); | return (sum); | ||||
} | } | ||||
static bool | |||||
deferred_unmount_enqueue(struct mount *mp, uint64_t flags, bool requeue) | |||||
{ | |||||
bool enqueued; | |||||
enqueued = false; | |||||
mtx_lock(&deferred_unmount_lock); | |||||
if ((mp->taskqueue_flags & MNT_TASKQUEUE) == 0 || requeue) { | |||||
mp->taskqueue_flags = flags | MNT_TASKQUEUE; | |||||
STAILQ_INSERT_TAIL(&deferred_unmount_list, mp, taskqueue_link); | |||||
enqueued = true; | |||||
} | |||||
mtx_unlock(&deferred_unmount_lock); | |||||
if (enqueued) { | |||||
taskqueue_enqueue(taskqueue_deferred_unmount, | |||||
&deferred_unmount_task); | |||||
} | |||||
return (enqueued); | |||||
} | |||||
/* | /* | ||||
* Taskqueue handler for processing async/recursive unmounts | |||||
*/ | |||||
static void | |||||
vfs_deferred_unmount(void *argi __unused, int pending __unused) | |||||
{ | |||||
STAILQ_HEAD(, mount) local_unmounts; | |||||
uint64_t flags; | |||||
struct mount *mp, *tmp; | |||||
bool unmounted; | |||||
STAILQ_INIT(&local_unmounts); | |||||
mtx_lock(&deferred_unmount_lock); | |||||
STAILQ_CONCAT(&local_unmounts, &deferred_unmount_list); | |||||
mtx_unlock(&deferred_unmount_lock); | |||||
STAILQ_FOREACH_SAFE(mp, &local_unmounts, taskqueue_link, tmp) { | |||||
flags = mp->taskqueue_flags; | |||||
KASSERT((flags & MNT_TASKQUEUE) != 0, | |||||
("taskqueue unmount without MNT_TASKQUEUE")); | |||||
if (dounmount(mp, flags, curthread) != 0) { | |||||
MNT_ILOCK(mp); | |||||
unmounted = ((mp->mnt_kern_flag & MNTK_REFEXPIRE) != 0); | |||||
MNT_IUNLOCK(mp); | |||||
if (!unmounted) | |||||
deferred_unmount_enqueue(mp, flags, true); | |||||
else | |||||
vfs_rel(mp); | |||||
} | |||||
} | |||||
} | |||||
/* | |||||
* Do the actual filesystem unmount. | * Do the actual filesystem unmount. | ||||
*/ | */ | ||||
int | int | ||||
dounmount(struct mount *mp, int flags, struct thread *td) | dounmount(struct mount *mp, uint64_t flags, struct thread *td) | ||||
{ | { | ||||
struct mount_upper_node *upper; | |||||
struct vnode *coveredvp, *rootvp; | struct vnode *coveredvp, *rootvp; | ||||
int error; | int error; | ||||
uint64_t async_flag; | uint64_t async_flag; | ||||
int mnt_gen_r; | int mnt_gen_r; | ||||
KASSERT((flags & MNT_TASKQUEUE) == 0 || | |||||
(flags & (MNT_RECURSE | MNT_FORCE)) == (MNT_RECURSE | MNT_FORCE), | |||||
("MNT_TASKQUEUE requires MNT_RECURSE | MNT_FORCE")); | |||||
kib: There should be a blank line before multi-line comment, and after the end of the code which is… | |||||
/* | |||||
* If the caller has explicitly requested the unmount to be handled by | |||||
* the taskqueue and we're not already in taskqueue context, queue | |||||
* up the unmount request and exit. This is done prior to any | |||||
* credential checks; MNT_TASKQUEUE should be used only for kernel- | |||||
* initiated unmounts and will therefore be processed with the | |||||
* (kernel) credentials of the taskqueue thread. Still, callers | |||||
* should be sure this is the behavior they want. | |||||
*/ | |||||
if ((flags & MNT_TASKQUEUE) != 0 && | |||||
taskqueue_member(taskqueue_deferred_unmount, curthread) == 0) { | |||||
if (!deferred_unmount_enqueue(mp, flags, false)) | |||||
vfs_rel(mp); | |||||
return (EINPROGRESS); | |||||
} | |||||
/* | |||||
* Only privileged root, or (if MNT_USER is set) the user that did the | |||||
* original mount is permitted to unmount this filesystem. | |||||
* This check should be made prior to queueing up any recursive | |||||
* unmounts of upper filesystems. Those unmounts will be executed | |||||
* with kernel thread credentials and are expected to succeed, so | |||||
* we must at least ensure the originating context has sufficient | |||||
* privilege to unmount the base filesystem before proceeding with | |||||
* the uppers. | |||||
*/ | |||||
error = vfs_suser(mp, td); | |||||
if (error != 0) { | |||||
KASSERT((flags & MNT_TASKQUEUE) == 0, | |||||
("taskqueue unmount with insufficient privilege")); | |||||
vfs_rel(mp); | |||||
return (error); | |||||
} | |||||
if (recursive_forced_unmount && ((flags & MNT_FORCE) != 0)) | |||||
flags |= MNT_RECURSE; | |||||
if ((flags & MNT_RECURSE) != 0) { | |||||
KASSERT((flags & MNT_FORCE) != 0, | |||||
("MNT_RECURSE requires MNT_FORCE")); | |||||
MNT_ILOCK(mp); | |||||
/* | |||||
* Set MNTK_RECURSE to prevent new upper mounts from being | |||||
* added, and note that an operation on the uppers list is in | |||||
* progress. This will ensure that unregistration from the | |||||
* uppers list, and therefore any pending unmount of the upper | |||||
* FS, can't complete until after we finish walking the list. | |||||
*/ | |||||
mp->mnt_kern_flag |= MNTK_RECURSE; | |||||
mp->mnt_upper_pending++; | |||||
TAILQ_FOREACH(upper, &mp->mnt_uppers, mnt_upper_link) { | |||||
MNT_IUNLOCK(mp); | |||||
vfs_ref(upper->mp); | |||||
if (!deferred_unmount_enqueue(upper->mp, flags, false)) | |||||
vfs_rel(upper->mp); | |||||
MNT_ILOCK(mp); | |||||
} | |||||
mp->mnt_upper_pending--; | |||||
if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && | |||||
mp->mnt_upper_pending == 0) { | |||||
mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; | |||||
wakeup(&mp->mnt_uppers); | |||||
} | |||||
/* | |||||
* If we're not on the taskqueue, wait until the uppers list | |||||
* is drained before proceeding with unmount. Otherwise, if | |||||
* we are on the taskqueue and there are still pending uppers, | |||||
* just re-enqueue on the end of the taskqueue. | |||||
Not Done Inline ActionsSuppose that this delayed unmount becomes popular enough, and one of our upper mounts also has upper mounts. Wouldn't the system deadlock? kib: Suppose that this delayed unmount becomes popular enough, and one of our upper mounts also has… | |||||
Done Inline ActionsThis is the case where the "base" filesystem is being unmounted, and the calling thread has requested for it to be done synchronously, i.e. MNT_TASKQUEUE is not present in flags. The uppers, and the uppers of uppers etc., will all be unmounted from the taskqueue context with MNT_TASKQUEUE and won't block here. I've tested recursively unmounting ufs->unionfs->nullfs and it does work. Given a stacked filesystem hierarchy A->B->C, there could also be a case in which both 'A' and 'B' could be requested to synchronously (and recursively) unmount, but those requests would need to happen in different threads, neither of which could be the taskqueue. Is there a specific scenario you're concerned about? jah: This is the case where the "base" filesystem is being unmounted, and the calling thread has… | |||||
Not Done Inline ActionsI mean, you delegated one unmount to the taskqueue, and now taskqueue executes it. If this delegated unmount needs to delegate another unmount, it schedules a task and sleeps waiting for the task to finish. But it sleeps in the context of the taskqueue which should execute that another unmount, so the new task basically never picked up for execution. kib: I mean, you delegated one unmount to the taskqueue, and now taskqueue executes it. If this… | |||||
Done Inline ActionsThe delegated unmount doesn't sleep though. Since it was issued by the taskqueue, it will have MNT_TASKQUEUE in 'flags' and instead will just requeue itself at the end of recursive unmount queue and return EINPROGRESS. jah: The delegated unmount doesn't sleep though. Since it was issued by the taskqueue, it will have… | |||||
*/ | |||||
if ((flags & MNT_TASKQUEUE) == 0) { | |||||
while (!TAILQ_EMPTY(&mp->mnt_uppers)) { | |||||
mp->mnt_kern_flag |= MNTK_TASKQUEUE_WAITER; | |||||
msleep(&mp->taskqueue_link, MNT_MTX(mp), 0, | |||||
"umntqw", 0); | |||||
} | |||||
} else if (!TAILQ_EMPTY(&mp->mnt_uppers)) { | |||||
MNT_IUNLOCK(mp); | |||||
deferred_unmount_enqueue(mp, flags, true); | |||||
return (0); | |||||
} | |||||
MNT_IUNLOCK(mp); | |||||
KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers not empty")); | |||||
} | |||||
/* Allow the taskqueue to safely re-enqueue on failure */ | |||||
if ((flags & MNT_TASKQUEUE) != 0) | |||||
vfs_ref(mp); | |||||
if ((coveredvp = mp->mnt_vnodecovered) != NULL) { | if ((coveredvp = mp->mnt_vnodecovered) != NULL) { | ||||
mnt_gen_r = mp->mnt_gen; | mnt_gen_r = mp->mnt_gen; | ||||
VI_LOCK(coveredvp); | VI_LOCK(coveredvp); | ||||
vholdl(coveredvp); | vholdl(coveredvp); | ||||
vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY); | vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY); | ||||
/* | /* | ||||
* Check for mp being unmounted while waiting for the | * Check for mp being unmounted while waiting for the | ||||
* covered vnode lock. | * covered vnode lock. | ||||
*/ | */ | ||||
if (coveredvp->v_mountedhere != mp || | if (coveredvp->v_mountedhere != mp || | ||||
coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) { | coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) { | ||||
VOP_UNLOCK(coveredvp); | VOP_UNLOCK(coveredvp); | ||||
vdrop(coveredvp); | vdrop(coveredvp); | ||||
vfs_rel(mp); | vfs_rel(mp); | ||||
return (EBUSY); | return (EBUSY); | ||||
} | } | ||||
} | } | ||||
/* | |||||
* Only privileged root, or (if MNT_USER is set) the user that did the | |||||
* original mount is permitted to unmount this filesystem. | |||||
*/ | |||||
error = vfs_suser(mp, td); | |||||
if (error != 0) { | |||||
if (coveredvp != NULL) { | |||||
VOP_UNLOCK(coveredvp); | |||||
vdrop(coveredvp); | |||||
} | |||||
vfs_rel(mp); | |||||
return (error); | |||||
} | |||||
vfs_op_enter(mp); | vfs_op_enter(mp); | ||||
vn_start_write(NULL, &mp, V_WAIT | V_MNTREF); | vn_start_write(NULL, &mp, V_WAIT | V_MNTREF); | ||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 || | if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 || | ||||
(mp->mnt_flag & MNT_UPDATE) != 0 || | (mp->mnt_flag & MNT_UPDATE) != 0 || | ||||
mp->mnt_pinned_count != 0) { | !TAILQ_EMPTY(&mp->mnt_uppers)) { | ||||
dounmount_cleanup(mp, coveredvp, 0); | dounmount_cleanup(mp, coveredvp, 0); | ||||
return (EBUSY); | return (EBUSY); | ||||
} | } | ||||
mp->mnt_kern_flag |= MNTK_UNMOUNT; | mp->mnt_kern_flag |= MNTK_UNMOUNT; | ||||
rootvp = vfs_cache_root_clear(mp); | rootvp = vfs_cache_root_clear(mp); | ||||
if (coveredvp != NULL) | if (coveredvp != NULL) | ||||
vn_seqc_write_begin(coveredvp); | vn_seqc_write_begin(coveredvp); | ||||
if (flags & MNT_NONBUSY) { | if (flags & MNT_NONBUSY) { | ||||
▲ Show 20 Lines • Show All 87 Lines • ▼ Show 20 Lines | if (coveredvp) { | ||||
vdrop(coveredvp); | vdrop(coveredvp); | ||||
} | } | ||||
if (rootvp != NULL) { | if (rootvp != NULL) { | ||||
vn_seqc_write_end(rootvp); | vn_seqc_write_end(rootvp); | ||||
vdrop(rootvp); | vdrop(rootvp); | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
mtx_lock(&mountlist_mtx); | mtx_lock(&mountlist_mtx); | ||||
TAILQ_REMOVE(&mountlist, mp, mnt_list); | TAILQ_REMOVE(&mountlist, mp, mnt_list); | ||||
mtx_unlock(&mountlist_mtx); | mtx_unlock(&mountlist_mtx); | ||||
EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td); | EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td); | ||||
if (coveredvp != NULL) { | if (coveredvp != NULL) { | ||||
VI_LOCK(coveredvp); | VI_LOCK(coveredvp); | ||||
vn_irflag_unset_locked(coveredvp, VIRF_MOUNTPOINT); | vn_irflag_unset_locked(coveredvp, VIRF_MOUNTPOINT); | ||||
coveredvp->v_mountedhere = NULL; | coveredvp->v_mountedhere = NULL; | ||||
Show All 9 Lines | dounmount(struct mount *mp, uint64_t flags, struct thread *td) | ||||
} | } | ||||
vfs_event_signal(NULL, VQ_UNMOUNT, 0); | vfs_event_signal(NULL, VQ_UNMOUNT, 0); | ||||
if (rootvnode != NULL && mp == rootvnode->v_mount) { | if (rootvnode != NULL && mp == rootvnode->v_mount) { | ||||
vrele(rootvnode); | vrele(rootvnode); | ||||
rootvnode = NULL; | rootvnode = NULL; | ||||
} | } | ||||
if (mp == rootdevmp) | if (mp == rootdevmp) | ||||
rootdevmp = NULL; | rootdevmp = NULL; | ||||
if ((flags & MNT_TASKQUEUE) != 0) | |||||
vfs_rel(mp); | |||||
vfs_mount_destroy(mp); | vfs_mount_destroy(mp); | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* Report errors during filesystem mounting. | * Report errors during filesystem mounting. | ||||
*/ | */ | ||||
void | void | ||||
▲ Show 20 Lines • Show All 695 Lines • Show Last 20 Lines |
There should be a blank line before multi-line comment, and after the end of the code which is described by it.