Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/vfs_subr.c
Show First 20 Lines • Show All 5,698 Lines • ▼ Show 20 Lines | vfs_unixify_accmode(accmode_t *accmode) | ||||
* or VSYNCHRONIZE using file mode or POSIX.1e ACL. | * or VSYNCHRONIZE using file mode or POSIX.1e ACL. | ||||
*/ | */ | ||||
*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); | *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* Clear out a doomed vnode (if any) and replace it with a new one as long | |||||
* as the fs is not being unmounted. Return the root vnode to the caller. | |||||
*/ | |||||
static int __noinline | |||||
vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) | |||||
{ | |||||
struct vnode *vp; | |||||
int error; | |||||
restart: | |||||
if (mp->mnt_rootvnode != NULL) { | |||||
MNT_ILOCK(mp); | |||||
vp = mp->mnt_rootvnode; | |||||
if (vp != NULL) { | |||||
if ((vp->v_iflag & VI_DOOMED) == 0) { | |||||
vrefact(vp); | |||||
MNT_IUNLOCK(mp); | |||||
error = vn_lock(vp, flags); | |||||
if (error == 0) { | |||||
*vpp = vp; | |||||
return (0); | |||||
} | |||||
vrele(vp); | |||||
goto restart; | |||||
} | |||||
/* | |||||
* Clear the old one. | |||||
*/ | |||||
mp->mnt_rootvnode = NULL; | |||||
} | |||||
MNT_IUNLOCK(mp); | |||||
if (vp != NULL) { | |||||
/* | |||||
* Paired with a fence in vfs_op_thread_exit(). | |||||
*/ | |||||
atomic_thread_fence_acq(); | |||||
vfs_op_barrier_wait(mp); | |||||
vrele(vp); | |||||
} | |||||
} | |||||
error = VFS_CACHEDROOT(mp, flags, vpp); | |||||
if (error != 0) | |||||
return (error); | |||||
if (mp->mnt_vfs_ops == 0) { | |||||
MNT_ILOCK(mp); | |||||
if (mp->mnt_vfs_ops != 0) { | |||||
jeff: Do we want to assert that the same vnode is returned as is cached in the case of a collision? | |||||
Done Inline ActionsI can add the assert no problem. If that's a possibility it already exists in the current code (that is 2 roughly concurrent calls to the _root routine end up with different vnodes) and sounds like solid bug, but we should probably audit if this can happen in sufficiently wrong conditions. With a long enough delay the original thread may perhaps unlock the vnode and have it doomed, at which point the other thread might have found a different one. i.e. it indeed may be the newly cached vnode is already doomed. mjg: I can add the assert no problem.
If that's a possibility it already exists in the current code… | |||||
MNT_IUNLOCK(mp); | |||||
return (0); | |||||
} | |||||
if (mp->mnt_rootvnode == NULL) { | |||||
vrefact(*vpp); | |||||
mp->mnt_rootvnode = *vpp; | |||||
} else { | |||||
if (mp->mnt_rootvnode != *vpp) { | |||||
Not Done Inline ActionsThis could be a single else if indented one level less. jeff: This could be a single else if indented one level less. | |||||
if ((mp->mnt_rootvnode->v_iflag & VI_DOOMED) == 0) { | |||||
panic("%s: mismatch between vnode returned " | |||||
" by VFS_CACHEDROOT and the one cached " | |||||
" (%p != %p)", | |||||
__func__, *vpp, mp->mnt_rootvnode); | |||||
} | |||||
} | |||||
} | |||||
MNT_IUNLOCK(mp); | |||||
} | |||||
return (0); | |||||
} | |||||
/*
 * Return a referenced root vnode for the given mount point, locked with
 * 'flags'.  Fast path: read the cached root without the mount interlock;
 * fall back to vfs_cache_root_fallback() whenever the lockless attempt
 * cannot be completed safely.
 */
int
vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	/* Vfs ops in flight (e.g. unmount): take the locked slow path. */
	if (!vfs_op_thread_enter(mp))
		return (vfs_cache_root_fallback(mp, flags, vpp));
	vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode);
	if (vp == NULL || (vp->v_iflag & VI_DOOMED)) {
		/* Nothing cached, or the cached vnode is doomed. */
		vfs_op_thread_exit(mp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	/*
	 * The reference must be acquired before leaving the thread-enter
	 * section, which is what keeps the vnode from being reclaimed
	 * from under us.
	 */
	vrefact(vp);
	vfs_op_thread_exit(mp);
	error = vn_lock(vp, flags);
	if (error != 0) {
		/* Lost the race locking it; retry via the slow path. */
		vrele(vp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	*vpp = vp;
	return (0);
}
struct vnode * | |||||
vfs_cache_root_clear(struct mount *mp) | |||||
{ | |||||
struct vnode *vp; | |||||
/* | |||||
* ops > 0 guarantees there is nobody who can see this vnode | |||||
*/ | |||||
MPASS(mp->mnt_vfs_ops > 0); | |||||
vp = mp->mnt_rootvnode; | |||||
mp->mnt_rootvnode = NULL; | |||||
return (vp); | |||||
} | |||||
void | |||||
vfs_cache_root_set(struct mount *mp, struct vnode *vp) | |||||
{ | |||||
MPASS(mp->mnt_vfs_ops > 0); | |||||
vrefact(vp); | |||||
kibUnsubmitted Not Done Inline ActionsSo consumers of this function have usecount for the root vnode set to 2. kib: So consumers of this function have usecount for the root vnode set to 2. | |||||
mjgAuthorUnsubmitted Done Inline ActionsI don't think that's avoidable without playing with vflush. It's not a problem though. mjg: I don't think that's avoidable without playing with vflush. It's not a problem though. | |||||
mp->mnt_rootvnode = vp; | |||||
} | |||||
/* | |||||
* These are helper functions for filesystems to traverse all | * These are helper functions for filesystems to traverse all | ||||
* their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. | * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. | ||||
* | * | ||||
* This interface replaces MNT_VNODE_FOREACH. | * This interface replaces MNT_VNODE_FOREACH. | ||||
*/ | */ | ||||
MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); | MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); | ||||
▲ Show 20 Lines • Show All 272 Lines • Show Last 20 Lines |
Do we want to assert that the same vnode is returned as is cached in the case of a collision? Otherwise we may return with a vnode locked that is not the same as the cached root.