diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -161,40 +161,17 @@
  * gets allocated early.  See nameiinit for the direct call below.
  */
 
-/*
- * Returns busied the mount point mounted on the passed vnode, if any.
- *
- * The vnode's lock must be held and may be released on output, as indicated by
- * '*unlocked'.  The caller must also have an active reference on the vnode
- * (vref() or vget()) which is preserved across the call.  On success, the
- * busied mount point is passed through 'mp'.
- *
- * If the vnode is not mounted-on, EJUSTRETURN is returned and '*mp' is set to
- * NULL.  Concurrent unmounts/remounts of the covering mount are handled
- * transparently by restarting the process (doing so is currently not really
- * necessary for correctness but is closer to the historical behavior where the
- * unmounts/remounts were prevented to happen in this case, and will be required
- * (but not enough) if we ever want to implement such things as atomic mount
- * substitutions).  ENOENT is returned if the vnode was doomed while trying to
- * determine its covering mount, and '*mp' is set to NULL.  Else, '*mp' is set
- * to the busied mount point and 0 is returned.
- */
+/* See vn_busy_mountedhere() in 'vnode.h'. */
 int
-vn_busy_mountedhere(struct vnode *vp, bool *unlocked, struct mount **mp)
+vn_busy_mountedhere_mounted(struct vnode *vp, bool *unlocked, struct mount **mp)
 {
 	int error;
 
 	ASSERT_VOP_LOCKED(vp, __func__);
 	ASSERT_VI_UNLOCKED(vp, __func__);
-
-	*unlocked = false;
-	*mp = NULL;
-
-	if (VN_IS_DOOMED(vp))
-		return (ENOENT);
-
-	if (__predict_true((vn_irflag_read(vp) & VIRF_MOUNTPOINT) == 0))
-		return (EJUSTRETURN);
+	VNASSERT(!VN_IS_DOOMED(vp), vp, ("Input vnode is doomed"));
+	VNASSERT((vn_irflag_read(vp) & VIRF_MOUNTPOINT) != 0, vp,
+	    ("Not a mount point"));
 
 	*mp = vp->v_mountedhere;
 	MPASS(*mp != NULL);
@@ -206,8 +183,10 @@
 	 * vfs_ref()/vfs_rel() calls.
 	 */
 	error = vfs_busy(*mp, MBF_NOWAIT);
-	if (__predict_true(error == 0))
+	if (__predict_true(error == 0)) {
+		*unlocked = false;
 		return (error);
+	}
 
 	/* Make sure '*mp' survives the unlock of 'vp'. */
 	vfs_ref(*mp);
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -826,8 +826,8 @@
 int	vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
 	    struct uio *uio);
-int	vn_busy_mountedhere(struct vnode *vp, bool *unlocked,
-	    struct mount **mp);
+static inline int
+vn_busy_mountedhere(struct vnode *vp, bool *unlocked, struct mount **mp);
 int	vn_cross_single_mount(struct vnode* vp, int root_lkflags,
 	    bool *unlocked, struct vnode **vpp);
 int	vn_cross_mounts(struct vnode* vp, int lkflags, struct vnode **vpp);
 
@@ -1092,6 +1092,45 @@
 int vnode_create_vobject(struct vnode *vp, off_t size, struct thread *td);
 void vnode_destroy_vobject(struct vnode *vp);
 
+/* Internal helper for vn_busy_mountedhere() below; do not call directly. */
+int	vn_busy_mountedhere_mounted(struct vnode *vp, bool *unlocked,
+	    struct mount **mp);
+
+/*
+ * Returns busied the mount point mounted on the passed vnode, if any.
+ *
+ * The vnode's lock must be held and may be released on output, as indicated by
+ * '*unlocked'.  The caller must also have an active reference on the vnode
+ * (vref() or vget()) which is preserved across the call.  The vnode must not
+ * have been doomed.  On success, the busied mount point is passed through 'mp'.
+ *
+ * If the vnode is not mounted-on, EJUSTRETURN is returned and '*mp' is set to
+ * NULL.  Concurrent unmounts/remounts of the covering mount are handled
+ * transparently by restarting the process (doing so is currently not really
+ * necessary for correctness but is closer to the historical behavior where
+ * the unmounts/remounts were prevented from happening in this case, and will
+ * be required (but not enough) if we ever want to implement such things as
+ * atomic mount substitutions).  ENOENT is returned if the vnode was doomed
+ * while trying to determine its covering mount (which can happen because its
+ * lock may have been released in the contended case), and '*mp' is set to
+ * NULL.  Else, '*mp' is set to the busied mount point and 0 is returned.
+ */
+static inline int
+vn_busy_mountedhere(struct vnode *vp, bool *unlocked, struct mount **mp)
+{
+	ASSERT_VOP_LOCKED(vp, __func__);
+	ASSERT_VI_UNLOCKED(vp, __func__);
+	VNASSERT(!VN_IS_DOOMED(vp), vp, ("Input vnode is doomed"));
+
+	if (__predict_true((vn_irflag_read(vp) & VIRF_MOUNTPOINT) == 0)) {
+		*unlocked = false;
+		*mp = NULL;
+		return (EJUSTRETURN);
+	}
+
+	return (vn_busy_mountedhere_mounted(vp, unlocked, mp));
+}
+
 extern struct vop_vector fifo_specops;
 extern struct vop_vector dead_vnodeops;
 extern struct vop_vector default_vnodeops;