diff --git a/sbin/mount_nullfs/mount_nullfs.8 b/sbin/mount_nullfs/mount_nullfs.8
--- a/sbin/mount_nullfs/mount_nullfs.8
+++ b/sbin/mount_nullfs/mount_nullfs.8
@@ -90,7 +90,7 @@
 .Xr mount 8
 man page for possible options and their meanings.
 Additionally the following option is supported:
-.Bl -tag -width nocache
+.Bl -tag -width nounixbypass
 .It Cm nocache
 Disable metadata caching in the null layer.
 Some lower-layer file systems may force this option.
@@ -98,6 +98,30 @@
 this may result in increased lock contention.
 .It Cm cache
 Force enable metadata caching.
+.It Cm nounixbypass
+Disable bypassing of the unix socket file, used for
+.Xr bind 2
+and
+.Xr connect 2 ,
+to the lower (mounted-from) file system layer.
+.Pp
+The effect is that the lower and upper (nullfs) unix sockets
+are separate.
+.It Cm unixbypass
+Enable the bypass of the unix socket file to the lower file system layer.
+This is the default.
+.Pp
+The effect is that
+.Xr bind 2
+and
+.Xr connect 2
+operations on a unix socket, done on either the upper (nullfs) or the lower
+layer file, are performed on the same unix socket.
+For instance, if a server
+.Xr bind 2
+is done on the lower file, then a
+.Xr connect 2
+on the socket file visible through the nullfs mount connects to that server.
 .El
 .El
 .Pp
diff --git a/sys/fs/nullfs/null.h b/sys/fs/nullfs/null.h
--- a/sys/fs/nullfs/null.h
+++ b/sys/fs/nullfs/null.h
@@ -35,11 +35,12 @@
 #ifndef FS_NULL_H
 #define FS_NULL_H
 
-#define	NULLM_CACHE	0x0001
-
 #include <sys/ck.h>
 #include <vm/uma.h>
 
+#define	NULLM_CACHE		0x0001
+#define	NULLM_NOUNPBYPASS	0x0002
+
 struct null_mount {
 	struct mount *nullm_vfs;
 	struct vnode *nullm_lowerrootvp;	/* Ref to lower root vnode */
@@ -82,6 +83,16 @@
 #endif
 
 extern struct vop_vector null_vnodeops;
+extern struct vop_vector null_vnodeops_no_unp_bypass;
+
+static inline bool
+null_is_nullfs_vnode(struct vnode *vp)
+{
+	const struct vop_vector *op;
+
+	op = vp->v_op;
+	return (op == &null_vnodeops || op == &null_vnodeops_no_unp_bypass);
+}
 
 extern uma_zone_t null_node_zone;
 
diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -240,7 +240,9 @@
 	 */
 	xp = uma_zalloc_smr(null_node_zone, M_WAITOK);
 
-	error = getnewvnode("nullfs", mp, &null_vnodeops, &vp);
+	error = getnewvnode("nullfs", mp, (MOUNTTONULLMOUNT(mp)->nullm_flags &
+	    NULLM_NOUNPBYPASS) != 0 ? &null_vnodeops_no_unp_bypass :
+	    &null_vnodeops, &vp);
 	if (error) {
 		vput(lowervp);
 		uma_zfree_smr(null_node_zone, xp);
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -85,6 +85,10 @@
 	char *target;
 	int error, len;
 	bool isvnunlocked;
+	static const char cache_opt_name[] = "cache";
+	static const char nocache_opt_name[] = "nocache";
+	static const char unixbypass_opt_name[] = "unixbypass";
+	static const char nounixbypass_opt_name[] = "nounixbypass";
 
 	NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);
 
@@ -116,7 +120,7 @@
 	/*
 	 * Unlock lower node to avoid possible deadlock.
 	 */
-	if (mp->mnt_vnodecovered->v_op == &null_vnodeops &&
+	if (null_is_nullfs_vnode(mp->mnt_vnodecovered) &&
 	    VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) {
 		VOP_UNLOCK(mp->mnt_vnodecovered);
 		isvnunlocked = true;
@@ -150,7 +154,7 @@
 	/*
 	 * Check multi null mount to avoid `lock against myself' panic.
 	 */
-	if (mp->mnt_vnodecovered->v_op == &null_vnodeops) {
+	if (null_is_nullfs_vnode(mp->mnt_vnodecovered)) {
 		nn = VTONULL(mp->mnt_vnodecovered);
 		if (nn == NULL || lowerrootvp == nn->null_lowervp) {
 			NULLFSDEBUG("nullfs_mount: multi null mount?\n");
@@ -205,9 +209,10 @@
 		MNT_IUNLOCK(mp);
 	}
 
-	if (vfs_getopt(mp->mnt_optnew, "cache", NULL, NULL) == 0) {
+	if (vfs_getopt(mp->mnt_optnew, cache_opt_name, NULL, NULL) == 0) {
 		xmp->nullm_flags |= NULLM_CACHE;
-	} else if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0) {
+	} else if (vfs_getopt(mp->mnt_optnew, nocache_opt_name, NULL,
+	    NULL) == 0) {
 		;
 	} else if (null_cache_vnodes &&
 	    (xmp->nullm_vfs->mnt_kern_flag & MNTK_NULL_NOCACHE) == 0) {
@@ -219,6 +224,13 @@
 		    &xmp->notify_node);
 	}
 
+	if (vfs_getopt(mp->mnt_optnew, unixbypass_opt_name, NULL, NULL) == 0) {
+		;
+	} else if (vfs_getopt(mp->mnt_optnew, nounixbypass_opt_name, NULL,
+	    NULL) == 0) {
+		xmp->nullm_flags |= NULLM_NOUNPBYPASS;
+	}
+
 	if (lowerrootvp == mp->mnt_vnodecovered) {
 		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE);
 		lowerrootvp->v_vflag |= VV_CROSSLOCK;
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -278,7 +278,7 @@
 		 * that aren't.  (We must always map first vp or vclean fails.)
 		 */
 		if (i != 0 && (*this_vp_p == NULL ||
-		    (*this_vp_p)->v_op != &null_vnodeops)) {
+		    !null_is_nullfs_vnode(*this_vp_p))) {
 			old_vps[i] = NULL;
 		} else {
 			old_vps[i] = *this_vp_p;
@@ -1256,3 +1256,11 @@
 	.vop_copy_file_range =	VOP_PANIC,
 };
 VFS_VOP_VECTOR_REGISTER(null_vnodeops);
+
+struct vop_vector null_vnodeops_no_unp_bypass = {
+	.vop_default =		&null_vnodeops,
+	.vop_unp_bind =		vop_stdunp_bind,
+	.vop_unp_connect =	vop_stdunp_connect,
+	.vop_unp_detach =	vop_stdunp_detach,
+};
+VFS_VOP_VECTOR_REGISTER(null_vnodeops_no_unp_bypass);
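
From userland the new option is normally passed straight through mount(8), e.g. "mount -t nullfs -o nounixbypass /var/lower /mnt/upper".  Below is a minimal, hypothetical sketch of doing the same directly via nmount(2).  The two paths are placeholders, the "fstype"/"fspath"/"target" iovec names are the usual nullfs mount options, and the boolean "nounixbypass" option is passed with no value; this is not part of the patch, just an illustration of how the option would be consumed.

/*
 * Hypothetical example: mount a nullfs instance with the new
 * "nounixbypass" option directly through nmount(2).
 * Paths below are placeholders, not part of the patch.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/uio.h>

#include <err.h>
#include <string.h>

/* Fill a name/value iovec pair; a NULL value denotes a boolean option. */
static void
iov_set(struct iovec *iov, const char *name, const char *val)
{
	iov[0].iov_base = __DECONST(void *, name);
	iov[0].iov_len = strlen(name) + 1;
	iov[1].iov_base = __DECONST(void *, val);
	iov[1].iov_len = val != NULL ? strlen(val) + 1 : 0;
}

int
main(void)
{
	struct iovec iov[8];

	iov_set(&iov[0], "fstype", "nullfs");
	iov_set(&iov[2], "fspath", "/mnt/upper");	/* placeholder mount point */
	iov_set(&iov[4], "target", "/var/lower");	/* placeholder lower directory */
	iov_set(&iov[6], "nounixbypass", NULL);		/* keep upper/lower sockets separate */

	if (nmount(iov, nitems(iov), 0) == -1)
		err(1, "nmount");
	return (0);
}

With such a mount in place, a socket bound under /var/lower is no longer reachable by a connect(2) on the corresponding path under /mnt/upper, which is exactly the separation described for nounixbypass in the man page hunk above.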