diff --git a/share/man/man9/namei.9 b/share/man/man9/namei.9 index 3f2f43e94cd1..5bdffce8c360 100644 --- a/share/man/man9/namei.9 +++ b/share/man/man9/namei.9 @@ -1,548 +1,557 @@ .\" .\" Copyright (c) 1998, 1999 Eivind Eklund .\" Copyright (c) 2003 Hiten M. Pandya .\" Copyright (c) 2005 Robert N. M. Watson .\" .\" All rights reserved. .\" .\" This program is free software. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" .\" .\" If you integrate this manpage in another OS, I'd appreciate a note .\" - eivind@FreeBSD.org .\" .Dd May 16, 2025 .Dt NAMEI 9 .Os .Sh NAME .Nm namei , .Nm NDINIT , .Nm NDINIT_AT , .Nm NDFREE_PNBUF .Nd pathname translation and lookup operations .Sh SYNOPSIS .In sys/param.h .In sys/fcntl.h .In sys/namei.h .Ft int .Fn namei "struct nameidata *ndp" .Ft void .Fo NDINIT .Fa "struct nameidata *ndp" "enum nameiop op" "u_int64_t flags" .Fa "enum uio_seg segflg" "const char *namep" .Fc .Ft void .Fo NDINIT_AT .Fa "struct nameidata *ndp" "enum nameiop op" "u_int64_t flags" .Fa "enum uio_seg segflg" "const char *namep" "int dirfd" .Fc .Ft void .Fn NDFREE_PNBUF "struct nameidata *ndp" .Sh DESCRIPTION The .Nm facility allows the client to perform pathname translation and lookup operations. The .Nm functions will increment the reference count for the vnode in question. The reference count has to be decremented after use of the vnode, by using either .Xr vrele 9 or .Xr vput 9 , depending on whether the .Dv LOCKLEAF flag was specified or not. .Pp The .Fn NDINIT macro is used to initialize .Nm components. It takes the following arguments: .Bl -tag -width ".Fa segflg" .It Fa ndp A pointer to the .Vt "struct nameidata" to initialize. .It Fa op The operation which .Fn namei will perform. The following operations are valid: .Dv LOOKUP , CREATE , DELETE , and .Dv RENAME . The latter three are just setup for those effects; just calling .Fn namei will not result in .Fn VOP_RENAME being called. .It Fa flags Operation flags, described in the next section. Several of these can be effective at the same time. .It Fa segflg UIO segment indicator. This indicates if the name of the object is in userspace .Pq Dv UIO_USERSPACE or in the kernel address space .Pq Dv UIO_SYSSPACE . .It Fa namep Pointer to the component's pathname buffer (the file or directory name that will be looked up). 
.El .Pp The .Fn NDINIT_AT macro is similar to .Fn NDINIT , but takes one extra argument: .Bl -tag -width ".Fa segflg" .It Fa dirfd File descriptor referencing a directory, or the special value .Dv AT_FDCWD meaning the calling thread's current working directory. Lookups will be performed relative to this directory. .El .Pp The .Fn NDFREE_PNBUF macro is used to free the pathname buffer. It must be called exactly once for each successful .Fn namei call. It takes the following argument: .Bl -tag -width ".Fa segflg" .It Fa ndp A pointer to a .Vt "struct nameidata" that was used in a successful .Fn namei call. .El .Sh NAMEI OPERATION FLAGS The .Fn namei function takes the following set of .Dq "operation flags" that influence its operation: .Bl -tag -width NC_KEEPPOSENTRY .It Dv NC_NOMAKEENTRY An alias for .Dv NOCACHE . .It Dv NC_KEEPPOSENTRY Keep the positive-caching entry in cache. This flag is typically combined with .Dv NOCACHE to not cache a new entry, but keep the existing one, or with .Dv MAKEENTRY . .It Dv NOCACHE Avoid .Fn namei creating this entry in the namecache if it is not already present. Normally, .Fn namei will add entries to the name cache if they are not already there. .It Dv LOCKLEAF Lock vnode on return with .Dv LK_EXCLUSIVE unless .Dv LOCKSHARED is also set. .Xr VOP_UNLOCK 9 should be used to release the lock (or .Xr vput 9 which is equivalent to calling .Xr VOP_UNLOCK 9 followed by .Xr vrele 9 , all in one). .It Dv LOCKPARENT This flag lets the .Fn namei function return the parent (directory) vnode, .Va ni_dvp , in a locked state, unless it is identical to .Va ni_vp , in which case .Va ni_dvp is not locked per se (but may be locked due to .Dv LOCKLEAF ) . If a lock is enforced, it should be released using .Xr vput 9 or .Xr VOP_UNLOCK 9 and .Xr vrele 9 . .It Dv WANTPARENT This flag allows the .Fn namei function to return the parent (directory) vnode in an unlocked state. The parent vnode must be released separately by using .Xr vrele 9 . .It Dv FAILIFEXISTS Makes the .Nm operation fail if the target exists. It requires that the .Dv LOCKPARENT flag is set and .Dv LOCKLEAF is not. .It Dv FOLLOW With this flag, .Fn namei will follow the symbolic link if the last part of the path supplied is a symbolic link (i.e., it will return a vnode for whatever the link points at, instead of the link itself). .It Dv EMPTYPATH For a .Nm call initialized with .Fn NDINIT_AT , allow the .Fa namep path to be empty. In this case, the .Fa dirfd file descriptor may reference a file of arbitrary type, not necessarily a directory, and the lookup returns the vnode for this file. .It Dv LOCKSHARED Lock vnode on return with .Dv LK_SHARED , if permitted by the file system that owns the vnode. The file system must explicitly permit this by setting .Dv MNTK_LOOKUP_SHARED in .Dv mp->mnt_kern_flag during mount and by calling .Fn VN_LOCK_ASHARE when allocating the vnode. If .Dv LOCKLEAF is specified but shared locking is not permitted, then the vnode will be returned with .Dv LK_EXCLUSIVE . .Xr VOP_UNLOCK 9 should be used to release the lock (or .Xr vput 9 which is equivalent to calling .Xr VOP_UNLOCK 9 followed by .Xr vrele 9 , all in one). .It Dv NOFOLLOW Do not follow symbolic links (pseudo). This flag is not looked for by the actual code, which looks for .Dv FOLLOW .
.Dv NOFOLLOW is used to indicate to the source code reader that symlinks are intentionally not followed. .It Dv RBENEATH Requires that the .Nm lookup does not escape the directory referenced by .Fa dirfd . The flag is used to implement the .Dv O_RESOLVE_BENEATH flag of .Xr openat 2 . +.It Dv NAMEILOOKUP +The component name is embedded in a +.Nm +lookup structure, and the +.Fn vfs_lookup_nameidata +function can be used to obtain that structure. +This can be useful in +.Xr VOP_LOOKUP 9 +implementations that need to obtain extra lookup metadata. .El .Sh PARAMETERS DESCRIPTORS FLAGS These flags are used for several purposes. Some of them affect the global .Nm operation, some provide information for processing of the specific path element, for instance, by the .Dv VOP_LOOKUP implementation of the involved filesystem. .Bl -tag -width IGNOREWHITEOUT .It Dv RDONLY Specifies that the lookup should act as if the final node is located on a read-only mount. The flag is typically used by file servers, e.g. NFS, to handle read-only exports. .It Dv ISRESTARTED The .Nm lookup was restarted with .Fn NDRESTART . This is used internally for double-root lookups used by ABI subsystems, after the lookup with the native root failed. The components are reset to the original values, and the lookup is repeated with a different root, once. .It Dv IGNOREWHITEOUT Ignore whiteouts, e.g. when checking if a directory is empty. .It Dv ISWHITEOUT The result of the lookup is a whiteout. .It Dv DOWHITEOUT Handle whiteouts; this is an instruction for the .Fn VOP_LOOKUP filesystem method. .It Dv WILLBEDIR The lookup is done for creating a new entry that will be a directory. It allows a trailing slash in the path string. .It Dv ISOPEN The caller is the code that opens a file. This allows weakening the lock mode of the returned vnode, if the mount point indicates extended shared lock support. .It Dv NOCROSSMOUNT Do not cross mount points during lookup. .Pp For a .Dq .. lookup leading to a mount root, the root vnode of the mount is returned instead of the covered vnode of the filesystem where the mount was placed. .Pp For other lookups passing over a mount point, do not jump into the mounted filesystem. This allows descending into the file hierarchy otherwise shadowed by the mount point. .It Dv NOMACCHECK Do not perform MAC checks during lookup. .It Dv AUDITVNODE1 Audit the looked-up vnode information, using the first slot for audit information. .It Dv AUDITVNODE2 Same as .Dv AUDITVNODE1 but use the second slot. .It Dv NOCAPCHECK Do not perform capability checks. If the calling process is in capability mode, lookup is denied outright. .It Dv OPENREAD The lookup was for open, and the file will be opened for read. .It Dv OPENWRITE The lookup was for open, and the file will be opened for write. .It Dv WANTIOCTLCAPS Leave ioctl caps for the caller. See the description of .Nm results. .It Dv OPENNAMED Opening a named attribute (directory). .It Dv NOEXECCHECK Do not perform the check for execute permission on the starting directory. It is used to implement the POSIX-required semantic for .Xr openat 2 lookups that must use the permissions from the time the directory was opened, and not those in effect at the time of the lookup. .It Dv MAKEENTRY The looked-up entry is to be added to the name cache. .It Dv ISSYMLINK The current component is a symlink, and it needs interpretation according to the .Dv FOLLOW or .Dv NOFOLLOW flags. .It Dv ISLASTCN This is the last component of the pathname. It is handled specially; many flags augment its processing. .It Dv ISDOTDOT The current component name is .Dq .. .
Usually implies a need to specially handle the vnode locking for instantiation of the target vnode. The generic .Fn vn_vget_ino_gen function and its more specialized variant .Fn vn_vget_ino might be useful. .It Dv TRAILINGSLASH Path ended in a slash. .It Dv CREATENAMED Create a named attribute dir. .El .Sh ALLOCATED ELEMENTS The .Vt nameidata structure is composed of the following fields: .Bl -tag -width ".Va ni_cnd.cn_pnbuf" .It Va ni_startdir In the normal case, this is either the current directory or the root. It is the current directory if the name passed in does not start with .Ql / and we have not gone through any symlinks with an absolute path, and the root otherwise. .Pp In this case, it is only used by .Fn vfs_lookup , and should not be considered valid after a call to .Fn namei . .It Va ni_dvp Vnode pointer to the directory of the object on which the lookup is performed. This is available on successful return if .Dv LOCKPARENT or .Dv WANTPARENT is set. It is locked if .Dv LOCKPARENT is set. .It Va ni_vp Vnode pointer to the resulting object, .Dv NULL otherwise. The .Va v_usecount field of this vnode is incremented. If .Dv LOCKLEAF is set, it is also locked. .Pp .It Va ni_cnd.cn_pnbuf The pathname buffer contains the location of the file or directory that will be used by the .Nm operations. It is managed by the .Xr uma 9 zone allocation interface. .El .Sh RESULTS The .Vt struct nameidata member .Va ni_resflags returns the following flags giving some details of the successful operation: .Bl -tag -width NIRES_EMPTYPATH .It Dv NIRES_ABS The path passed was absolute. .It Dv NIRES_STRICTREL Restricted lookup result. Only relative lookups were done to resolve the path to a vnode. .It Dv NIRES_EMPTYPATH The .Dv EMPTYPATH flag was provided and used. In particular, the passed path was empty. .El .Pp If the .Dv WANTIOCTLCAPS flag was specified, on return the .Va ni_filecaps member of the .Vt struct nameidata contains the capabilities of the file descriptor used as the lookup starting point .Pq Va dirfd . .Sh RETURN VALUES If successful, .Fn namei will return 0, otherwise it will return an error. .Sh FILES .Bl -tag -width Pa .It Pa src/sys/kern/vfs_lookup.c .El .Sh EXAMPLES Assuming the .Va path variable contains a pointer to a userspace path string, the following example looks up the file named by it, and performs the required error and resource handling: .Bd -literal char *path; struct nameidata nd; int error; NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, UIO_USERSPACE, path); if ((error = namei(&nd)) != 0) return (error); NDFREE_PNBUF(&nd); ... use nd.ni_vp vnode .Ed .Sh ERRORS Errors which .Fn namei may return: .Bl -tag -width Er .It Bq Er ENOTDIR A component of the specified pathname is not a directory when a directory is expected. .It Bq Er ENAMETOOLONG A component of a pathname exceeded 255 characters, or an entire pathname exceeded 1023 characters. .It Bq Er ENOENT A component of the specified pathname does not exist, or the pathname is an empty string. .It Bq Er EACCES An attempt is made to access a file in a way forbidden by its file access permissions. .It Bq Er ELOOP Too many symbolic links were encountered in translating the pathname. .It Bq Er EISDIR An attempt is made to open a directory with write mode specified. .It Bq Er EINVAL The last component of the pathname specified for a .Dv DELETE or .Dv RENAME operation is .Ql \&. . .It Bq Er EROFS An attempt is made to modify a file or directory on a read-only file system.
.El .Sh SEE ALSO .Xr uio 9 , .Xr uma 9 , .Xr VFS 9 , .Xr vnode 9 , .Xr vput 9 , .Xr vref 9 , .Xr vrele 9 .Sh AUTHORS .An -nosplit This manual page was written by .An Eivind Eklund Aq Mt eivind@FreeBSD.org and later significantly revised by .An Hiten M. Pandya Aq Mt hmp@FreeBSD.org . .Sh BUGS The .Dv LOCKPARENT flag does not always result in the parent vnode being locked. This results in complications when the .Dv LOCKPARENT is used. In order to solve this for the cases where both .Dv LOCKPARENT and .Dv LOCKLEAF are used, it is necessary to resort to recursive locking. diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c index a6b15c1dfa78..8608216e10e5 100644 --- a/sys/fs/nullfs/null_vnops.c +++ b/sys/fs/nullfs/null_vnops.c @@ -1,1182 +1,1190 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * John Heidemann of the UCLA Ficus project. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Ancestors: * ...and... */ /* * Null Layer * * (See mount_nullfs(8) for more information.) * * The null layer duplicates a portion of the filesystem * name space under a new name. In this respect, it is * similar to the loopback filesystem. It differs from * the loopback fs in two respects: it is implemented using * a stackable layers techniques, and its "null-node"s stack above * all lower-layer vnodes, not just over directory vnodes. * * The null layer has two purposes. First, it serves as a demonstration * of layering by proving a layer which does nothing. (It actually * does everything the loopback filesystem does, which is slightly * more than nothing.) Second, the null layer can serve as a prototype * layer. Since it provides all necessary layer framework, * new filesystem layers can be created very easily be starting * with a null layer. * * The remainder of this man page examines the null layer as a basis * for constructing new layers. * * * INSTANTIATING NEW NULL LAYERS * * New null layers are created with mount_nullfs(8). 
* Mount_nullfs(8) takes two arguments, the pathname * of the lower vfs (target-pn) and the pathname where the null * layer will appear in the namespace (alias-pn). After * the null layer is put into place, the contents * of the target-pn subtree will be aliased under alias-pn. * * * OPERATION OF A NULL LAYER * * The null layer is the minimum filesystem layer, * simply bypassing all possible operations to the lower layer * for processing there. The majority of its activity centers * on the bypass routine, through which nearly all vnode operations * pass. * * The bypass routine accepts arbitrary vnode operations for * handling by the lower layer. It begins by examining vnode * operation arguments and replacing any null-nodes by their * lower-layer equivalents. It then invokes the operation * on the lower layer. Finally, it replaces the null-nodes * in the arguments and, if a vnode is returned by the operation, * stacks a null-node on top of the returned vnode. * * Although bypass handles most operations, vop_getattr, vop_lock, * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not * bypassed. Vop_getattr must change the fsid being returned. * Vop_lock and vop_unlock must handle any locking for the * current vnode as well as pass the lock request down. * Vop_inactive and vop_reclaim are not bypassed so that * they can handle freeing null-layer specific data. Vop_print * is not bypassed to avoid excessive debugging information. * Also, certain vnode operations change the locking state within * the operation (create, mknod, remove, link, rename, mkdir, rmdir, * and symlink). Ideally these operations should not change the * lock state, but should be changed to let the caller of the * function unlock them. Otherwise all intermediate vnode layers * (such as union, umapfs, etc) must catch these functions to do * the necessary locking at their layer. * * * INSTANTIATING VNODE STACKS * * Mounting associates the null layer with a lower layer, * in effect stacking two VFSes. Vnode stacks are instead * created on demand as files are accessed. * * The initial mount creates a single vnode stack for the * root of the new null layer. All other vnode stacks * are created as a result of vnode operations on * this or other null vnode stacks. * * New vnode stacks come into existence as a result of * an operation which returns a vnode. * The bypass routine stacks a null-node above the new * vnode before returning it to the caller. * * For example, imagine mounting a null layer with * "mount_nullfs /usr/include /dev/layer/null". * Changing directory to /dev/layer/null will assign * the root null-node (which was created when the null layer was mounted). * Now consider opening "sys". A vop_lookup would be * done on the root null-node. This operation would bypass through * to the lower layer which would return a vnode representing * the UFS "sys". Null_bypass then builds a null-node * aliasing the UFS "sys" and returns this to the caller. * Later operations on the null-node "sys" will repeat this * process when constructing other vnode stacks. * * * CREATING OTHER FILE SYSTEM LAYERS * * One of the easiest ways to construct new filesystem layers is to make * a copy of the null layer, rename all files and variables, and * then begin modifying the copy. Sed can be used to easily rename * all variables. * * The umap layer is an example of a layer descended from the * null layer.
* * * INVOKING OPERATIONS ON LOWER LAYERS * * There are two techniques to invoke operations on a lower layer * when the operation cannot be completely bypassed. Each method * is appropriate in different situations. In both cases, * it is the responsibility of the aliasing layer to make * the operation arguments "correct" for the lower layer * by mapping a vnode arguments to the lower layer. * * The first approach is to call the aliasing layer's bypass routine. * This method is most suitable when you wish to invoke the operation * currently being handled on the lower layer. It has the advantage * that the bypass routine already must do argument mapping. * An example of this is null_getattrs in the null layer. * * A second approach is to directly invoke vnode operations on * the lower layer with the VOP_OPERATIONNAME interface. * The advantage of this method is that it is easy to invoke * arbitrary operations on the lower layer. The disadvantage * is that vnode arguments must be manualy mapped. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int null_bug_bypass = 0; /* for debugging: enables bypass printf'ing */ SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW, &null_bug_bypass, 0, ""); /* * This is the 10-Apr-92 bypass routine. * This version has been optimized for speed, throwing away some * safety checks. It should still always work, but it's not as * robust to programmer errors. * * In general, we map all vnodes going down and unmap them on the way back. * As an exception to this, vnodes can be marked "unmapped" by setting * the Nth bit in operation's vdesc_flags. * * Also, some BSD vnode operations have the side effect of vrele'ing * their arguments. With stacking, the reference counts are held * by the upper node, not the lower one, so we must handle these * side-effects here. This is not of concern in Sun-derived systems * since there are no such side-effects. * * This makes the following assumptions: * - only one returned vpp * - no INOUT vpp's (Sun's vop_open has one of these) * - the vnode operation vector of the first vnode should be used * to determine what implementation of the op should be invoked * - all mapped vnodes are of our vnode-type (NEEDSWORK: * problems on rmdir'ing mount points and renaming?) */ int null_bypass(struct vop_generic_args *ap) { struct vnode **this_vp_p; struct vnode *old_vps[VDESC_MAX_VPS]; struct vnode **vps_p[VDESC_MAX_VPS]; struct vnode ***vppp; struct vnode *lvp; struct vnodeop_desc *descp = ap->a_desc; int error, i, reles; if (null_bug_bypass) printf ("null_bypass: %s\n", descp->vdesc_name); #ifdef DIAGNOSTIC /* * We require at least one vp. */ if (descp->vdesc_vp_offsets == NULL || descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET) panic ("null_bypass: no vp's in map"); #endif /* * Map the vnodes going in. * Later, we'll invoke the operation based on * the first mapped vnode's operation vector. */ reles = descp->vdesc_flags; for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) { if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET) break; /* bail out at end of list */ vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode **, descp->vdesc_vp_offsets[i], ap); /* * We're not guaranteed that any but the first vnode * are of our type. Check for and don't map any * that aren't. (We must always map first vp or vclean fails.) 
*/ if (i != 0 && (*this_vp_p == NULLVP || (*this_vp_p)->v_op != &null_vnodeops)) { old_vps[i] = NULLVP; } else { old_vps[i] = *this_vp_p; *(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p); /* * The upper vnode reference to the lower * vnode is the only reference that keeps our * pointer to the lower vnode alive. If lower * vnode is relocked during the VOP call, * upper vnode might become unlocked and * reclaimed, which invalidates our reference. * Add a transient hold around VOP call. */ vhold(*this_vp_p); /* * XXX - Several operations have the side effect * of vrele'ing their vp's. We must account for * that. (This should go away in the future.) */ if (reles & VDESC_VP0_WILLRELE) vref(*this_vp_p); } } /* * Call the operation on the lower layer * with the modified argument structure. */ if (vps_p[0] != NULL && *vps_p[0] != NULL) { error = VCALL(ap); } else { printf("null_bypass: no map for %s\n", descp->vdesc_name); error = EINVAL; } /* * Maintain the illusion of call-by-value * by restoring vnodes in the argument structure * to their original value. */ reles = descp->vdesc_flags; for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) { if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET) break; /* bail out at end of list */ if (old_vps[i] != NULL) { lvp = *(vps_p[i]); /* * Get rid of the transient hold on lvp. * If lowervp was unlocked during VOP * operation, nullfs upper vnode could have * been reclaimed, which changes its v_vnlock * back to private v_lock. In this case we * must move lock ownership from lower to * upper (reclaimed) vnode. */ if (lvp != NULLVP) { if (VOP_ISLOCKED(lvp) == LK_EXCLUSIVE && old_vps[i]->v_vnlock != lvp->v_vnlock) { VOP_UNLOCK(lvp); VOP_LOCK(old_vps[i], LK_EXCLUSIVE | LK_RETRY); } vdrop(lvp); } *(vps_p[i]) = old_vps[i]; #if 0 if (reles & VDESC_VP0_WILLUNLOCK) VOP_UNLOCK(*(vps_p[i]), 0); #endif if (reles & VDESC_VP0_WILLRELE) vrele(*(vps_p[i])); } } /* * Map the possible out-going vpp * (Assumes that the lower layer always returns * a VREF'ed vpp unless it gets an error.) */ if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && error == 0) { /* * XXX - even though some ops have vpp returned vp's, * several ops actually vrele this before returning. * We must avoid these ops. * (This should go away when these ops are regularized.) */ vppp = VOPARG_OFFSETTO(struct vnode ***, descp->vdesc_vpp_offset, ap); if (*vppp != NULL) error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp); } return (error); } static int null_add_writecount(struct vop_add_writecount_args *ap) { struct vnode *lvp, *vp; int error; vp = ap->a_vp; lvp = NULLVPTOLOWERVP(vp); VI_LOCK(vp); /* text refs are bypassed to lowervp */ VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount")); VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp, ("wrong writecount inc %d", ap->a_inc)); error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc); if (error == 0) vp->v_writecount += ap->a_inc; VI_UNLOCK(vp); return (error); } /* * We have to carry on the locking protocol on the null layer vnodes * as we progress through the tree. We also have to enforce read-only * if this layer is mounted read-only. 
*/ static int null_lookup(struct vop_lookup_args *ap) { struct componentname *cnp = ap->a_cnp; struct vnode *dvp = ap->a_dvp; uint64_t flags = cnp->cn_flags; struct vnode *vp, *ldvp, *lvp; struct mount *mp; int error; mp = dvp->v_mount; if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) return (EROFS); /* * Although it is possible to call null_bypass(), we'll do * a direct call to reduce overhead */ ldvp = NULLVPTOLOWERVP(dvp); vp = lvp = NULL; /* * Renames in the lower mounts might create an inconsistent - * configuration where lower vnode is moved out of the - * directory tree remounted by our null mount. Do not try to - * handle it fancy, just avoid VOP_LOOKUP() with DOTDOT name - * which cannot be handled by VOP, at least passing over lower - * root. + * configuration where lower vnode is moved out of the directory tree + * remounted by our null mount. + * + * Do not try to handle it fancy, just avoid VOP_LOOKUP() with DOTDOT + * name which cannot be handled by the VOP. */ - if ((ldvp->v_vflag & VV_ROOT) != 0 && (flags & ISDOTDOT) != 0) { - KASSERT((dvp->v_vflag & VV_ROOT) == 0, - ("ldvp %p fl %#x dvp %p fl %#x flags %#jx", - ldvp, ldvp->v_vflag, dvp, dvp->v_vflag, (uintmax_t)flags)); - return (ENOENT); + if ((flags & ISDOTDOT) != 0) { + struct nameidata *ndp; + + if ((ldvp->v_vflag & VV_ROOT) != 0) { + KASSERT((dvp->v_vflag & VV_ROOT) == 0, + ("ldvp %p fl %#x dvp %p fl %#x flags %#jx", + ldvp, ldvp->v_vflag, dvp, dvp->v_vflag, + (uintmax_t)flags)); + return (ENOENT); + } + ndp = vfs_lookup_nameidata(cnp); + if (ndp != NULL && vfs_lookup_isroot(ndp, ldvp)) + return (ENOENT); } /* * Hold ldvp. The reference on it, owned by dvp, is lost in * case of dvp reclamation, and we need ldvp to move our lock * from ldvp to dvp. */ vhold(ldvp); error = VOP_LOOKUP(ldvp, &lvp, cnp); /* * VOP_LOOKUP() on lower vnode may unlock ldvp, which allows * dvp to be reclaimed due to shared v_vnlock. Check for the * doomed state and return error. */ if (VN_IS_DOOMED(dvp)) { if (error == 0 || error == EJUSTRETURN) { if (lvp != NULL) vput(lvp); error = ENOENT; } /* * If vgone() did reclaimed dvp before curthread * relocked ldvp, the locks of dvp and ldpv are no * longer shared. In this case, relock of ldvp in * lower fs VOP_LOOKUP() does not restore the locking * state of dvp. Compensate for this by unlocking * ldvp and locking dvp, which is also correct if the * locks are still shared. */ VOP_UNLOCK(ldvp); vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); } vdrop(ldvp); if (error == EJUSTRETURN && (flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 && (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) error = EROFS; if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) { if (ldvp == lvp) { *ap->a_vpp = dvp; VREF(dvp); vrele(lvp); } else { error = null_nodeget(mp, lvp, &vp); if (error == 0) *ap->a_vpp = vp; } } return (error); } static int null_open(struct vop_open_args *ap) { int retval; struct vnode *vp, *ldvp; vp = ap->a_vp; ldvp = NULLVPTOLOWERVP(vp); retval = null_bypass(&ap->a_gen); if (retval == 0) { vp->v_object = ldvp->v_object; if ((vn_irflag_read(ldvp) & VIRF_PGREAD) != 0) { MPASS(vp->v_object != NULL); if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) { vn_irflag_set_cond(vp, VIRF_PGREAD); } } } return (retval); } /* * Setattr call. Disallow write attempts if the layer is mounted read-only. 
*/ static int null_setattr(struct vop_setattr_args *ap) { struct vnode *vp = ap->a_vp; struct vattr *vap = ap->a_vap; if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && (vp->v_mount->mnt_flag & MNT_RDONLY)) return (EROFS); if (vap->va_size != VNOVAL) { switch (vp->v_type) { case VDIR: return (EISDIR); case VCHR: case VBLK: case VSOCK: case VFIFO: if (vap->va_flags != VNOVAL) return (EOPNOTSUPP); return (0); case VREG: case VLNK: default: /* * Disallow write attempts if the filesystem is * mounted read-only. */ if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); } } return (null_bypass(&ap->a_gen)); } /* * We handle stat and getattr only to change the fsid. */ static int null_stat(struct vop_stat_args *ap) { int error; if ((error = null_bypass(&ap->a_gen)) != 0) return (error); ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; return (0); } static int null_getattr(struct vop_getattr_args *ap) { int error; if ((error = null_bypass(&ap->a_gen)) != 0) return (error); ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; return (0); } /* * Handle to disallow write access if mounted read-only. */ static int null_access(struct vop_access_args *ap) { struct vnode *vp = ap->a_vp; accmode_t accmode = ap->a_accmode; /* * Disallow write attempts on read-only layers; * unless the file is a socket, fifo, or a block or * character device resident on the filesystem. */ if (accmode & VWRITE) { switch (vp->v_type) { case VDIR: case VLNK: case VREG: if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: break; } } return (null_bypass(&ap->a_gen)); } static int null_accessx(struct vop_accessx_args *ap) { struct vnode *vp = ap->a_vp; accmode_t accmode = ap->a_accmode; /* * Disallow write attempts on read-only layers; * unless the file is a socket, fifo, or a block or * character device resident on the filesystem. */ if (accmode & VWRITE) { switch (vp->v_type) { case VDIR: case VLNK: case VREG: if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: break; } } return (null_bypass(&ap->a_gen)); } /* * Increasing refcount of lower vnode is needed at least for the case * when lower FS is NFS to do sillyrename if the file is in use. * Unfortunately v_usecount is incremented in many places in * the kernel and, as such, there may be races that result in * the NFS client doing an extraneous silly rename, but that seems * preferable to not doing a silly rename when it is needed. */ static int null_remove(struct vop_remove_args *ap) { int retval, vreleit; struct vnode *lvp, *vp; vp = ap->a_vp; if (vrefcnt(vp) > 1) { lvp = NULLVPTOLOWERVP(vp); VREF(lvp); vreleit = 1; } else vreleit = 0; VTONULL(vp)->null_flags |= NULLV_DROP; retval = null_bypass(&ap->a_gen); if (vreleit != 0) vrele(lvp); return (retval); } /* * We handle this to eliminate null FS to lower FS * file moving. Don't know why we don't allow this, * possibly we should. */ static int null_rename(struct vop_rename_args *ap) { struct vnode *fdvp, *fvp, *tdvp, *tvp; struct vnode *lfdvp, *lfvp, *ltdvp, *ltvp; struct null_node *fdnn, *fnn, *tdnn, *tnn; int error; tdvp = ap->a_tdvp; fvp = ap->a_fvp; fdvp = ap->a_fdvp; tvp = ap->a_tvp; lfdvp = NULL; /* Check for cross-device rename. 
*/ if ((fvp->v_mount != tdvp->v_mount) || (tvp != NULL && fvp->v_mount != tvp->v_mount)) { error = EXDEV; goto upper_err; } VI_LOCK(fdvp); fdnn = VTONULL(fdvp); if (fdnn == NULL) { /* fdvp is not locked, can be doomed */ VI_UNLOCK(fdvp); error = ENOENT; goto upper_err; } lfdvp = fdnn->null_lowervp; vref(lfdvp); VI_UNLOCK(fdvp); VI_LOCK(fvp); fnn = VTONULL(fvp); if (fnn == NULL) { VI_UNLOCK(fvp); error = ENOENT; goto upper_err; } lfvp = fnn->null_lowervp; vref(lfvp); VI_UNLOCK(fvp); tdnn = VTONULL(tdvp); ltdvp = tdnn->null_lowervp; vref(ltdvp); if (tvp != NULL) { tnn = VTONULL(tvp); ltvp = tnn->null_lowervp; vref(ltvp); tnn->null_flags |= NULLV_DROP; } else { ltvp = NULL; } error = VOP_RENAME(lfdvp, lfvp, ap->a_fcnp, ltdvp, ltvp, ap->a_tcnp); vrele(fdvp); vrele(fvp); vrele(tdvp); if (tvp != NULL) vrele(tvp); return (error); upper_err: if (tdvp == tvp) vrele(tdvp); else vput(tdvp); if (tvp) vput(tvp); if (lfdvp != NULL) vrele(lfdvp); vrele(fdvp); vrele(fvp); return (error); } static int null_rmdir(struct vop_rmdir_args *ap) { VTONULL(ap->a_vp)->null_flags |= NULLV_DROP; return (null_bypass(&ap->a_gen)); } /* * We need to process our own vnode lock and then clear the * interlock flag as it applies only to our vnode, not the * vnodes below us on the stack. */ static int null_lock(struct vop_lock1_args *ap) { struct vnode *vp = ap->a_vp; int flags; struct null_node *nn; struct vnode *lvp; int error; if ((ap->a_flags & LK_INTERLOCK) == 0) VI_LOCK(vp); else ap->a_flags &= ~LK_INTERLOCK; flags = ap->a_flags; nn = VTONULL(vp); /* * If we're still active we must ask the lower layer to * lock as ffs has special lock considerations in its * vop lock. */ if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) { /* * We have to hold the vnode here to solve a potential * reclaim race. If we're forcibly vgone'd while we * still have refs, a thread could be sleeping inside * the lowervp's vop_lock routine. When we vgone we will * drop our last ref to the lowervp, which would allow it * to be reclaimed. The lowervp could then be recycled, * in which case it is not legal to be sleeping in its VOP. * We prevent it from being recycled by holding the vnode * here. */ vholdnz(lvp); VI_UNLOCK(vp); error = VOP_LOCK(lvp, flags); /* * We might have slept to get the lock and someone might have * clean our vnode already, switching vnode lock from one in * lowervp to v_lock in our own vnode structure. Handle this * case by reacquiring correct lock in requested mode. */ if (VTONULL(vp) == NULL && error == 0) { ap->a_flags &= ~LK_TYPE_MASK; switch (flags & LK_TYPE_MASK) { case LK_SHARED: ap->a_flags |= LK_SHARED; break; case LK_UPGRADE: case LK_EXCLUSIVE: ap->a_flags |= LK_EXCLUSIVE; break; default: panic("Unsupported lock request %d\n", ap->a_flags); } VOP_UNLOCK(lvp); error = vop_stdlock(ap); } vdrop(lvp); } else { VI_UNLOCK(vp); error = vop_stdlock(ap); } return (error); } /* * We need to process our own vnode unlock and then clear the * interlock flag as it applies only to our vnode, not the * vnodes below us on the stack. */ static int null_unlock(struct vop_unlock_args *ap) { struct vnode *vp = ap->a_vp; struct null_node *nn; struct vnode *lvp; int error; nn = VTONULL(vp); if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) { vholdnz(lvp); error = VOP_UNLOCK(lvp); vdrop(lvp); } else { error = vop_stdunlock(ap); } return (error); } /* * Do not allow the VOP_INACTIVE to be passed to the lower layer, * since the reference count on the lower vnode is not related to * ours. 
*/ static int null_want_recycle(struct vnode *vp) { struct vnode *lvp; struct null_node *xp; struct mount *mp; struct null_mount *xmp; xp = VTONULL(vp); lvp = NULLVPTOLOWERVP(vp); mp = vp->v_mount; xmp = MOUNTTONULLMOUNT(mp); if ((xmp->nullm_flags & NULLM_CACHE) == 0 || (xp->null_flags & NULLV_DROP) != 0 || (lvp->v_vflag & VV_NOSYNC) != 0) { /* * If this is the last reference and caching of the * nullfs vnodes is not enabled, or the lower vnode is * deleted, then free up the vnode so as not to tie up * the lower vnodes. */ return (1); } return (0); } static int null_inactive(struct vop_inactive_args *ap) { struct vnode *vp; vp = ap->a_vp; if (null_want_recycle(vp)) { vp->v_object = NULL; vrecycle(vp); } return (0); } static int null_need_inactive(struct vop_need_inactive_args *ap) { return (null_want_recycle(ap->a_vp) || vn_need_pageq_flush(ap->a_vp)); } /* * Now, the nullfs vnode and, due to the sharing lock, the lower * vnode, are exclusively locked, and we shall destroy the null vnode. */ static int null_reclaim(struct vop_reclaim_args *ap) { struct vnode *vp; struct null_node *xp; struct vnode *lowervp; vp = ap->a_vp; xp = VTONULL(vp); lowervp = xp->null_lowervp; KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock, ("Reclaiming incomplete null vnode %p", vp)); null_hashrem(xp); /* * Use the interlock to protect the clearing of v_data to * prevent faults in null_lock(). */ lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL); VI_LOCK(vp); vp->v_data = NULL; vp->v_object = NULL; vp->v_vnlock = &vp->v_lock; /* * If we were opened for write, we leased the write reference * to the lower vnode. If this is a reclamation due to the * forced unmount, undo the reference now. */ if (vp->v_writecount > 0) VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount); else if (vp->v_writecount < 0) vp->v_writecount = 0; VI_UNLOCK(vp); if ((xp->null_flags & NULLV_NOUNLOCK) != 0) vunref(lowervp); else vput(lowervp); free(xp, M_NULLFSNODE); return (0); } static int null_print(struct vop_print_args *ap) { struct vnode *vp = ap->a_vp; printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp); return (0); } /* ARGSUSED */ static int null_getwritemount(struct vop_getwritemount_args *ap) { struct null_node *xp; struct vnode *lowervp; struct vnode *vp; vp = ap->a_vp; VI_LOCK(vp); xp = VTONULL(vp); if (xp && (lowervp = xp->null_lowervp)) { vholdnz(lowervp); VI_UNLOCK(vp); VOP_GETWRITEMOUNT(lowervp, ap->a_mpp); vdrop(lowervp); } else { VI_UNLOCK(vp); *(ap->a_mpp) = NULL; } return (0); } static int null_vptofh(struct vop_vptofh_args *ap) { struct vnode *lvp; lvp = NULLVPTOLOWERVP(ap->a_vp); return VOP_VPTOFH(lvp, ap->a_fhp); } static int null_vptocnp(struct vop_vptocnp_args *ap) { struct vnode *vp = ap->a_vp; struct vnode **dvp = ap->a_vpp; struct vnode *lvp, *ldvp; struct mount *mp; int error, locked; locked = VOP_ISLOCKED(vp); lvp = NULLVPTOLOWERVP(vp); mp = vp->v_mount; error = vfs_busy(mp, MBF_NOWAIT); if (error != 0) return (error); vhold(lvp); VOP_UNLOCK(vp); /* vp is held by vn_vptocnp_locked that called us */ ldvp = lvp; vref(lvp); error = vn_vptocnp(&ldvp, ap->a_buf, ap->a_buflen); vdrop(lvp); if (error != 0) { vn_lock(vp, locked | LK_RETRY); vfs_unbusy(mp); return (ENOENT); } error = vn_lock(ldvp, LK_SHARED); if (error != 0) { vrele(ldvp); vn_lock(vp, locked | LK_RETRY); vfs_unbusy(mp); return (ENOENT); } error = null_nodeget(mp, ldvp, dvp); if (error == 0) { #ifdef DIAGNOSTIC NULLVPTOLOWERVP(*dvp); #endif VOP_UNLOCK(*dvp); /* keep reference on *dvp */ } vn_lock(vp, locked | LK_RETRY); vfs_unbusy(mp); return (error); } 
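/*
 * Illustrative sketch, not part of this change: how a stacked
 * filesystem's VOP_LOOKUP() implementation can consume the lookup
 * metadata documented under NAMEILOOKUP in namei(9) above.  When the
 * componentname is embedded in a namei() lookup, vfs_lookup_nameidata()
 * recovers the enclosing nameidata, and vfs_lookup_isroot() reports
 * whether the given directory vnode is the root of that lookup, so a
 * DOTDOT lookup can be refused instead of escaping the lookup root.
 * This mirrors the null_lookup() change above; "example_lookup" and
 * "lower_dvp" are placeholder names.
 *
 *	static int
 *	example_lookup(struct vop_lookup_args *ap)
 *	{
 *		struct componentname *cnp = ap->a_cnp;
 *		struct nameidata *ndp;
 *
 *		if ((cnp->cn_flags & ISDOTDOT) != 0) {
 *			ndp = vfs_lookup_nameidata(cnp);
 *			if (ndp != NULL && vfs_lookup_isroot(ndp, lower_dvp))
 *				return (ENOENT);
 *		}
 *		...
 *	}
 */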
static int null_read_pgcache(struct vop_read_pgcache_args *ap) { struct vnode *lvp, *vp; struct null_node *xp; int error; vp = ap->a_vp; VI_LOCK(vp); xp = VTONULL(vp); if (xp == NULL) { VI_UNLOCK(vp); return (EJUSTRETURN); } lvp = xp->null_lowervp; vref(lvp); VI_UNLOCK(vp); error = VOP_READ_PGCACHE(lvp, ap->a_uio, ap->a_ioflag, ap->a_cred); vrele(lvp); return (error); } static int null_advlock(struct vop_advlock_args *ap) { struct vnode *lvp, *vp; struct null_node *xp; int error; vp = ap->a_vp; VI_LOCK(vp); xp = VTONULL(vp); if (xp == NULL) { VI_UNLOCK(vp); return (EBADF); } lvp = xp->null_lowervp; vref(lvp); VI_UNLOCK(vp); error = VOP_ADVLOCK(lvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags); vrele(lvp); return (error); } /* * Avoid standard bypass, since lower dvp and vp could be no longer * valid after vput(). */ static int null_vput_pair(struct vop_vput_pair_args *ap) { struct mount *mp; struct vnode *dvp, *ldvp, *lvp, *vp, *vp1, **vpp; int error, res; dvp = ap->a_dvp; ldvp = NULLVPTOLOWERVP(dvp); vref(ldvp); vpp = ap->a_vpp; vp = NULL; lvp = NULL; mp = NULL; if (vpp != NULL) vp = *vpp; if (vp != NULL) { lvp = NULLVPTOLOWERVP(vp); vref(lvp); if (!ap->a_unlock_vp) { vhold(vp); vhold(lvp); mp = vp->v_mount; vfs_ref(mp); } } res = VOP_VPUT_PAIR(ldvp, lvp != NULL ? &lvp : NULL, true); if (vp != NULL && ap->a_unlock_vp) vrele(vp); vrele(dvp); if (vp == NULL || ap->a_unlock_vp) return (res); /* lvp has been unlocked and vp might be reclaimed */ VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY); if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) { vput(vp); vget(lvp, LK_EXCLUSIVE | LK_RETRY); if (VN_IS_DOOMED(lvp)) { vput(lvp); vget(vp, LK_EXCLUSIVE | LK_RETRY); } else { error = null_nodeget(mp, lvp, &vp1); if (error == 0) { *vpp = vp1; } else { vget(vp, LK_EXCLUSIVE | LK_RETRY); } } vfs_unbusy(mp); } vdrop(lvp); vdrop(vp); vfs_rel(mp); return (res); } static int null_getlowvnode(struct vop_getlowvnode_args *ap) { struct vnode *vp, *vpl; vp = ap->a_vp; if (vn_lock(vp, LK_SHARED) != 0) return (EBADF); vpl = NULLVPTOLOWERVP(vp); vhold(vpl); VOP_UNLOCK(vp); VOP_GETLOWVNODE(vpl, ap->a_vplp, ap->a_flags); vdrop(vpl); return (0); } /* * Global vfs data structures */ struct vop_vector null_vnodeops = { .vop_bypass = null_bypass, .vop_access = null_access, .vop_accessx = null_accessx, .vop_advlock = null_advlock, .vop_advlockpurge = vop_stdadvlockpurge, .vop_bmap = VOP_EOPNOTSUPP, .vop_stat = null_stat, .vop_getattr = null_getattr, .vop_getlowvnode = null_getlowvnode, .vop_getwritemount = null_getwritemount, .vop_inactive = null_inactive, .vop_need_inactive = null_need_inactive, .vop_islocked = vop_stdislocked, .vop_lock1 = null_lock, .vop_lookup = null_lookup, .vop_open = null_open, .vop_print = null_print, .vop_read_pgcache = null_read_pgcache, .vop_reclaim = null_reclaim, .vop_remove = null_remove, .vop_rename = null_rename, .vop_rmdir = null_rmdir, .vop_setattr = null_setattr, .vop_strategy = VOP_EOPNOTSUPP, .vop_unlock = null_unlock, .vop_vptocnp = null_vptocnp, .vop_vptofh = null_vptofh, .vop_add_writecount = null_add_writecount, .vop_vput_pair = null_vput_pair, .vop_copy_file_range = VOP_PANIC, }; VFS_VOP_VECTOR_REGISTER(null_vnodeops); diff --git a/sys/fs/unionfs/union_vnops.c b/sys/fs/unionfs/union_vnops.c index e1048e4ba7ab..a930e3921ab3 100644 --- a/sys/fs/unionfs/union_vnops.c +++ b/sys/fs/unionfs/union_vnops.c @@ -1,3026 +1,3047 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry. 
* Copyright (c) 1992, 1993, 1994, 1995 * The Regents of the University of California. * Copyright (c) 2005, 2006, 2012 Masanori Ozawa , ONGS Inc. * Copyright (c) 2006, 2012 Daichi Goto * All rights reserved. * * This code is derived from software contributed to Berkeley by * Jan-Simon Pendry. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if 0 #define UNIONFS_INTERNAL_DEBUG(msg, args...) printf(msg, ## args) #define UNIONFS_IDBG_RENAME #else #define UNIONFS_INTERNAL_DEBUG(msg, args...) #endif #define KASSERT_UNIONFS_VNODE(vp) \ VNASSERT(((vp)->v_op == &unionfs_vnodeops), vp, \ ("%s: non-unionfs vnode", __func__)) +static bool +unionfs_lookup_isroot(struct componentname *cnp, struct vnode *dvp) +{ + struct nameidata *ndp; + + if (dvp == NULL) + return (false); + if ((dvp->v_vflag & VV_ROOT) != 0) + return (true); + ndp = vfs_lookup_nameidata(cnp); + if (ndp == NULL) + return (false); + return (vfs_lookup_isroot(ndp, dvp)); +} + static int unionfs_lookup(struct vop_cachedlookup_args *ap) { struct unionfs_node *dunp, *unp; struct vnode *dvp, *udvp, *ldvp, *vp, *uvp, *lvp, *dtmpvp; struct vattr va; struct componentname *cnp; struct thread *td; u_long nameiop; u_long cnflags; int lockflag; int lkflags; int error, uerror, lerror; lockflag = 0; error = uerror = lerror = ENOENT; cnp = ap->a_cnp; nameiop = cnp->cn_nameiop; cnflags = cnp->cn_flags; dvp = ap->a_dvp; dunp = VTOUNIONFS(dvp); udvp = dunp->un_uppervp; ldvp = dunp->un_lowervp; vp = uvp = lvp = NULLVP; td = curthread; *(ap->a_vpp) = NULLVP; UNIONFS_INTERNAL_DEBUG( "unionfs_lookup: enter: nameiop=%ld, flags=%lx, path=%s\n", nameiop, cnflags, cnp->cn_nameptr); if (dvp->v_type != VDIR) return (ENOTDIR); /* * If read-only and op is not LOOKUP, will return EROFS. 
*/ if ((cnflags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && LOOKUP != nameiop) return (EROFS); /* * Note that a lookup is in-flight, and block if another lookup * is already in-flight against dvp. This is done because we may * end up dropping dvp's lock to look up a lower vnode or to create * a shadow directory, opening up the possibility of parallel lookups * against the same directory creating duplicate unionfs vnodes for * the same file(s). Note that if this function encounters an * in-progress lookup for the directory, it will block until the * lookup is complete and then return ERELOOKUP to allow any * existing unionfs vnode to be loaded from the VFS cache. * This is really a hack; filesystems that support MNTK_LOOKUP_SHARED * (which unionfs currently doesn't) seem to deal with this by using * the vfs_hash_* functions to manage a per-mount vnode cache keyed * by the inode number (or some roughly equivalent unique ID * usually associated with the storage medium). It may make sense * for unionfs to adopt something similar as a replacement for its * current half-baked directory-only cache implementation, particularly * if we want to support MNTK_LOOKUP_SHARED here. */ error = unionfs_set_in_progress_flag(dvp, UNIONFS_LOOKUP_IN_PROGRESS); if (error != 0) return (error); /* * lookup dotdot */ if (cnflags & ISDOTDOT) { if (LOOKUP != nameiop && udvp == NULLVP) { error = EROFS; goto unionfs_lookup_return; } + if (unionfs_lookup_isroot(cnp, udvp) || + unionfs_lookup_isroot(cnp, ldvp)) { + error = ENOENT; + goto unionfs_lookup_return; + } + if (udvp != NULLVP) dtmpvp = udvp; else dtmpvp = ldvp; unionfs_forward_vop_start(dtmpvp, &lkflags); error = VOP_LOOKUP(dtmpvp, &vp, cnp); unionfs_forward_vop_finish(dvp, dtmpvp, lkflags); /* * Drop the lock and reference on vp. If the lookup was * successful, we'll either need to exchange vp's lock and * reference for the unionfs parent vnode's lock and * reference, or (if dvp was reclaimed) we'll need to drop * vp's lock and reference to return early. */ if (vp != NULLVP) vput(vp); dunp = VTOUNIONFS(dvp); if (error == 0 && dunp == NULL) error = ENOENT; if (error == 0) { dtmpvp = dunp->un_dvp; vref(dtmpvp); VOP_UNLOCK(dvp); *(ap->a_vpp) = dtmpvp; vn_lock(dtmpvp, cnp->cn_lkflags | LK_RETRY); if (VN_IS_DOOMED(dtmpvp)) { vput(dtmpvp); *(ap->a_vpp) = NULLVP; error = ENOENT; } vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); } goto unionfs_lookup_cleanup; } /* * Lookup lower layer. We do this before looking up the upper * layer, as we may drop the upper parent directory's lock, and we * want to ensure the upper parent remains locked from the point of * lookup through any ensuing VOP that may require it to be locked. * The cost of this is that we may end up performing an unnecessary * lower layer lookup if a whiteout is present in the upper layer.
*/ if (ldvp != NULLVP && !(cnflags & DOWHITEOUT)) { struct componentname lcn; bool is_dot; if (udvp != NULLVP) { vref(ldvp); VOP_UNLOCK(dvp); vn_lock(ldvp, LK_EXCLUSIVE | LK_RETRY); } lcn = *cnp; /* always op is LOOKUP */ lcn.cn_nameiop = LOOKUP; lcn.cn_flags = cnflags; is_dot = false; if (udvp == NULLVP) unionfs_forward_vop_start(ldvp, &lkflags); lerror = VOP_LOOKUP(ldvp, &lvp, &lcn); if (udvp == NULLVP && unionfs_forward_vop_finish(dvp, ldvp, lkflags)) { if (lvp != NULLVP) VOP_UNLOCK(lvp); error = ENOENT; goto unionfs_lookup_cleanup; } if (udvp == NULLVP) cnp->cn_flags = lcn.cn_flags; if (lerror == 0) { if (ldvp == lvp) { /* is dot */ vrele(lvp); *(ap->a_vpp) = dvp; vref(dvp); is_dot = true; error = lerror; } else if (lvp != NULLVP) VOP_UNLOCK(lvp); } if (udvp != NULLVP) { vput(ldvp); vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); if (VN_IS_DOOMED(dvp)) error = ENOENT; } if (is_dot) goto unionfs_lookup_return; else if (error != 0) goto unionfs_lookup_cleanup; } /* * lookup upper layer */ if (udvp != NULLVP) { bool iswhiteout = false; unionfs_forward_vop_start(udvp, &lkflags); uerror = VOP_LOOKUP(udvp, &uvp, cnp); if (unionfs_forward_vop_finish(dvp, udvp, lkflags)) { if (uvp != NULLVP) VOP_UNLOCK(uvp); error = ENOENT; goto unionfs_lookup_cleanup; } if (uerror == 0) { if (udvp == uvp) { /* is dot */ if (lvp != NULLVP) vrele(lvp); vrele(uvp); *(ap->a_vpp) = dvp; vref(dvp); error = uerror; goto unionfs_lookup_return; } else if (uvp != NULLVP) VOP_UNLOCK(uvp); } /* check whiteout */ if ((uerror == ENOENT || uerror == EJUSTRETURN) && (cnp->cn_flags & ISWHITEOUT)) iswhiteout = true; else if (VOP_GETATTR(udvp, &va, cnp->cn_cred) == 0 && (va.va_flags & OPAQUE)) iswhiteout = true; if (iswhiteout && lvp != NULLVP) { vrele(lvp); lvp = NULLVP; } #if 0 UNIONFS_INTERNAL_DEBUG( "unionfs_lookup: debug: whiteout=%d, path=%s\n", iswhiteout, cnp->cn_nameptr); #endif } /* * check lookup result */ if (uvp == NULLVP && lvp == NULLVP) { error = (udvp != NULLVP ? uerror : lerror); goto unionfs_lookup_return; } /* * check vnode type */ if (uvp != NULLVP && lvp != NULLVP && uvp->v_type != lvp->v_type) { vrele(lvp); lvp = NULLVP; } /* * check shadow dir */ if (uerror != 0 && uerror != EJUSTRETURN && udvp != NULLVP && lerror == 0 && lvp != NULLVP && lvp->v_type == VDIR && !(dvp->v_mount->mnt_flag & MNT_RDONLY) && (1 < cnp->cn_namelen || '.' != *(cnp->cn_nameptr))) { /* get unionfs vnode in order to create a new shadow dir. */ error = unionfs_nodeget(dvp->v_mount, NULLVP, lvp, dvp, &vp, cnp); if (error != 0) goto unionfs_lookup_cleanup; if (LK_SHARED == (cnp->cn_lkflags & LK_TYPE_MASK)) VOP_UNLOCK(vp); if (LK_EXCLUSIVE != VOP_ISLOCKED(vp)) { vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); lockflag = 1; } unp = VTOUNIONFS(vp); if (unp == NULL) error = ENOENT; else error = unionfs_mkshadowdir(dvp, vp, cnp, td); if (lockflag != 0) VOP_UNLOCK(vp); if (error != 0) { UNIONFSDEBUG( "unionfs_lookup: Unable to create shadow dir."); if ((cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) vput(vp); else vrele(vp); goto unionfs_lookup_cleanup; } /* * TODO: Since unionfs_mkshadowdir() relocks udvp after * creating the new directory, return ERELOOKUP here? */ if ((cnp->cn_lkflags & LK_TYPE_MASK) == LK_SHARED) vn_lock(vp, LK_SHARED | LK_RETRY); } /* * get unionfs vnode. 
*/ else { if (uvp != NULLVP) error = uerror; else error = lerror; if (error != 0) goto unionfs_lookup_cleanup; error = unionfs_nodeget(dvp->v_mount, uvp, lvp, dvp, &vp, cnp); if (error != 0) { UNIONFSDEBUG( "unionfs_lookup: Unable to create unionfs vnode."); goto unionfs_lookup_cleanup; } } if (VN_IS_DOOMED(dvp) || VN_IS_DOOMED(vp)) { error = ENOENT; vput(vp); goto unionfs_lookup_cleanup; } *(ap->a_vpp) = vp; if (cnflags & MAKEENTRY) cache_enter(dvp, vp, cnp); unionfs_lookup_cleanup: if (uvp != NULLVP) vrele(uvp); if (lvp != NULLVP) vrele(lvp); if (error == ENOENT && (cnflags & MAKEENTRY) != 0 && !VN_IS_DOOMED(dvp)) cache_enter(dvp, NULLVP, cnp); unionfs_lookup_return: unionfs_clear_in_progress_flag(dvp, UNIONFS_LOOKUP_IN_PROGRESS); UNIONFS_INTERNAL_DEBUG("unionfs_lookup: leave (%d)\n", error); return (error); } static int unionfs_create(struct vop_create_args *ap) { struct unionfs_node *dunp; struct componentname *cnp; struct vnode *udvp; struct vnode *vp; int error; UNIONFS_INTERNAL_DEBUG("unionfs_create: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_dvp); dunp = VTOUNIONFS(ap->a_dvp); cnp = ap->a_cnp; udvp = dunp->un_uppervp; error = EROFS; if (udvp != NULLVP) { int lkflags; bool vp_created = false; unionfs_forward_vop_start(udvp, &lkflags); error = VOP_CREATE(udvp, &vp, cnp, ap->a_vap); if (error == 0) vp_created = true; if (__predict_false(unionfs_forward_vop_finish(ap->a_dvp, udvp, lkflags)) && error == 0) { error = ENOENT; } if (error == 0) { VOP_UNLOCK(vp); error = unionfs_nodeget(ap->a_dvp->v_mount, vp, NULLVP, ap->a_dvp, ap->a_vpp, cnp); vrele(vp); } else if (vp_created) vput(vp); } UNIONFS_INTERNAL_DEBUG("unionfs_create: leave (%d)\n", error); return (error); } static int unionfs_whiteout(struct vop_whiteout_args *ap) { struct unionfs_node *dunp; struct componentname *cnp; struct vnode *udvp; int error; UNIONFS_INTERNAL_DEBUG("unionfs_whiteout: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_dvp); dunp = VTOUNIONFS(ap->a_dvp); cnp = ap->a_cnp; udvp = dunp->un_uppervp; error = EOPNOTSUPP; if (udvp != NULLVP) { int lkflags; switch (ap->a_flags) { case CREATE: case DELETE: case LOOKUP: unionfs_forward_vop_start(udvp, &lkflags); error = VOP_WHITEOUT(udvp, cnp, ap->a_flags); unionfs_forward_vop_finish(ap->a_dvp, udvp, lkflags); break; default: error = EINVAL; break; } } UNIONFS_INTERNAL_DEBUG("unionfs_whiteout: leave (%d)\n", error); return (error); } static int unionfs_mknod(struct vop_mknod_args *ap) { struct unionfs_node *dunp; struct componentname *cnp; struct vnode *udvp; struct vnode *vp; int error; UNIONFS_INTERNAL_DEBUG("unionfs_mknod: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_dvp); dunp = VTOUNIONFS(ap->a_dvp); cnp = ap->a_cnp; udvp = dunp->un_uppervp; error = EROFS; if (udvp != NULLVP) { int lkflags; bool vp_created = false; unionfs_forward_vop_start(udvp, &lkflags); error = VOP_MKNOD(udvp, &vp, cnp, ap->a_vap); if (error == 0) vp_created = true; if (__predict_false(unionfs_forward_vop_finish(ap->a_dvp, udvp, lkflags)) && error == 0) { error = ENOENT; } if (error == 0) { VOP_UNLOCK(vp); error = unionfs_nodeget(ap->a_dvp->v_mount, vp, NULLVP, ap->a_dvp, ap->a_vpp, cnp); vrele(vp); } else if (vp_created) vput(vp); } UNIONFS_INTERNAL_DEBUG("unionfs_mknod: leave (%d)\n", error); return (error); } enum unionfs_lkupgrade { UNIONFS_LKUPGRADE_SUCCESS, /* lock successfully upgraded */ UNIONFS_LKUPGRADE_ALREADY, /* lock already held exclusive */ UNIONFS_LKUPGRADE_DOOMED /* lock was upgraded, but vnode reclaimed */ }; static inline enum unionfs_lkupgrade unionfs_upgrade_lock(struct vnode *vp) { 
ASSERT_VOP_LOCKED(vp, __func__); if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) return (UNIONFS_LKUPGRADE_ALREADY); if (vn_lock(vp, LK_UPGRADE) != 0) { vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (VN_IS_DOOMED(vp)) return (UNIONFS_LKUPGRADE_DOOMED); } return (UNIONFS_LKUPGRADE_SUCCESS); } static inline void unionfs_downgrade_lock(struct vnode *vp, enum unionfs_lkupgrade status) { if (status != UNIONFS_LKUPGRADE_ALREADY) vn_lock(vp, LK_DOWNGRADE | LK_RETRY); } /* * Exchange the default (upper vnode) lock on a unionfs vnode for the lower * vnode lock, in support of operations that require access to the lower vnode * even when an upper vnode is present. We don't use vn_lock_pair() to hold * both vnodes at the same time, primarily because the caller may proceed * to issue VOPs to the lower layer which re-lock or perform other operations * which may not be safe in the presence of a locked vnode from another FS. * Moreover, vn_lock_pair()'s deadlock resolution approach can introduce * additional overhead that isn't necessary on these paths. * * vp must be a locked unionfs vnode; the lock state of this vnode is * returned through *lkflags for later use in unionfs_unlock_lvp(). * * Returns the locked lower vnode, or NULL if the lower vnode (and therefore * also the unionfs vnode above it) has been doomed. */ static struct vnode * unionfs_lock_lvp(struct vnode *vp, int *lkflags) { struct unionfs_node *unp; struct vnode *lvp; unp = VTOUNIONFS(vp); lvp = unp->un_lowervp; ASSERT_VOP_LOCKED(vp, __func__); ASSERT_VOP_UNLOCKED(lvp, __func__); *lkflags = VOP_ISLOCKED(vp); vref(lvp); VOP_UNLOCK(vp); vn_lock(lvp, *lkflags | LK_RETRY); if (VN_IS_DOOMED(lvp)) { vput(lvp); lvp = NULLVP; vn_lock(vp, *lkflags | LK_RETRY); } return (lvp); } /* * Undo a previous call to unionfs_lock_lvp(), restoring the default lock * on the unionfs vnode. This function reloads and returns the vnode * private data for the unionfs vnode, which will be NULL if the unionfs * vnode became doomed while its lock was dropped. The caller must check * for this case. */ static struct unionfs_node * unionfs_unlock_lvp(struct vnode *vp, struct vnode *lvp, int lkflags) { ASSERT_VOP_LOCKED(lvp, __func__); ASSERT_VOP_UNLOCKED(vp, __func__); vput(lvp); vn_lock(vp, lkflags | LK_RETRY); return (VTOUNIONFS(vp)); } static int unionfs_open(struct vop_open_args *ap) { struct unionfs_node *unp; struct unionfs_node_status *unsp; struct vnode *vp; struct vnode *uvp; struct vnode *lvp; struct vnode *targetvp; struct ucred *cred; struct thread *td; int error; int lkflags; enum unionfs_lkupgrade lkstatus; bool lock_lvp, open_lvp; UNIONFS_INTERNAL_DEBUG("unionfs_open: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); error = 0; vp = ap->a_vp; targetvp = NULLVP; cred = ap->a_cred; td = ap->a_td; open_lvp = lock_lvp = false; /* * The executable loader path may call this function with vp locked * shared. If the vnode is reclaimed while upgrading, we can't safely * use unp or do anything else unionfs- specific. */ lkstatus = unionfs_upgrade_lock(vp); if (lkstatus == UNIONFS_LKUPGRADE_DOOMED) { error = ENOENT; goto unionfs_open_cleanup; } unp = VTOUNIONFS(vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; unionfs_get_node_status(unp, td, &unsp); if (unsp->uns_lower_opencnt > 0 || unsp->uns_upper_opencnt > 0) { /* vnode is already opend. 
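 * Prefer whichever layer already has the file open: the upper vnode if
 * it has outstanding opens, otherwise the lower one.  A write open of a
 * regular file that is so far only open on the lower layer resets
 * targetvp to NULLVP so that the copy-up path below is taken instead.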
*/ if (unsp->uns_upper_opencnt > 0) targetvp = uvp; else targetvp = lvp; if (targetvp == lvp && (ap->a_mode & FWRITE) && lvp->v_type == VREG) targetvp = NULLVP; } if (targetvp == NULLVP) { if (uvp == NULLVP) { if ((ap->a_mode & FWRITE) && lvp->v_type == VREG) { error = unionfs_copyfile(vp, !(ap->a_mode & O_TRUNC), cred, td); if (error != 0) { unp = VTOUNIONFS(vp); goto unionfs_open_abort; } targetvp = uvp = unp->un_uppervp; } else targetvp = lvp; } else targetvp = uvp; } if (targetvp == uvp && uvp->v_type == VDIR && lvp != NULLVP && unsp->uns_lower_opencnt <= 0) open_lvp = true; else if (targetvp == lvp && uvp != NULLVP) lock_lvp = true; if (lock_lvp) { unp = NULL; lvp = unionfs_lock_lvp(vp, &lkflags); if (lvp == NULLVP) { error = ENOENT; goto unionfs_open_abort; } } else unionfs_forward_vop_start(targetvp, &lkflags); error = VOP_OPEN(targetvp, ap->a_mode, cred, td, ap->a_fp); if (lock_lvp) { unp = unionfs_unlock_lvp(vp, lvp, lkflags); if (unp == NULL && error == 0) error = ENOENT; } else if (unionfs_forward_vop_finish(vp, targetvp, lkflags)) error = error ? error : ENOENT; if (error != 0) goto unionfs_open_abort; if (targetvp == uvp) { if (open_lvp) { unp = NULL; lvp = unionfs_lock_lvp(vp, &lkflags); if (lvp == NULLVP) { error = ENOENT; goto unionfs_open_abort; } /* open lower for readdir */ error = VOP_OPEN(lvp, FREAD, cred, td, NULL); unp = unionfs_unlock_lvp(vp, lvp, lkflags); if (unp == NULL) { error = error ? error : ENOENT; goto unionfs_open_abort; } if (error != 0) { unionfs_forward_vop_start(uvp, &lkflags); VOP_CLOSE(uvp, ap->a_mode, cred, td); if (unionfs_forward_vop_finish(vp, uvp, lkflags)) unp = NULL; goto unionfs_open_abort; } unsp->uns_node_flag |= UNS_OPENL_4_READDIR; unsp->uns_lower_opencnt++; } unsp->uns_upper_opencnt++; } else { unsp->uns_lower_opencnt++; unsp->uns_lower_openmode = ap->a_mode; } vp->v_object = targetvp->v_object; unionfs_open_abort: if (error != 0 && unp != NULL) unionfs_tryrem_node_status(unp, unsp); unionfs_open_cleanup: unionfs_downgrade_lock(vp, lkstatus); UNIONFS_INTERNAL_DEBUG("unionfs_open: leave (%d)\n", error); return (error); } static int unionfs_close(struct vop_close_args *ap) { struct unionfs_node *unp; struct unionfs_node_status *unsp; struct ucred *cred; struct thread *td; struct vnode *vp; struct vnode *uvp; struct vnode *lvp; struct vnode *ovp; int error; int lkflags; enum unionfs_lkupgrade lkstatus; bool lock_lvp; UNIONFS_INTERNAL_DEBUG("unionfs_close: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); vp = ap->a_vp; cred = ap->a_cred; td = ap->a_td; error = 0; lock_lvp = false; /* * If the vnode is reclaimed while upgrading, we can't safely use unp * or do anything else unionfs- specific. 
*/ lkstatus = unionfs_upgrade_lock(vp); if (lkstatus == UNIONFS_LKUPGRADE_DOOMED) goto unionfs_close_cleanup; unp = VTOUNIONFS(vp); lvp = unp->un_lowervp; uvp = unp->un_uppervp; unsp = unionfs_find_node_status(unp, td); if (unsp == NULL || (unsp->uns_lower_opencnt <= 0 && unsp->uns_upper_opencnt <= 0)) { #ifdef DIAGNOSTIC if (unsp != NULL) printf("unionfs_close: warning: open count is 0\n"); #endif if (uvp != NULLVP) ovp = uvp; else ovp = lvp; } else if (unsp->uns_upper_opencnt > 0) ovp = uvp; else ovp = lvp; if (ovp == lvp && uvp != NULLVP) { lock_lvp = true; unp = NULL; lvp = unionfs_lock_lvp(vp, &lkflags); if (lvp == NULLVP) { error = ENOENT; goto unionfs_close_abort; } } else unionfs_forward_vop_start(ovp, &lkflags); error = VOP_CLOSE(ovp, ap->a_fflag, cred, td); if (lock_lvp) { unp = unionfs_unlock_lvp(vp, lvp, lkflags); if (unp == NULL && error == 0) error = ENOENT; } else if (unionfs_forward_vop_finish(vp, ovp, lkflags)) error = error ? error : ENOENT; if (error != 0) goto unionfs_close_abort; vp->v_object = ovp->v_object; if (ovp == uvp) { if (unsp != NULL && ((--unsp->uns_upper_opencnt) == 0)) { if (unsp->uns_node_flag & UNS_OPENL_4_READDIR) { unp = NULL; lvp = unionfs_lock_lvp(vp, &lkflags); if (lvp == NULLVP) { error = ENOENT; goto unionfs_close_abort; } VOP_CLOSE(lvp, FREAD, cred, td); unp = unionfs_unlock_lvp(vp, lvp, lkflags); if (unp == NULL) { error = ENOENT; goto unionfs_close_abort; } unsp->uns_node_flag &= ~UNS_OPENL_4_READDIR; unsp->uns_lower_opencnt--; } if (unsp->uns_lower_opencnt > 0) vp->v_object = lvp->v_object; } } else if (unsp != NULL) unsp->uns_lower_opencnt--; unionfs_close_abort: if (unp != NULL && unsp != NULL) unionfs_tryrem_node_status(unp, unsp); unionfs_close_cleanup: unionfs_downgrade_lock(vp, lkstatus); UNIONFS_INTERNAL_DEBUG("unionfs_close: leave (%d)\n", error); return (error); } /* * Check the access mode toward shadow file/dir. */ static int unionfs_check_corrected_access(accmode_t accmode, struct vattr *va, struct ucred *cred) { uid_t uid; /* upper side vnode's uid */ gid_t gid; /* upper side vnode's gid */ u_short vmode; /* upper side vnode's mode */ u_short mask; mask = 0; uid = va->va_uid; gid = va->va_gid; vmode = va->va_mode; /* check owner */ if (cred->cr_uid == uid) { if (accmode & VEXEC) mask |= S_IXUSR; if (accmode & VREAD) mask |= S_IRUSR; if (accmode & VWRITE) mask |= S_IWUSR; return ((vmode & mask) == mask ? 0 : EACCES); } /* check group */ if (groupmember(gid, cred)) { if (accmode & VEXEC) mask |= S_IXGRP; if (accmode & VREAD) mask |= S_IRGRP; if (accmode & VWRITE) mask |= S_IWGRP; return ((vmode & mask) == mask ? 0 : EACCES); } /* check other */ if (accmode & VEXEC) mask |= S_IXOTH; if (accmode & VREAD) mask |= S_IROTH; if (accmode & VWRITE) mask |= S_IWOTH; return ((vmode & mask) == mask ? 
0 : EACCES); } static int unionfs_access(struct vop_access_args *ap) { struct unionfs_mount *ump; struct unionfs_node *unp; struct vnode *uvp; struct vnode *lvp; struct thread *td; struct vattr va; accmode_t accmode; int error; UNIONFS_INTERNAL_DEBUG("unionfs_access: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); ump = MOUNTTOUNIONFSMOUNT(ap->a_vp->v_mount); unp = VTOUNIONFS(ap->a_vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; td = ap->a_td; accmode = ap->a_accmode; error = EACCES; if ((accmode & VWRITE) && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) { switch (ap->a_vp->v_type) { case VREG: case VDIR: case VLNK: return (EROFS); default: break; } } if (uvp != NULLVP) { error = VOP_ACCESS(uvp, accmode, ap->a_cred, td); UNIONFS_INTERNAL_DEBUG("unionfs_access: leave (%d)\n", error); return (error); } if (lvp != NULLVP) { if (accmode & VWRITE) { if ((ump->um_uppermp->mnt_flag & MNT_RDONLY) != 0) { switch (ap->a_vp->v_type) { case VREG: case VDIR: case VLNK: return (EROFS); default: break; } } else if (ap->a_vp->v_type == VREG || ap->a_vp->v_type == VDIR) { /* check shadow file/dir */ if (ump->um_copymode != UNIONFS_TRANSPARENT) { error = unionfs_create_uppervattr(ump, lvp, &va, ap->a_cred, td); if (error != 0) return (error); error = unionfs_check_corrected_access( accmode, &va, ap->a_cred); if (error != 0) return (error); } } accmode &= ~(VWRITE | VAPPEND); accmode |= VREAD; /* will copy to upper */ } error = VOP_ACCESS(lvp, accmode, ap->a_cred, td); } UNIONFS_INTERNAL_DEBUG("unionfs_access: leave (%d)\n", error); return (error); } static int unionfs_getattr(struct vop_getattr_args *ap) { struct unionfs_node *unp; struct unionfs_mount *ump; struct vnode *uvp; struct vnode *lvp; struct thread *td; struct vattr va; int error; UNIONFS_INTERNAL_DEBUG("unionfs_getattr: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); ump = MOUNTTOUNIONFSMOUNT(ap->a_vp->v_mount); uvp = unp->un_uppervp; lvp = unp->un_lowervp; td = curthread; if (uvp != NULLVP) { if ((error = VOP_GETATTR(uvp, ap->a_vap, ap->a_cred)) == 0) ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; UNIONFS_INTERNAL_DEBUG( "unionfs_getattr: leave mode=%o, uid=%d, gid=%d (%d)\n", ap->a_vap->va_mode, ap->a_vap->va_uid, ap->a_vap->va_gid, error); return (error); } error = VOP_GETATTR(lvp, ap->a_vap, ap->a_cred); if (error == 0 && (ump->um_uppermp->mnt_flag & MNT_RDONLY) == 0) { /* correct the attr toward shadow file/dir. 
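 * When only a lower vnode exists and the upper layer is writable,
 * report the mode/uid/gid a shadow file or directory would be created
 * with (as computed by unionfs_create_uppervattr_core()), so the
 * attributes seen here match what a later copy-up would produce.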
*/ if (ap->a_vp->v_type == VREG || ap->a_vp->v_type == VDIR) { unionfs_create_uppervattr_core(ump, ap->a_vap, &va, td); ap->a_vap->va_mode = va.va_mode; ap->a_vap->va_uid = va.va_uid; ap->a_vap->va_gid = va.va_gid; } } if (error == 0) ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; UNIONFS_INTERNAL_DEBUG( "unionfs_getattr: leave mode=%o, uid=%d, gid=%d (%d)\n", ap->a_vap->va_mode, ap->a_vap->va_uid, ap->a_vap->va_gid, error); return (error); } static int unionfs_setattr(struct vop_setattr_args *ap) { struct unionfs_node *unp; struct vnode *uvp; struct vnode *lvp; struct thread *td; struct vattr *vap; int error; UNIONFS_INTERNAL_DEBUG("unionfs_setattr: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); error = EROFS; unp = VTOUNIONFS(ap->a_vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; td = curthread; vap = ap->a_vap; if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) && (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL)) return (EROFS); if (uvp == NULLVP && lvp->v_type == VREG) { error = unionfs_copyfile(ap->a_vp, (vap->va_size != 0), ap->a_cred, td); if (error != 0) return (error); uvp = unp->un_uppervp; } if (uvp != NULLVP) { int lkflags; unionfs_forward_vop_start(uvp, &lkflags); error = VOP_SETATTR(uvp, vap, ap->a_cred); unionfs_forward_vop_finish(ap->a_vp, uvp, lkflags); } UNIONFS_INTERNAL_DEBUG("unionfs_setattr: leave (%d)\n", error); return (error); } static int unionfs_read(struct vop_read_args *ap) { struct unionfs_node *unp; struct vnode *tvp; int error; /* UNIONFS_INTERNAL_DEBUG("unionfs_read: enter\n"); */ KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); tvp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp); error = VOP_READ(tvp, ap->a_uio, ap->a_ioflag, ap->a_cred); /* UNIONFS_INTERNAL_DEBUG("unionfs_read: leave (%d)\n", error); */ return (error); } static int unionfs_write(struct vop_write_args *ap) { struct unionfs_node *unp; struct vnode *tvp; int error; int lkflags; /* UNIONFS_INTERNAL_DEBUG("unionfs_write: enter\n"); */ KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); tvp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp); unionfs_forward_vop_start(tvp, &lkflags); error = VOP_WRITE(tvp, ap->a_uio, ap->a_ioflag, ap->a_cred); unionfs_forward_vop_finish(ap->a_vp, tvp, lkflags); /* UNIONFS_INTERNAL_DEBUG("unionfs_write: leave (%d)\n", error); */ return (error); } static int unionfs_ioctl(struct vop_ioctl_args *ap) { struct unionfs_node *unp; struct unionfs_node_status *unsp; struct vnode *ovp; int error; UNIONFS_INTERNAL_DEBUG("unionfs_ioctl: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY); unp = VTOUNIONFS(ap->a_vp); unionfs_get_node_status(unp, ap->a_td, &unsp); ovp = (unsp->uns_upper_opencnt ? unp->un_uppervp : unp->un_lowervp); unionfs_tryrem_node_status(unp, unsp); VOP_UNLOCK(ap->a_vp); if (ovp == NULLVP) return (EBADF); error = VOP_IOCTL(ovp, ap->a_command, ap->a_data, ap->a_fflag, ap->a_cred, ap->a_td); UNIONFS_INTERNAL_DEBUG("unionfs_ioctl: leave (%d)\n", error); return (error); } static int unionfs_poll(struct vop_poll_args *ap) { struct unionfs_node *unp; struct unionfs_node_status *unsp; struct vnode *ovp; KASSERT_UNIONFS_VNODE(ap->a_vp); vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY); unp = VTOUNIONFS(ap->a_vp); unionfs_get_node_status(unp, ap->a_td, &unsp); ovp = (unsp->uns_upper_opencnt ? 
unp->un_uppervp : unp->un_lowervp); unionfs_tryrem_node_status(unp, unsp); VOP_UNLOCK(ap->a_vp); if (ovp == NULLVP) return (EBADF); return (VOP_POLL(ovp, ap->a_events, ap->a_cred, ap->a_td)); } static int unionfs_fsync(struct vop_fsync_args *ap) { struct unionfs_node *unp; struct unionfs_node_status *unsp; struct vnode *ovp; enum unionfs_lkupgrade lkstatus; int error, lkflags; KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); lkstatus = unionfs_upgrade_lock(ap->a_vp); if (lkstatus == UNIONFS_LKUPGRADE_DOOMED) { unionfs_downgrade_lock(ap->a_vp, lkstatus); return (ENOENT); } unionfs_get_node_status(unp, ap->a_td, &unsp); ovp = (unsp->uns_upper_opencnt ? unp->un_uppervp : unp->un_lowervp); unionfs_tryrem_node_status(unp, unsp); unionfs_downgrade_lock(ap->a_vp, lkstatus); if (ovp == NULLVP) return (EBADF); unionfs_forward_vop_start(ovp, &lkflags); error = VOP_FSYNC(ovp, ap->a_waitfor, ap->a_td); unionfs_forward_vop_finish(ap->a_vp, ovp, lkflags); return (error); } static int unionfs_remove(struct vop_remove_args *ap) { char *path; struct unionfs_node *dunp; struct unionfs_node *unp; struct unionfs_mount *ump; struct vnode *udvp; struct vnode *uvp; struct vnode *lvp; struct componentname *cnp; struct thread *td; int error; int pathlen; UNIONFS_INTERNAL_DEBUG("unionfs_remove: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_dvp); KASSERT_UNIONFS_VNODE(ap->a_vp); error = 0; dunp = VTOUNIONFS(ap->a_dvp); udvp = dunp->un_uppervp; cnp = ap->a_cnp; td = curthread; ump = MOUNTTOUNIONFSMOUNT(ap->a_vp->v_mount); unp = VTOUNIONFS(ap->a_vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; path = unp->un_path; pathlen = unp->un_pathlen; if (udvp == NULLVP) return (EROFS); if (uvp != NULLVP) { int udvp_lkflags, uvp_lkflags; if (ump == NULL || ump->um_whitemode == UNIONFS_WHITE_ALWAYS || lvp != NULLVP) cnp->cn_flags |= DOWHITEOUT; unionfs_forward_vop_start_pair(udvp, &udvp_lkflags, uvp, &uvp_lkflags); error = VOP_REMOVE(udvp, uvp, cnp); unionfs_forward_vop_finish_pair(ap->a_dvp, udvp, udvp_lkflags, ap->a_vp, uvp, uvp_lkflags); } else if (lvp != NULLVP) { error = unionfs_mkwhiteout(ap->a_dvp, ap->a_vp, cnp, td, path, pathlen); } UNIONFS_INTERNAL_DEBUG("unionfs_remove: leave (%d)\n", error); return (error); } static int unionfs_link(struct vop_link_args *ap) { struct unionfs_node *dunp; struct unionfs_node *unp; struct vnode *udvp; struct vnode *uvp; struct componentname *cnp; struct thread *td; int error; UNIONFS_INTERNAL_DEBUG("unionfs_link: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_tdvp); KASSERT_UNIONFS_VNODE(ap->a_vp); error = 0; dunp = VTOUNIONFS(ap->a_tdvp); unp = NULL; udvp = dunp->un_uppervp; uvp = NULLVP; cnp = ap->a_cnp; td = curthread; if (udvp == NULLVP) return (EROFS); unp = VTOUNIONFS(ap->a_vp); if (unp->un_uppervp == NULLVP) { if (ap->a_vp->v_type != VREG) return (EOPNOTSUPP); VOP_UNLOCK(ap->a_tdvp); error = unionfs_copyfile(ap->a_vp, 1, cnp->cn_cred, td); vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY); if (error == 0) error = ERELOOKUP; return (error); } uvp = unp->un_uppervp; if (error == 0) { int udvp_lkflags, uvp_lkflags; unionfs_forward_vop_start_pair(udvp, &udvp_lkflags, uvp, &uvp_lkflags); error = VOP_LINK(udvp, uvp, cnp); unionfs_forward_vop_finish_pair(ap->a_tdvp, udvp, udvp_lkflags, ap->a_vp, uvp, uvp_lkflags); } UNIONFS_INTERNAL_DEBUG("unionfs_link: leave (%d)\n", error); return (error); } static int unionfs_rename(struct vop_rename_args *ap) { struct vnode *fdvp; struct vnode *fvp; struct componentname *fcnp; struct vnode *tdvp; struct vnode *tvp; struct componentname *tcnp; struct thread 
*td; /* rename target vnodes */ struct vnode *rfdvp; struct vnode *rfvp; struct vnode *rtdvp; struct vnode *rtvp; struct unionfs_node *unp; int error; UNIONFS_INTERNAL_DEBUG("unionfs_rename: enter\n"); error = 0; fdvp = ap->a_fdvp; fvp = ap->a_fvp; fcnp = ap->a_fcnp; tdvp = ap->a_tdvp; tvp = ap->a_tvp; tcnp = ap->a_tcnp; td = curthread; rfdvp = fdvp; rfvp = fvp; rtdvp = tdvp; rtvp = tvp; /* check for cross device rename */ if (fvp->v_mount != tdvp->v_mount || (tvp != NULLVP && fvp->v_mount != tvp->v_mount)) { if (fvp->v_op != &unionfs_vnodeops) error = ENODEV; else error = EXDEV; goto unionfs_rename_abort; } /* Renaming a file to itself has no effect. */ if (fvp == tvp) goto unionfs_rename_abort; KASSERT_UNIONFS_VNODE(tdvp); if (tvp != NULLVP) KASSERT_UNIONFS_VNODE(tvp); if (fdvp != tdvp) VI_LOCK(fdvp); unp = VTOUNIONFS(fdvp); if (unp == NULL) { if (fdvp != tdvp) VI_UNLOCK(fdvp); error = ENOENT; goto unionfs_rename_abort; } #ifdef UNIONFS_IDBG_RENAME UNIONFS_INTERNAL_DEBUG("fdvp=%p, ufdvp=%p, lfdvp=%p\n", fdvp, unp->un_uppervp, unp->un_lowervp); #endif if (unp->un_uppervp == NULLVP) { error = ENODEV; } else { rfdvp = unp->un_uppervp; vref(rfdvp); } if (fdvp != tdvp) VI_UNLOCK(fdvp); if (error != 0) goto unionfs_rename_abort; VI_LOCK(fvp); unp = VTOUNIONFS(fvp); if (unp == NULL) { VI_UNLOCK(fvp); error = ENOENT; goto unionfs_rename_abort; } #ifdef UNIONFS_IDBG_RENAME UNIONFS_INTERNAL_DEBUG("fvp=%p, ufvp=%p, lfvp=%p\n", fvp, unp->un_uppervp, unp->un_lowervp); #endif /* * If we only have a lower vnode, copy the source file to the upper * FS so that the rename operation can be issued against the upper FS. */ if (unp->un_uppervp == NULLVP) { bool unlock_fdvp = false, relock_tdvp = false; VI_UNLOCK(fvp); if (tvp != NULLVP) VOP_UNLOCK(tvp); if (fvp->v_type == VREG) { /* * For regular files, unionfs_copyfile() will expect * fdvp's upper parent directory vnode to be unlocked * and will temporarily lock it. If fdvp == tdvp, we * should unlock tdvp to avoid recursion on tdvp's * lock. If fdvp != tdvp, we should also unlock tdvp * to avoid potential deadlock due to holding tdvp's * lock while locking unrelated vnodes associated with * fdvp/fvp. */ VOP_UNLOCK(tdvp); relock_tdvp = true; } else if (fvp->v_type == VDIR && tdvp != fdvp) { /* * For directories, unionfs_mkshadowdir() will expect * fdvp's upper parent directory vnode to be locked * and will temporarily unlock it. If fdvp == tdvp, * we can therefore leave tdvp locked. If fdvp != * tdvp, we should exchange the lock on tdvp for a * lock on fdvp. */ VOP_UNLOCK(tdvp); unlock_fdvp = true; relock_tdvp = true; vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY); } vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY); unp = VTOUNIONFS(fvp); if (unp == NULL) error = ENOENT; else if (unp->un_uppervp == NULLVP) { switch (fvp->v_type) { case VREG: error = unionfs_copyfile(fvp, 1, fcnp->cn_cred, td); break; case VDIR: error = unionfs_mkshadowdir(fdvp, fvp, fcnp, td); break; default: error = ENODEV; break; } } VOP_UNLOCK(fvp); if (unlock_fdvp) VOP_UNLOCK(fdvp); if (relock_tdvp) vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY); if (tvp != NULLVP) vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY); /* * Since we've dropped tdvp's lock at some point in the copy * sequence above, force the caller to re-drive the lookup * in case the relationship between tdvp and tvp has changed. 
*/ if (error == 0) error = ERELOOKUP; goto unionfs_rename_abort; } if (unp->un_lowervp != NULLVP) fcnp->cn_flags |= DOWHITEOUT; rfvp = unp->un_uppervp; vref(rfvp); VI_UNLOCK(fvp); unp = VTOUNIONFS(tdvp); #ifdef UNIONFS_IDBG_RENAME UNIONFS_INTERNAL_DEBUG("tdvp=%p, utdvp=%p, ltdvp=%p\n", tdvp, unp->un_uppervp, unp->un_lowervp); #endif if (unp->un_uppervp == NULLVP) { error = ENODEV; goto unionfs_rename_abort; } rtdvp = unp->un_uppervp; vref(rtdvp); if (tvp != NULLVP) { unp = VTOUNIONFS(tvp); if (unp == NULL) { error = ENOENT; goto unionfs_rename_abort; } #ifdef UNIONFS_IDBG_RENAME UNIONFS_INTERNAL_DEBUG("tvp=%p, utvp=%p, ltvp=%p\n", tvp, unp->un_uppervp, unp->un_lowervp); #endif if (unp->un_uppervp == NULLVP) rtvp = NULLVP; else { if (tvp->v_type == VDIR) { error = EINVAL; goto unionfs_rename_abort; } rtvp = unp->un_uppervp; vref(rtvp); } } if (rfvp == rtvp) goto unionfs_rename_abort; error = VOP_RENAME(rfdvp, rfvp, fcnp, rtdvp, rtvp, tcnp); if (error == 0) { if (rtvp != NULLVP && rtvp->v_type == VDIR) cache_purge(tdvp); if (fvp->v_type == VDIR && fdvp != tdvp) cache_purge(fdvp); } if (tdvp != rtdvp) vrele(tdvp); if (tvp != rtvp && tvp != NULLVP) { if (rtvp == NULLVP) vput(tvp); else vrele(tvp); } if (fdvp != rfdvp) vrele(fdvp); if (fvp != rfvp) vrele(fvp); UNIONFS_INTERNAL_DEBUG("unionfs_rename: leave (%d)\n", error); return (error); unionfs_rename_abort: vput(tdvp); if (tdvp != rtdvp) vrele(rtdvp); if (tvp != NULLVP) { if (tdvp != tvp) vput(tvp); else vrele(tvp); } if (tvp != rtvp && rtvp != NULLVP) vrele(rtvp); if (fdvp != rfdvp) vrele(rfdvp); if (fvp != rfvp) vrele(rfvp); vrele(fdvp); vrele(fvp); UNIONFS_INTERNAL_DEBUG("unionfs_rename: leave (%d)\n", error); return (error); } static int unionfs_mkdir(struct vop_mkdir_args *ap) { struct unionfs_node *dunp; struct componentname *cnp; struct vnode *dvp; struct vnode *udvp; struct vnode *uvp; struct vattr va; int error; int lkflags; UNIONFS_INTERNAL_DEBUG("unionfs_mkdir: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_dvp); error = EROFS; dvp = ap->a_dvp; dunp = VTOUNIONFS(dvp); cnp = ap->a_cnp; lkflags = cnp->cn_lkflags; udvp = dunp->un_uppervp; if (udvp != NULLVP) { /* check opaque */ if (!(cnp->cn_flags & ISWHITEOUT)) { error = VOP_GETATTR(udvp, &va, cnp->cn_cred); if (error != 0) goto unionfs_mkdir_cleanup; if ((va.va_flags & OPAQUE) != 0) cnp->cn_flags |= ISWHITEOUT; } int udvp_lkflags; bool uvp_created = false; unionfs_forward_vop_start(udvp, &udvp_lkflags); error = VOP_MKDIR(udvp, &uvp, cnp, ap->a_vap); if (error == 0) uvp_created = true; if (__predict_false(unionfs_forward_vop_finish(dvp, udvp, udvp_lkflags)) && error == 0) error = ENOENT; if (error == 0) { VOP_UNLOCK(uvp); cnp->cn_lkflags = LK_EXCLUSIVE; error = unionfs_nodeget(dvp->v_mount, uvp, NULLVP, dvp, ap->a_vpp, cnp); vrele(uvp); cnp->cn_lkflags = lkflags; } else if (uvp_created) vput(uvp); } unionfs_mkdir_cleanup: UNIONFS_INTERNAL_DEBUG("unionfs_mkdir: leave (%d)\n", error); return (error); } static int unionfs_rmdir(struct vop_rmdir_args *ap) { struct unionfs_node *dunp; struct unionfs_node *unp; struct unionfs_mount *ump; struct componentname *cnp; struct thread *td; struct vnode *udvp; struct vnode *uvp; struct vnode *lvp; int error; UNIONFS_INTERNAL_DEBUG("unionfs_rmdir: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_dvp); KASSERT_UNIONFS_VNODE(ap->a_vp); error = 0; dunp = VTOUNIONFS(ap->a_dvp); unp = VTOUNIONFS(ap->a_vp); cnp = ap->a_cnp; td = curthread; udvp = dunp->un_uppervp; uvp = unp->un_uppervp; lvp = unp->un_lowervp; if (udvp == NULLVP) return (EROFS); if (udvp == uvp) return 
(EOPNOTSUPP); if (uvp != NULLVP) { if (lvp != NULLVP) { /* * We need to keep dvp and vp's upper vnodes locked * going into the VOP_RMDIR() call, but the empty * directory check also requires the lower vnode lock. * For this third, cross-filesystem lock we use a * similar approach taken by various FS' VOP_RENAME * implementations (which require 2-4 vnode locks). * First we attempt a NOWAIT acquisition, then if * that fails we drops the other two vnode locks, * acquire lvp's lock in the normal fashion to reduce * the likelihood of spinning on it in the future, * then drop, reacquire the other locks, and return * ERELOOKUP to re-drive the lookup in case the dvp-> * vp relationship has changed. */ if (vn_lock(lvp, LK_SHARED | LK_NOWAIT) != 0) { VOP_UNLOCK(ap->a_vp); VOP_UNLOCK(ap->a_dvp); vn_lock(lvp, LK_SHARED | LK_RETRY); VOP_UNLOCK(lvp); vn_lock(ap->a_dvp, LK_EXCLUSIVE | LK_RETRY); vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY); return (ERELOOKUP); } error = unionfs_check_rmdir(ap->a_vp, cnp->cn_cred, td); /* * It's possible for a direct operation on the lower FS * to make the lower directory non-empty after we drop * the lock, but it's also possible for the upper-layer * VOP_RMDIR to relock udvp/uvp which would lead to * LOR if we kept lvp locked across that call. */ VOP_UNLOCK(lvp); if (error != 0) return (error); } ump = MOUNTTOUNIONFSMOUNT(ap->a_vp->v_mount); if (ump->um_whitemode == UNIONFS_WHITE_ALWAYS || lvp != NULLVP) cnp->cn_flags |= (DOWHITEOUT | IGNOREWHITEOUT); int udvp_lkflags, uvp_lkflags; unionfs_forward_vop_start_pair(udvp, &udvp_lkflags, uvp, &uvp_lkflags); error = VOP_RMDIR(udvp, uvp, cnp); unionfs_forward_vop_finish_pair(ap->a_dvp, udvp, udvp_lkflags, ap->a_vp, uvp, uvp_lkflags); } else if (lvp != NULLVP) { error = unionfs_mkwhiteout(ap->a_dvp, ap->a_vp, cnp, td, unp->un_path, unp->un_pathlen); } if (error == 0) { cache_purge(ap->a_dvp); cache_purge(ap->a_vp); } UNIONFS_INTERNAL_DEBUG("unionfs_rmdir: leave (%d)\n", error); return (error); } static int unionfs_symlink(struct vop_symlink_args *ap) { struct unionfs_node *dunp; struct componentname *cnp; struct vnode *udvp; struct vnode *uvp; int error; int lkflags; UNIONFS_INTERNAL_DEBUG("unionfs_symlink: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_dvp); error = EROFS; dunp = VTOUNIONFS(ap->a_dvp); cnp = ap->a_cnp; lkflags = cnp->cn_lkflags; udvp = dunp->un_uppervp; if (udvp != NULLVP) { int udvp_lkflags; bool uvp_created = false; unionfs_forward_vop_start(udvp, &udvp_lkflags); error = VOP_SYMLINK(udvp, &uvp, cnp, ap->a_vap, ap->a_target); if (error == 0) uvp_created = true; if (__predict_false(unionfs_forward_vop_finish(ap->a_dvp, udvp, udvp_lkflags)) && error == 0) error = ENOENT; if (error == 0) { VOP_UNLOCK(uvp); cnp->cn_lkflags = LK_EXCLUSIVE; error = unionfs_nodeget(ap->a_dvp->v_mount, uvp, NULLVP, ap->a_dvp, ap->a_vpp, cnp); vrele(uvp); cnp->cn_lkflags = lkflags; } else if (uvp_created) vput(uvp); } UNIONFS_INTERNAL_DEBUG("unionfs_symlink: leave (%d)\n", error); return (error); } static int unionfs_readdir(struct vop_readdir_args *ap) { struct unionfs_node *unp; struct unionfs_node_status *unsp; struct uio *uio; struct vnode *vp; struct vnode *uvp; struct vnode *lvp; struct thread *td; struct vattr va; uint64_t *cookies_bk; int error; int eofflag; int lkflags; int ncookies_bk; int uio_offset_bk; enum unionfs_lkupgrade lkstatus; UNIONFS_INTERNAL_DEBUG("unionfs_readdir: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); error = 0; eofflag = 0; uio_offset_bk = 0; uio = ap->a_uio; uvp = NULLVP; lvp = NULLVP; td = uio->uio_td; ncookies_bk 
= 0; cookies_bk = NULL; vp = ap->a_vp; if (vp->v_type != VDIR) return (ENOTDIR); /* * If the vnode is reclaimed while upgrading, we can't safely use unp * or do anything else unionfs- specific. */ lkstatus = unionfs_upgrade_lock(vp); if (lkstatus == UNIONFS_LKUPGRADE_DOOMED) error = EBADF; if (error == 0) { unp = VTOUNIONFS(vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; /* check the open count. unionfs needs open before readdir. */ unionfs_get_node_status(unp, td, &unsp); if ((uvp != NULLVP && unsp->uns_upper_opencnt <= 0) || (lvp != NULLVP && unsp->uns_lower_opencnt <= 0)) { unionfs_tryrem_node_status(unp, unsp); error = EBADF; } } unionfs_downgrade_lock(vp, lkstatus); if (error != 0) goto unionfs_readdir_exit; /* check opaque */ if (uvp != NULLVP && lvp != NULLVP) { if ((error = VOP_GETATTR(uvp, &va, ap->a_cred)) != 0) goto unionfs_readdir_exit; if (va.va_flags & OPAQUE) lvp = NULLVP; } /* upper only */ if (uvp != NULLVP && lvp == NULLVP) { unionfs_forward_vop_start(uvp, &lkflags); error = VOP_READDIR(uvp, uio, ap->a_cred, ap->a_eofflag, ap->a_ncookies, ap->a_cookies); if (unionfs_forward_vop_finish(vp, uvp, lkflags)) error = error ? error : ENOENT; else unsp->uns_readdir_status = 0; goto unionfs_readdir_exit; } /* lower only */ if (uvp == NULLVP && lvp != NULLVP) { unionfs_forward_vop_start(lvp, &lkflags); error = VOP_READDIR(lvp, uio, ap->a_cred, ap->a_eofflag, ap->a_ncookies, ap->a_cookies); if (unionfs_forward_vop_finish(vp, lvp, lkflags)) error = error ? error : ENOENT; else unsp->uns_readdir_status = 2; goto unionfs_readdir_exit; } /* * readdir upper and lower */ KASSERT(uvp != NULLVP, ("unionfs_readdir: null upper vp")); KASSERT(lvp != NULLVP, ("unionfs_readdir: null lower vp")); if (uio->uio_offset == 0) unsp->uns_readdir_status = 0; if (unsp->uns_readdir_status == 0) { /* read upper */ unionfs_forward_vop_start(uvp, &lkflags); error = VOP_READDIR(uvp, uio, ap->a_cred, &eofflag, ap->a_ncookies, ap->a_cookies); if (unionfs_forward_vop_finish(vp, uvp, lkflags) && error == 0) error = ENOENT; if (error != 0 || eofflag == 0) goto unionfs_readdir_exit; unsp->uns_readdir_status = 1; /* * UFS(and other FS) needs size of uio_resid larger than * DIRBLKSIZ. * size of DIRBLKSIZ equals DEV_BSIZE. * (see: ufs/ufs/ufs_vnops.c ufs_readdir func , ufs/ufs/dir.h) */ if (uio->uio_resid <= (uio->uio_resid & (DEV_BSIZE -1))) goto unionfs_readdir_exit; /* * Backup cookies. * It prepares to readdir in lower. */ if (ap->a_ncookies != NULL) { ncookies_bk = *(ap->a_ncookies); *(ap->a_ncookies) = 0; } if (ap->a_cookies != NULL) { cookies_bk = *(ap->a_cookies); *(ap->a_cookies) = NULL; } } /* initialize for readdir in lower */ if (unsp->uns_readdir_status == 1) { unsp->uns_readdir_status = 2; /* * Backup uio_offset. See the comment after the * VOP_READDIR call on the lower layer. */ uio_offset_bk = uio->uio_offset; uio->uio_offset = 0; } lvp = unionfs_lock_lvp(vp, &lkflags); if (lvp == NULL) { error = ENOENT; goto unionfs_readdir_exit; } /* read lower */ error = VOP_READDIR(lvp, uio, ap->a_cred, ap->a_eofflag, ap->a_ncookies, ap->a_cookies); unp = unionfs_unlock_lvp(vp, lvp, lkflags); if (unp == NULL && error == 0) error = ENOENT; /* * We can't return an uio_offset of 0: this would trigger an * infinite loop, because the next call to unionfs_readdir would * always restart with the upper layer (uio_offset == 0) and * always return some data. * * This happens when the lower layer root directory is removed. * (A root directory deleting of unionfs should not be permitted. * But current VFS can not do it.) 
*/ if (uio->uio_offset == 0) uio->uio_offset = uio_offset_bk; if (cookies_bk != NULL) { /* merge cookies */ int size; uint64_t *newcookies, *pos; size = *(ap->a_ncookies) + ncookies_bk; newcookies = (uint64_t *) malloc(size * sizeof(*newcookies), M_TEMP, M_WAITOK); pos = newcookies; memcpy(pos, cookies_bk, ncookies_bk * sizeof(*newcookies)); pos += ncookies_bk; memcpy(pos, *(ap->a_cookies), *(ap->a_ncookies) * sizeof(*newcookies)); free(cookies_bk, M_TEMP); free(*(ap->a_cookies), M_TEMP); *(ap->a_ncookies) = size; *(ap->a_cookies) = newcookies; } unionfs_readdir_exit: if (error != 0 && ap->a_eofflag != NULL) *(ap->a_eofflag) = 1; UNIONFS_INTERNAL_DEBUG("unionfs_readdir: leave (%d)\n", error); return (error); } static int unionfs_readlink(struct vop_readlink_args *ap) { struct unionfs_node *unp; struct vnode *vp; int error; UNIONFS_INTERNAL_DEBUG("unionfs_readlink: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp); error = VOP_READLINK(vp, ap->a_uio, ap->a_cred); UNIONFS_INTERNAL_DEBUG("unionfs_readlink: leave (%d)\n", error); return (error); } static int unionfs_getwritemount(struct vop_getwritemount_args *ap) { struct unionfs_node *unp; struct vnode *uvp; struct vnode *vp, *ovp; int error; UNIONFS_INTERNAL_DEBUG("unionfs_getwritemount: enter\n"); error = 0; vp = ap->a_vp; uvp = NULLVP; VI_LOCK(vp); unp = VTOUNIONFS(vp); if (unp != NULL) uvp = unp->un_uppervp; /* * If our node has no upper vnode, check the parent directory. * We may be initiating a write operation that will produce a * new upper vnode through CoW. */ if (uvp == NULLVP && unp != NULL) { ovp = vp; vp = unp->un_dvp; /* * Only the root vnode should have an empty parent, but it * should not have an empty uppervp, so we shouldn't get here. 
*/ VNASSERT(vp != NULL, ovp, ("%s: NULL parent vnode", __func__)); VI_UNLOCK(ovp); VI_LOCK(vp); unp = VTOUNIONFS(vp); if (unp != NULL) uvp = unp->un_uppervp; if (uvp == NULLVP) error = EACCES; } if (uvp != NULLVP) { vholdnz(uvp); VI_UNLOCK(vp); error = VOP_GETWRITEMOUNT(uvp, ap->a_mpp); vdrop(uvp); } else { VI_UNLOCK(vp); *(ap->a_mpp) = NULL; } UNIONFS_INTERNAL_DEBUG("unionfs_getwritemount: leave (%d)\n", error); return (error); } static int unionfs_inactive(struct vop_inactive_args *ap) { ap->a_vp->v_object = NULL; vrecycle(ap->a_vp); return (0); } static int unionfs_reclaim(struct vop_reclaim_args *ap) { /* UNIONFS_INTERNAL_DEBUG("unionfs_reclaim: enter\n"); */ unionfs_noderem(ap->a_vp); /* UNIONFS_INTERNAL_DEBUG("unionfs_reclaim: leave\n"); */ return (0); } static int unionfs_print(struct vop_print_args *ap) { struct unionfs_node *unp; /* struct unionfs_node_status *unsp; */ unp = VTOUNIONFS(ap->a_vp); /* unionfs_get_node_status(unp, curthread, &unsp); */ printf("unionfs_vp=%p, uppervp=%p, lowervp=%p\n", ap->a_vp, unp->un_uppervp, unp->un_lowervp); /* printf("unionfs opencnt: uppervp=%d, lowervp=%d\n", unsp->uns_upper_opencnt, unsp->uns_lower_opencnt); */ if (unp->un_uppervp != NULLVP) vn_printf(unp->un_uppervp, "unionfs: upper "); if (unp->un_lowervp != NULLVP) vn_printf(unp->un_lowervp, "unionfs: lower "); return (0); } static int unionfs_lock(struct vop_lock1_args *ap) { struct unionfs_node *unp; struct vnode *vp; struct vnode *tvp; int error; int flags; bool lvp_locked; error = 0; flags = ap->a_flags; vp = ap->a_vp; if (LK_RELEASE == (flags & LK_TYPE_MASK) || !(flags & LK_TYPE_MASK)) return (VOP_UNLOCK_FLAGS(vp, flags | LK_RELEASE)); unionfs_lock_restart: /* * We currently need the interlock here to ensure we can safely * access the unionfs vnode's private data. We may be able to * eliminate this extra locking by instead using vfs_smr_enter() * and vn_load_v_data_smr() here in conjunction with an SMR UMA * zone for unionfs nodes. */ if ((flags & LK_INTERLOCK) == 0) VI_LOCK(vp); else flags &= ~LK_INTERLOCK; unp = VTOUNIONFS(vp); if (unp == NULL) { VI_UNLOCK(vp); ap->a_flags = flags; return (vop_stdlock(ap)); } if (unp->un_uppervp != NULL) { tvp = unp->un_uppervp; lvp_locked = false; } else { tvp = unp->un_lowervp; lvp_locked = true; } /* * During unmount, the root vnode lock may be taken recursively, * because it may share the same v_vnlock field as the vnode covered by * the unionfs mount. The covered vnode is locked across VFS_UNMOUNT(), * and the same lock may be taken recursively here during vflush() * issued by unionfs_unmount(). */ if ((flags & LK_TYPE_MASK) == LK_EXCLUSIVE && (vp->v_vflag & VV_ROOT) != 0) flags |= LK_CANRECURSE; vholdnz(tvp); VI_UNLOCK(vp); error = VOP_LOCK(tvp, flags); vdrop(tvp); if (error == 0 && (lvp_locked || VTOUNIONFS(vp) == NULL)) { /* * After dropping the interlock above, there exists a window * in which another thread may acquire the lower vnode lock * and then either doom the unionfs vnode or create an upper * vnode. In either case, we will effectively be holding the * wrong lock, so we must drop the lower vnode lock and * restart the lock operation. * * If unp is not already NULL, we assume that we can safely * access it because we currently hold lvp's lock. * unionfs_noderem() acquires lvp's lock before freeing * the vnode private data, ensuring it can't be concurrently * freed while we are using it here. Likewise, * unionfs_node_update() acquires lvp's lock before installing * an upper vnode. 
Without those guarantees, we would need to * reacquire the vnode interlock here. * Note that unionfs_noderem() doesn't acquire lvp's lock if * this is the root vnode, but the root vnode should always * have an upper vnode and therefore we should never use its * lower vnode lock here. */ unp = VTOUNIONFS(vp); if (unp == NULL || unp->un_uppervp != NULLVP) { VOP_UNLOCK(tvp); /* * If we previously held the lock, the upgrade may * have temporarily dropped the lock, in which case * concurrent dooming or copy-up will necessitate * acquiring a different lock. Since we never held * the new lock, LK_UPGRADE must be cleared here to * avoid triggering a lockmgr panic. */ if (flags & LK_UPGRADE) flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE; VNASSERT((flags & LK_DOWNGRADE) == 0, vp, ("%s: vnode doomed during downgrade", __func__)); goto unionfs_lock_restart; } } return (error); } static int unionfs_unlock(struct vop_unlock_args *ap) { struct vnode *vp; struct vnode *tvp; struct unionfs_node *unp; int error; KASSERT_UNIONFS_VNODE(ap->a_vp); vp = ap->a_vp; unp = VTOUNIONFS(vp); if (unp == NULL) return (vop_stdunlock(ap)); tvp = (unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp); vholdnz(tvp); error = VOP_UNLOCK(tvp); vdrop(tvp); return (error); } static int unionfs_pathconf(struct vop_pathconf_args *ap) { struct unionfs_node *unp; struct vnode *vp; KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp); return (VOP_PATHCONF(vp, ap->a_name, ap->a_retval)); } static int unionfs_advlock(struct vop_advlock_args *ap) { struct unionfs_node *unp; struct unionfs_node_status *unsp; struct vnode *vp; struct vnode *uvp; struct thread *td; int error; UNIONFS_INTERNAL_DEBUG("unionfs_advlock: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); vp = ap->a_vp; td = curthread; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); unp = VTOUNIONFS(ap->a_vp); uvp = unp->un_uppervp; if (uvp == NULLVP) { error = unionfs_copyfile(ap->a_vp, 1, td->td_ucred, td); if (error != 0) goto unionfs_advlock_abort; uvp = unp->un_uppervp; unionfs_get_node_status(unp, td, &unsp); if (unsp->uns_lower_opencnt > 0) { /* try reopen the vnode */ error = VOP_OPEN(uvp, unsp->uns_lower_openmode, td->td_ucred, td, NULL); if (error) goto unionfs_advlock_abort; unsp->uns_upper_opencnt++; VOP_CLOSE(unp->un_lowervp, unsp->uns_lower_openmode, td->td_ucred, td); unsp->uns_lower_opencnt--; } else unionfs_tryrem_node_status(unp, unsp); } VOP_UNLOCK(vp); error = VOP_ADVLOCK(uvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags); UNIONFS_INTERNAL_DEBUG("unionfs_advlock: leave (%d)\n", error); return error; unionfs_advlock_abort: VOP_UNLOCK(vp); UNIONFS_INTERNAL_DEBUG("unionfs_advlock: leave (%d)\n", error); return error; } static int unionfs_strategy(struct vop_strategy_args *ap) { struct unionfs_node *unp; struct vnode *vp; KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp); #ifdef DIAGNOSTIC if (vp == NULLVP) panic("unionfs_strategy: nullvp"); if (ap->a_bp->b_iocmd == BIO_WRITE && vp == unp->un_lowervp) panic("unionfs_strategy: writing to lowervp"); #endif return (VOP_STRATEGY(vp, ap->a_bp)); } static int unionfs_getacl(struct vop_getacl_args *ap) { struct unionfs_node *unp; struct vnode *vp; int error; KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); vp = (unp->un_uppervp != NULLVP ? 
unp->un_uppervp : unp->un_lowervp); UNIONFS_INTERNAL_DEBUG("unionfs_getacl: enter\n"); error = VOP_GETACL(vp, ap->a_type, ap->a_aclp, ap->a_cred, ap->a_td); UNIONFS_INTERNAL_DEBUG("unionfs_getacl: leave (%d)\n", error); return (error); } static int unionfs_setacl(struct vop_setacl_args *ap) { struct unionfs_node *unp; struct vnode *uvp; struct vnode *lvp; struct thread *td; int error; UNIONFS_INTERNAL_DEBUG("unionfs_setacl: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); error = EROFS; unp = VTOUNIONFS(ap->a_vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; td = ap->a_td; if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); if (uvp == NULLVP && lvp->v_type == VREG) { if ((error = unionfs_copyfile(ap->a_vp, 1, ap->a_cred, td)) != 0) return (error); uvp = unp->un_uppervp; } if (uvp != NULLVP) { int lkflags; unionfs_forward_vop_start(uvp, &lkflags); error = VOP_SETACL(uvp, ap->a_type, ap->a_aclp, ap->a_cred, td); unionfs_forward_vop_finish(ap->a_vp, uvp, lkflags); } UNIONFS_INTERNAL_DEBUG("unionfs_setacl: leave (%d)\n", error); return (error); } static int unionfs_aclcheck(struct vop_aclcheck_args *ap) { struct unionfs_node *unp; struct vnode *vp; int error; UNIONFS_INTERNAL_DEBUG("unionfs_aclcheck: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp); error = VOP_ACLCHECK(vp, ap->a_type, ap->a_aclp, ap->a_cred, ap->a_td); UNIONFS_INTERNAL_DEBUG("unionfs_aclcheck: leave (%d)\n", error); return (error); } static int unionfs_openextattr(struct vop_openextattr_args *ap) { struct unionfs_node *unp; struct vnode *vp; struct vnode *tvp; int error; KASSERT_UNIONFS_VNODE(ap->a_vp); vp = ap->a_vp; unp = VTOUNIONFS(vp); tvp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp); if ((tvp == unp->un_uppervp && (unp->un_flag & UNIONFS_OPENEXTU)) || (tvp == unp->un_lowervp && (unp->un_flag & UNIONFS_OPENEXTL))) return (EBUSY); error = VOP_OPENEXTATTR(tvp, ap->a_cred, ap->a_td); if (error == 0) { if (vn_lock(vp, LK_UPGRADE) != 0) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (!VN_IS_DOOMED(vp)) { if (tvp == unp->un_uppervp) unp->un_flag |= UNIONFS_OPENEXTU; else unp->un_flag |= UNIONFS_OPENEXTL; } vn_lock(vp, LK_DOWNGRADE | LK_RETRY); } return (error); } static int unionfs_closeextattr(struct vop_closeextattr_args *ap) { struct unionfs_node *unp; struct vnode *vp; struct vnode *tvp; int error; KASSERT_UNIONFS_VNODE(ap->a_vp); vp = ap->a_vp; unp = VTOUNIONFS(vp); tvp = NULLVP; if (unp->un_flag & UNIONFS_OPENEXTU) tvp = unp->un_uppervp; else if (unp->un_flag & UNIONFS_OPENEXTL) tvp = unp->un_lowervp; if (tvp == NULLVP) return (EOPNOTSUPP); error = VOP_CLOSEEXTATTR(tvp, ap->a_commit, ap->a_cred, ap->a_td); if (error == 0) { if (vn_lock(vp, LK_UPGRADE) != 0) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (!VN_IS_DOOMED(vp)) { if (tvp == unp->un_uppervp) unp->un_flag &= ~UNIONFS_OPENEXTU; else unp->un_flag &= ~UNIONFS_OPENEXTL; } vn_lock(vp, LK_DOWNGRADE | LK_RETRY); } return (error); } static int unionfs_getextattr(struct vop_getextattr_args *ap) { struct unionfs_node *unp; struct vnode *vp; KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); vp = NULLVP; if (unp->un_flag & UNIONFS_OPENEXTU) vp = unp->un_uppervp; else if (unp->un_flag & UNIONFS_OPENEXTL) vp = unp->un_lowervp; if (vp == NULLVP) return (EOPNOTSUPP); return (VOP_GETEXTATTR(vp, ap->a_attrnamespace, ap->a_name, ap->a_uio, ap->a_size, ap->a_cred, ap->a_td)); } static int unionfs_setextattr(struct vop_setextattr_args *ap) { struct unionfs_node *unp; 
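/*
 * Extended-attribute writes go to whichever layer was opened by
 * unionfs_openextattr() (tracked via UNIONFS_OPENEXTU/UNIONFS_OPENEXTL).
 * If that layer is the lower one and the target is a regular file, the
 * file is first copied up and the extattr handle reopened on the new
 * upper vnode, so the attribute update itself is only ever issued
 * against the upper layer.
 */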
struct vnode *uvp; struct vnode *lvp; struct vnode *ovp; struct ucred *cred; struct thread *td; int error; KASSERT_UNIONFS_VNODE(ap->a_vp); error = EROFS; unp = VTOUNIONFS(ap->a_vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; ovp = NULLVP; cred = ap->a_cred; td = ap->a_td; UNIONFS_INTERNAL_DEBUG("unionfs_setextattr: enter (un_flag=%x)\n", unp->un_flag); if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); if (unp->un_flag & UNIONFS_OPENEXTU) ovp = unp->un_uppervp; else if (unp->un_flag & UNIONFS_OPENEXTL) ovp = unp->un_lowervp; if (ovp == NULLVP) return (EOPNOTSUPP); if (ovp == lvp && lvp->v_type == VREG) { VOP_CLOSEEXTATTR(lvp, 0, cred, td); if (uvp == NULLVP && (error = unionfs_copyfile(ap->a_vp, 1, cred, td)) != 0) { unionfs_setextattr_reopen: unp = VTOUNIONFS(ap->a_vp); if (unp != NULL && (unp->un_flag & UNIONFS_OPENEXTL) && VOP_OPENEXTATTR(lvp, cred, td)) { #ifdef DIAGNOSTIC panic("unionfs: VOP_OPENEXTATTR failed"); #endif unp->un_flag &= ~UNIONFS_OPENEXTL; } goto unionfs_setextattr_abort; } uvp = unp->un_uppervp; if ((error = VOP_OPENEXTATTR(uvp, cred, td)) != 0) goto unionfs_setextattr_reopen; unp->un_flag &= ~UNIONFS_OPENEXTL; unp->un_flag |= UNIONFS_OPENEXTU; ovp = uvp; } if (ovp == uvp) { int lkflags; unionfs_forward_vop_start(ovp, &lkflags); error = VOP_SETEXTATTR(ovp, ap->a_attrnamespace, ap->a_name, ap->a_uio, cred, td); unionfs_forward_vop_finish(ap->a_vp, ovp, lkflags); } unionfs_setextattr_abort: UNIONFS_INTERNAL_DEBUG("unionfs_setextattr: leave (%d)\n", error); return (error); } static int unionfs_listextattr(struct vop_listextattr_args *ap) { struct unionfs_node *unp; struct vnode *vp; KASSERT_UNIONFS_VNODE(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); vp = NULLVP; if (unp->un_flag & UNIONFS_OPENEXTU) vp = unp->un_uppervp; else if (unp->un_flag & UNIONFS_OPENEXTL) vp = unp->un_lowervp; if (vp == NULLVP) return (EOPNOTSUPP); return (VOP_LISTEXTATTR(vp, ap->a_attrnamespace, ap->a_uio, ap->a_size, ap->a_cred, ap->a_td)); } static int unionfs_deleteextattr(struct vop_deleteextattr_args *ap) { struct unionfs_node *unp; struct vnode *uvp; struct vnode *lvp; struct vnode *ovp; struct ucred *cred; struct thread *td; int error; KASSERT_UNIONFS_VNODE(ap->a_vp); error = EROFS; unp = VTOUNIONFS(ap->a_vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; ovp = NULLVP; cred = ap->a_cred; td = ap->a_td; UNIONFS_INTERNAL_DEBUG("unionfs_deleteextattr: enter (un_flag=%x)\n", unp->un_flag); if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); if (unp->un_flag & UNIONFS_OPENEXTU) ovp = unp->un_uppervp; else if (unp->un_flag & UNIONFS_OPENEXTL) ovp = unp->un_lowervp; if (ovp == NULLVP) return (EOPNOTSUPP); if (ovp == lvp && lvp->v_type == VREG) { VOP_CLOSEEXTATTR(lvp, 0, cred, td); if (uvp == NULLVP && (error = unionfs_copyfile(ap->a_vp, 1, cred, td)) != 0) { unionfs_deleteextattr_reopen: unp = VTOUNIONFS(ap->a_vp); if (unp != NULL && (unp->un_flag & UNIONFS_OPENEXTL) && VOP_OPENEXTATTR(lvp, cred, td)) { #ifdef DIAGNOSTIC panic("unionfs: VOP_OPENEXTATTR failed"); #endif unp->un_flag &= ~UNIONFS_OPENEXTL; } goto unionfs_deleteextattr_abort; } uvp = unp->un_uppervp; if ((error = VOP_OPENEXTATTR(uvp, cred, td)) != 0) goto unionfs_deleteextattr_reopen; unp->un_flag &= ~UNIONFS_OPENEXTL; unp->un_flag |= UNIONFS_OPENEXTU; ovp = uvp; } if (ovp == uvp) error = VOP_DELETEEXTATTR(ovp, ap->a_attrnamespace, ap->a_name, ap->a_cred, ap->a_td); unionfs_deleteextattr_abort: UNIONFS_INTERNAL_DEBUG("unionfs_deleteextattr: leave (%d)\n", error); return (error); } static int unionfs_setlabel(struct 
vop_setlabel_args *ap) { struct unionfs_node *unp; struct vnode *uvp; struct vnode *lvp; struct thread *td; int error; UNIONFS_INTERNAL_DEBUG("unionfs_setlabel: enter\n"); KASSERT_UNIONFS_VNODE(ap->a_vp); error = EROFS; unp = VTOUNIONFS(ap->a_vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; td = ap->a_td; if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); if (uvp == NULLVP && lvp->v_type == VREG) { if ((error = unionfs_copyfile(ap->a_vp, 1, ap->a_cred, td)) != 0) return (error); uvp = unp->un_uppervp; } if (uvp != NULLVP) error = VOP_SETLABEL(uvp, ap->a_label, ap->a_cred, td); UNIONFS_INTERNAL_DEBUG("unionfs_setlabel: leave (%d)\n", error); return (error); } static int unionfs_vptofh(struct vop_vptofh_args *ap) { return (EOPNOTSUPP); } static int unionfs_add_writecount(struct vop_add_writecount_args *ap) { struct vnode *tvp, *vp; struct unionfs_node *unp; int error, writerefs __diagused; vp = ap->a_vp; unp = VTOUNIONFS(vp); tvp = unp->un_uppervp; KASSERT(tvp != NULL, ("%s: adding write ref without upper vnode", __func__)); error = VOP_ADD_WRITECOUNT(tvp, ap->a_inc); if (error != 0) return (error); /* * We need to track the write refs we've passed to the underlying * vnodes so that we can undo them in case we are forcibly unmounted. */ writerefs = atomic_fetchadd_int(&vp->v_writecount, ap->a_inc); /* text refs are bypassed to lowervp */ VNASSERT(writerefs >= 0, vp, ("%s: invalid write count %d", __func__, writerefs)); VNASSERT(writerefs + ap->a_inc >= 0, vp, ("%s: invalid write count inc %d + %d", __func__, writerefs, ap->a_inc)); return (0); } static int unionfs_vput_pair(struct vop_vput_pair_args *ap) { struct mount *mp; struct vnode *dvp, *vp, **vpp, *lvp, *uvp, *tvp, *tdvp, *tempvp; struct unionfs_node *dunp, *unp; int error, res; dvp = ap->a_dvp; vpp = ap->a_vpp; vp = NULLVP; lvp = NULLVP; uvp = NULLVP; tvp = NULLVP; unp = NULL; dunp = VTOUNIONFS(dvp); if (dunp->un_uppervp != NULL) tdvp = dunp->un_uppervp; else tdvp = dunp->un_lowervp; /* * Underlying vnodes should be locked because the encompassing unionfs * node is locked, but will not be referenced, as the reference will * only be on the unionfs node. Reference them now so that the vput()s * performed by VOP_VPUT_PAIR() will have a reference to drop. */ vref(tdvp); if (vpp != NULL) vp = *vpp; if (vp != NULLVP) { unp = VTOUNIONFS(vp); uvp = unp->un_uppervp; lvp = unp->un_lowervp; if (uvp != NULLVP) tvp = uvp; else tvp = lvp; vref(tvp); /* * If we're being asked to return a locked child vnode, then * we may need to create a replacement vnode in case the * original is reclaimed while the lock is dropped. In that * case we'll need to ensure the mount and the underlying * vnodes aren't also recycled during that window. */ if (!ap->a_unlock_vp) { vhold(vp); if (uvp != NULLVP) vhold(uvp); if (lvp != NULLVP) vhold(lvp); mp = vp->v_mount; vfs_ref(mp); } } ASSERT_VOP_LOCKED(tdvp, __func__); ASSERT_VOP_LOCKED(tvp, __func__); if (tdvp == dunp->un_uppervp && tvp != NULLVP && tvp == lvp) { vput(tvp); vput(tdvp); res = 0; } else { res = VOP_VPUT_PAIR(tdvp, tvp != NULLVP ? &tvp : NULL, true); } ASSERT_VOP_UNLOCKED(tdvp, __func__); ASSERT_VOP_UNLOCKED(tvp, __func__); /* * VOP_VPUT_PAIR() dropped the references we added to the underlying * vnodes, now drop the caller's reference to the unionfs vnodes. */ if (vp != NULLVP && ap->a_unlock_vp) vrele(vp); vrele(dvp); if (vp == NULLVP || ap->a_unlock_vp) return (res); /* * We're being asked to return a locked vnode. 
At this point, the * underlying vnodes have been unlocked, so vp may have been reclaimed. */ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) { vput(vp); error = unionfs_nodeget(mp, uvp, lvp, dvp, &tempvp, NULL); if (error == 0) { vn_lock(tempvp, LK_EXCLUSIVE | LK_RETRY); *vpp = tempvp; } else vget(vp, LK_EXCLUSIVE | LK_RETRY); vfs_unbusy(mp); } if (lvp != NULLVP) vdrop(lvp); if (uvp != NULLVP) vdrop(uvp); vdrop(vp); vfs_rel(mp); return (res); } static int unionfs_set_text(struct vop_set_text_args *ap) { struct vnode *tvp; struct unionfs_node *unp; int error; /* * We assume text refs are managed against lvp/uvp through the * executable mapping backed by its VM object. We therefore don't * need to track leased text refs in the case of a forcible unmount. */ unp = VTOUNIONFS(ap->a_vp); ASSERT_VOP_LOCKED(ap->a_vp, __func__); tvp = unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp; error = VOP_SET_TEXT(tvp); return (error); } static int unionfs_unset_text(struct vop_unset_text_args *ap) { struct vnode *tvp; struct unionfs_node *unp; ASSERT_VOP_LOCKED(ap->a_vp, __func__); unp = VTOUNIONFS(ap->a_vp); tvp = unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp; VOP_UNSET_TEXT_CHECKED(tvp); return (0); } static int unionfs_unp_bind(struct vop_unp_bind_args *ap) { struct vnode *tvp; struct unionfs_node *unp; ASSERT_VOP_LOCKED(ap->a_vp, __func__); unp = VTOUNIONFS(ap->a_vp); tvp = unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp; VOP_UNP_BIND(tvp, ap->a_unpcb); return (0); } static int unionfs_unp_connect(struct vop_unp_connect_args *ap) { struct vnode *tvp; struct unionfs_node *unp; ASSERT_VOP_LOCKED(ap->a_vp, __func__); unp = VTOUNIONFS(ap->a_vp); tvp = unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp; VOP_UNP_CONNECT(tvp, ap->a_unpcb); return (0); } static int unionfs_unp_detach(struct vop_unp_detach_args *ap) { struct vnode *tvp; struct unionfs_node *unp; tvp = NULL; /* * VOP_UNP_DETACH() is not guaranteed to be called with the unionfs * vnode locked, so we take the interlock to prevent a concurrent * unmount from freeing the unionfs private data. */ VI_LOCK(ap->a_vp); unp = VTOUNIONFS(ap->a_vp); if (unp != NULL) { tvp = unp->un_uppervp != NULL ? unp->un_uppervp : unp->un_lowervp; /* * Hold the target vnode to prevent a concurrent unionfs * unmount from causing it to be recycled once the interlock * is dropped. 
*/ vholdnz(tvp); } VI_UNLOCK(ap->a_vp); if (tvp != NULL) { VOP_UNP_DETACH(tvp); vdrop(tvp); } return (0); } struct vop_vector unionfs_vnodeops = { .vop_default = &default_vnodeops, .vop_access = unionfs_access, .vop_aclcheck = unionfs_aclcheck, .vop_advlock = unionfs_advlock, .vop_bmap = VOP_EOPNOTSUPP, .vop_cachedlookup = unionfs_lookup, .vop_close = unionfs_close, .vop_closeextattr = unionfs_closeextattr, .vop_create = unionfs_create, .vop_deleteextattr = unionfs_deleteextattr, .vop_fsync = unionfs_fsync, .vop_getacl = unionfs_getacl, .vop_getattr = unionfs_getattr, .vop_getextattr = unionfs_getextattr, .vop_getwritemount = unionfs_getwritemount, .vop_inactive = unionfs_inactive, .vop_need_inactive = vop_stdneed_inactive, .vop_islocked = vop_stdislocked, .vop_ioctl = unionfs_ioctl, .vop_link = unionfs_link, .vop_listextattr = unionfs_listextattr, .vop_lock1 = unionfs_lock, .vop_lookup = vfs_cache_lookup, .vop_mkdir = unionfs_mkdir, .vop_mknod = unionfs_mknod, .vop_open = unionfs_open, .vop_openextattr = unionfs_openextattr, .vop_pathconf = unionfs_pathconf, .vop_poll = unionfs_poll, .vop_print = unionfs_print, .vop_read = unionfs_read, .vop_readdir = unionfs_readdir, .vop_readlink = unionfs_readlink, .vop_reclaim = unionfs_reclaim, .vop_remove = unionfs_remove, .vop_rename = unionfs_rename, .vop_rmdir = unionfs_rmdir, .vop_setacl = unionfs_setacl, .vop_setattr = unionfs_setattr, .vop_setextattr = unionfs_setextattr, .vop_setlabel = unionfs_setlabel, .vop_strategy = unionfs_strategy, .vop_symlink = unionfs_symlink, .vop_unlock = unionfs_unlock, .vop_whiteout = unionfs_whiteout, .vop_write = unionfs_write, .vop_vptofh = unionfs_vptofh, .vop_add_writecount = unionfs_add_writecount, .vop_vput_pair = unionfs_vput_pair, .vop_set_text = unionfs_set_text, .vop_unset_text = unionfs_unset_text, .vop_unp_bind = unionfs_unp_bind, .vop_unp_connect = unionfs_unp_connect, .vop_unp_detach = unionfs_unp_detach, }; VFS_VOP_VECTOR_REGISTER(unionfs_vnodeops); diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c index ff671e80b803..f2368877ec93 100644 --- a/sys/kern/vfs_cache.c +++ b/sys/kern/vfs_cache.c @@ -1,6391 +1,6382 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1993, 1995 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Poul-Henning Kamp of the FreeBSD Project. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_ddb.h" #include "opt_ktrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #ifdef INVARIANTS #include #endif #include #include #ifdef DDB #include #endif #include /* * High level overview of name caching in the VFS layer. * * Originally caching was implemented as part of UFS, later extracted to allow * use by other filesystems. A decision was made to make it optional and * completely detached from the rest of the kernel, which comes with limitations * outlined near the end of this comment block. * * This fundamental choice needs to be revisited. In the meantime, the current * state is described below. Significance of all notable routines is explained * in comments placed above their implementation. Scattered thoroughout the * file are TODO comments indicating shortcomings which can be fixed without * reworking everything (most of the fixes will likely be reusable). Various * details are omitted from this explanation to not clutter the overview, they * have to be checked by reading the code and associated commentary. * * Keep in mind that it's individual path components which are cached, not full * paths. That is, for a fully cached path "foo/bar/baz" there are 3 entries, * one for each name. * * I. Data organization * * Entries are described by "struct namecache" objects and stored in a hash * table. See cache_get_hash for more information. * * "struct vnode" contains pointers to source entries (names which can be found * when traversing through said vnode), destination entries (names of that * vnode (see "Limitations" for a breakdown on the subject) and a pointer to * the parent vnode. * * The (directory vnode; name) tuple reliably determines the target entry if * it exists. * * Since there are no small locks at this time (all are 32 bytes in size on * LP64), the code works around the problem by introducing lock arrays to * protect hash buckets and vnode lists. * * II. Filesystem integration * * Filesystems participating in name caching do the following: * - set vop_lookup routine to vfs_cache_lookup * - set vop_cachedlookup to whatever can perform the lookup if the above fails * - if they support lockless lookup (see below), vop_fplookup_vexec and * vop_fplookup_symlink are set along with the MNTK_FPLOOKUP flag on the * mount point * - call cache_purge or cache_vop_* routines to eliminate stale entries as * applicable * - call cache_enter to add entries depending on the MAKEENTRY flag * * With the above in mind, there are 2 entry points when doing lookups: * - ... -> namei -> cache_fplookup -- this is the default * - ... -> VOP_LOOKUP -> vfs_cache_lookup -- normally only called by namei * should the above fail * * Example code flow how an entry is added: * ... 
-> namei -> cache_fplookup -> cache_fplookup_noentry -> VOP_LOOKUP -> * vfs_cache_lookup -> VOP_CACHEDLOOKUP -> ufs_lookup_ino -> cache_enter * * III. Performance considerations * * For lockless case forward lookup avoids any writes to shared areas apart * from the terminal path component. In other words non-modifying lookups of * different files don't suffer any scalability problems in the namecache. * Looking up the same file is limited by VFS and goes beyond the scope of this * file. * * At least on amd64 the single-threaded bottleneck for long paths is hashing * (see cache_get_hash). There are cases where the code issues acquire fence * multiple times, they can be combined on architectures which suffer from it. * * For locked case each encountered vnode has to be referenced and locked in * order to be handed out to the caller (normally that's namei). This * introduces significant hit single-threaded and serialization multi-threaded. * * Reverse lookup (e.g., "getcwd") fully scales provided it is fully cached -- * avoids any writes to shared areas to any components. * * Unrelated insertions are partially serialized on updating the global entry * counter and possibly serialized on colliding bucket or vnode locks. * * IV. Observability * * Note not everything has an explicit dtrace probe nor it should have, thus * some of the one-liners below depend on implementation details. * * Examples: * * # Check what lookups failed to be handled in a lockless manner. Column 1 is * # line number, column 2 is status code (see cache_fpl_status) * dtrace -n 'vfs:fplookup:lookup:done { @[arg1, arg2] = count(); }' * * # Lengths of names added by binary name * dtrace -n 'fbt::cache_enter_time:entry { @[execname] = quantize(args[2]->cn_namelen); }' * * # Same as above but only those which exceed 64 characters * dtrace -n 'fbt::cache_enter_time:entry /args[2]->cn_namelen > 64/ { @[execname] = quantize(args[2]->cn_namelen); }' * * # Who is performing lookups with spurious slashes (e.g., "foo//bar") and what * # path is it * dtrace -n 'fbt::cache_fplookup_skip_slashes:entry { @[execname, stringof(args[0]->cnp->cn_pnbuf)] = count(); }' * * V. Limitations and implementation defects * * - since it is possible there is no entry for an open file, tools like * "procstat" may fail to resolve fd -> vnode -> path to anything * - even if a filesystem adds an entry, it may get purged (e.g., due to memory * shortage) in which case the above problem applies * - hardlinks are not tracked, thus if a vnode is reachable in more than one * way, resolving a name may return a different path than the one used to * open it (even if said path is still valid) * - by default entries are not added for newly created files * - adding an entry may need to evict negative entry first, which happens in 2 * distinct places (evicting on lookup, adding in a later VOP) making it * impossible to simply reuse it * - there is a simple scheme to evict negative entries as the cache is approaching * its capacity, but it is very unclear if doing so is a good idea to begin with * - vnodes are subject to being recycled even if target inode is left in memory, * which loses the name cache entries when it perhaps should not. in case of tmpfs * names get duplicated -- kept by filesystem itself and namecache separately * - struct namecache has a fixed size and comes in 2 variants, often wasting * space. 
now hard to replace with malloc due to dependence on SMR, which * requires UMA zones to opt in * - lack of better integration with the kernel also turns nullfs into a layered * filesystem instead of something which can take advantage of caching * * Appendix A: where is the time lost, expanding on paragraph III * * While some care went into optimizing lookups, there is still plenty of * performance left on the table, most notably from single-threaded standpoint. * Below is a woefully incomplete list of changes which can help. Ideas are * mostly sketched out, no claim is made all kinks or prerequisites are laid * out. * * Note there is performance lost all over VFS. * * === SMR-only lookup * * For commonly used ops like stat(2), when the terminal vnode *is* cached, * lockless lookup could refrain from refing/locking the found vnode and * instead return while within the SMR section. Then a call to, say, * vop_stat_smr could do the work (or fail with EAGAIN), finally the result * would be validated with seqc not changing. This would be faster * single-threaded as it dodges atomics and would provide full scalability for * multicore uses. This would *not* work for open(2) or other calls which need * the vnode to hang around for the long haul, but would work for aforementioned * stat(2) but also access(2), readlink(2), realpathat(2) and probably more. * * === hotpatching for sdt probes * * They result in *tons* of branches all over with rather regrettable codegen * at times. Removing sdt probes altogether gives over 2% boost in lookup rate. * Reworking the code to patch itself at runtime with asm goto would solve it. * asm goto is fully supported by gcc and clang. * * === copyinstr * * On all architectures it operates one byte at a time, while it could be * word-sized instead thanks to the Mycroft trick. * * API itself is rather pessimal for path lookup, accepting arbitrary sizes and * *optionally* filling in the length parameter. * * Instead a new routine (copyinpath?) could be introduced, demanding a buffer * size which is a multiply of the word (and never zero), with the length * always returned. On top of it the routine could be allowed to transform the * buffer in arbitrary ways, most notably writing past the found length (not to * be confused with writing past buffer size) -- this would allow word-sized * movs while checking for '\0' later. * * === detour through namei * * Currently one suffers being called from namei, which then has to check if * things worked out locklessly. Instead the lockless lookup could be the * actual entry point which calls what is currently namei as a fallback. * * === avoidable branches in cache_can_fplookup * * The cache_fast_lookup_enabled flag check could be hotpatchable (in fact if * this is off, none of fplookup code should execute). * * Both audit and capsicum branches can be combined into one, but it requires * paying off a lot of tech debt first. * * ni_startdir could be indicated with a flag in cn_flags, eliminating the * branch. * * === mount stacks * * Crossing a mount requires checking if perhaps something is mounted on top. * Instead, an additional entry could be added to struct mount with a pointer * to the final mount on the stack. This would be recalculated on each * mount/unmount. * * === root vnodes * * It could become part of the API contract to *always* have a rootvnode set in * mnt_rootvnode. Such vnodes are annotated with VV_ROOT and vnlru would have * to be modified to always skip them. 
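 *
 * A side note on the copyinstr item above: the "Mycroft trick" it mentions is
 * a branch-free test for a zero byte within a word. A minimal sketch, assuming
 * 64-bit words (the helper name is made up purely for illustration):
 *
 *	static inline bool
 *	word_has_nul(uint64_t w)
 *	{
 *		return (((w - 0x0101010101010101ULL) & ~w &
 *		    0x8080808080808080ULL) != 0);
 *	}
 *
 * A byte ends up with its high bit set in (w - 0x0101...) while having it
 * clear in w only if it was zero or a borrow propagated into it from a lower
 * zero byte, so the expression is nonzero exactly when the word contains a
 * zero byte.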
* * === inactive on v_usecount reaching 0 * * VOP_NEED_INACTIVE should not exist. Filesystems would indicate need for such * processing with a bit in usecount. * * === v_holdcnt * * Hold count should probably get eliminated, but one can argue it is a useful * feature. Even if so, handling of v_usecount could be decoupled from it -- * vnlru et al would consider the vnode not-freeable if has either hold or * usecount on it. * * This would eliminate 2 atomics. */ static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Name cache"); SDT_PROVIDER_DECLARE(vfs); SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *", "struct vnode *"); SDT_PROBE_DEFINE3(vfs, namecache, enter, duplicate, "struct vnode *", "char *", "struct vnode *"); SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *", "char *"); SDT_PROBE_DEFINE2(vfs, namecache, fullpath_smr, hit, "struct vnode *", "const char *"); SDT_PROBE_DEFINE4(vfs, namecache, fullpath_smr, miss, "struct vnode *", "struct namecache *", "int", "int"); SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *"); SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *", "char *", "struct vnode *"); SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *"); SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int", "struct vnode *", "char *"); SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *", "struct vnode *"); SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative, "struct vnode *", "char *"); SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *", "char *"); SDT_PROBE_DEFINE2(vfs, namecache, removecnp, hit, "struct vnode *", "struct componentname *"); SDT_PROBE_DEFINE2(vfs, namecache, removecnp, miss, "struct vnode *", "struct componentname *"); SDT_PROBE_DEFINE3(vfs, namecache, purge, done, "struct vnode *", "size_t", "size_t"); SDT_PROBE_DEFINE1(vfs, namecache, purge, batch, "int"); SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *"); SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *"); SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *", "struct vnode *"); SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *", "char *"); SDT_PROBE_DEFINE2(vfs, namecache, evict_negative, done, "struct vnode *", "char *"); SDT_PROBE_DEFINE1(vfs, namecache, symlink, alloc__fail, "size_t"); SDT_PROBE_DEFINE3(vfs, fplookup, lookup, done, "struct nameidata", "int", "bool"); SDT_PROBE_DECLARE(vfs, namei, lookup, entry); SDT_PROBE_DECLARE(vfs, namei, lookup, return); static char __read_frequently cache_fast_lookup_enabled = true; /* * This structure describes the elements in the cache of recent * names looked up by namei. */ struct negstate { u_char neg_flag; u_char neg_hit; }; _Static_assert(sizeof(struct negstate) <= sizeof(struct vnode *), "the state must fit in a union with a pointer without growing it"); struct namecache { LIST_ENTRY(namecache) nc_src; /* source vnode list */ TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ CK_SLIST_ENTRY(namecache) nc_hash;/* hash chain */ struct vnode *nc_dvp; /* vnode of parent of name */ union { struct vnode *nu_vp; /* vnode the name refers to */ struct negstate nu_neg;/* negative entry state */ } n_un; u_char nc_flag; /* flag bits */ u_char nc_nlen; /* length of name */ char nc_name[]; /* segment name + nul */ }; /* * struct namecache_ts repeats struct namecache layout up to the * nc_nlen member. 
* struct namecache_ts is used in place of struct namecache when time(s) need * to be stored. The nc_dotdottime field is used when a cache entry is mapping * both a non-dotdot directory name plus dotdot for the directory's * parent. * * See below for alignment requirement. */ struct namecache_ts { struct timespec nc_time; /* timespec provided by fs */ struct timespec nc_dotdottime; /* dotdot timespec provided by fs */ int nc_ticks; /* ticks value when entry was added */ int nc_pad; struct namecache nc_nc; }; TAILQ_HEAD(cache_freebatch, namecache); /* * At least mips n32 performs 64-bit accesses to timespec as found * in namecache_ts and requires them to be aligned. Since others * may be in the same spot suffer a little bit and enforce the * alignment for everyone. Note this is a nop for 64-bit platforms. */ #define CACHE_ZONE_ALIGNMENT UMA_ALIGNOF(time_t) /* * TODO: the initial value of CACHE_PATH_CUTOFF was inherited from the * 4.4 BSD codebase. Later on struct namecache was tweaked to become * smaller and the value was bumped to retain the total size, but it * was never re-evaluated for suitability. A simple test counting * lengths during package building shows that the value of 45 covers * about 86% of all added entries, reaching 99% at 65. * * Regardless of the above, use of dedicated zones instead of malloc may be * inducing additional waste. This may be hard to address as said zones are * tied to VFS SMR. Even if retaining them, the current split should be * re-evaluated. */ #ifdef __LP64__ #define CACHE_PATH_CUTOFF 45 #define CACHE_LARGE_PAD 6 #else #define CACHE_PATH_CUTOFF 41 #define CACHE_LARGE_PAD 2 #endif #define CACHE_ZONE_SMALL_SIZE (offsetof(struct namecache, nc_name) + CACHE_PATH_CUTOFF + 1) #define CACHE_ZONE_SMALL_TS_SIZE (offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_SMALL_SIZE) #define CACHE_ZONE_LARGE_SIZE (offsetof(struct namecache, nc_name) + NAME_MAX + 1 + CACHE_LARGE_PAD) #define CACHE_ZONE_LARGE_TS_SIZE (offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_LARGE_SIZE) _Static_assert((CACHE_ZONE_SMALL_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size"); _Static_assert((CACHE_ZONE_SMALL_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size"); _Static_assert((CACHE_ZONE_LARGE_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size"); _Static_assert((CACHE_ZONE_LARGE_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size"); #define nc_vp n_un.nu_vp #define nc_neg n_un.nu_neg /* * Flags in namecache.nc_flag */ #define NCF_WHITE 0x01 #define NCF_ISDOTDOT 0x02 #define NCF_TS 0x04 #define NCF_DTS 0x08 #define NCF_DVDROP 0x10 #define NCF_NEGATIVE 0x20 #define NCF_INVALID 0x40 #define NCF_WIP 0x80 /* * Flags in negstate.neg_flag */ #define NEG_HOT 0x01 static bool cache_neg_evict_cond(u_long lnumcache); /* * Mark an entry as invalid. * * This is called before it starts getting deconstructed. */ static void cache_ncp_invalidate(struct namecache *ncp) { KASSERT((ncp->nc_flag & NCF_INVALID) == 0, ("%s: entry %p already invalid", __func__, ncp)); atomic_store_char(&ncp->nc_flag, ncp->nc_flag | NCF_INVALID); atomic_thread_fence_rel(); } /* * Does this entry match the given directory and name? */ static bool cache_ncp_match(struct namecache *ncp, struct vnode *dvp, struct componentname *cnp) { return (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && bcmp(ncp->nc_name, cnp->cn_nameptr, cnp->cn_namelen) == 0); } /* * Check whether the entry can be safely used. 
* * All places which elide locks are supposed to call this after they are * done with reading from an entry. */ #define cache_ncp_canuse(ncp) ({ \ struct namecache *_ncp = (ncp); \ u_char _nc_flag; \ \ atomic_thread_fence_acq(); \ _nc_flag = atomic_load_char(&_ncp->nc_flag); \ __predict_true((_nc_flag & (NCF_INVALID | NCF_WIP)) == 0); \ }) /* * Like the above but also checks NCF_WHITE. */ #define cache_fpl_neg_ncp_canuse(ncp) ({ \ struct namecache *_ncp = (ncp); \ u_char _nc_flag; \ \ atomic_thread_fence_acq(); \ _nc_flag = atomic_load_char(&_ncp->nc_flag); \ __predict_true((_nc_flag & (NCF_INVALID | NCF_WIP | NCF_WHITE)) == 0); \ }) VFS_SMR_DECLARE; static SYSCTL_NODE(_vfs_cache, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Name cache parameters"); static u_int __read_mostly ncsize; /* the size as computed on creation or resizing */ SYSCTL_UINT(_vfs_cache_param, OID_AUTO, size, CTLFLAG_RD, &ncsize, 0, "Total namecache capacity"); u_int ncsizefactor = 2; SYSCTL_UINT(_vfs_cache_param, OID_AUTO, sizefactor, CTLFLAG_RW, &ncsizefactor, 0, "Size factor for namecache"); static u_long __read_mostly ncnegfactor = 5; /* ratio of negative entries */ SYSCTL_ULONG(_vfs_cache_param, OID_AUTO, negfactor, CTLFLAG_RW, &ncnegfactor, 0, "Ratio of negative namecache entries"); /* * Negative entry % of namecache capacity above which automatic eviction is allowed. * * Check cache_neg_evict_cond for details. */ static u_int ncnegminpct = 3; static u_int __read_mostly neg_min; /* the above recomputed against ncsize */ SYSCTL_UINT(_vfs_cache_param, OID_AUTO, negmin, CTLFLAG_RD, &neg_min, 0, "Negative entry count above which automatic eviction is allowed"); /* * Structures associated with name caching. */ #define NCHHASH(hash) \ (&nchashtbl[(hash) & nchash]) static __read_mostly CK_SLIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */ static u_long __read_mostly nchash; /* size of hash table */ SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "Size of namecache hash table"); static u_long __exclusive_cache_line numneg; /* number of negative entries allocated */ static u_long __exclusive_cache_line numcache;/* number of cache entries allocated */ struct nchstats nchstats; /* cache effectiveness statistics */ static u_int __exclusive_cache_line neg_cycle; #define ncneghash 3 #define numneglists (ncneghash + 1) struct neglist { struct mtx nl_evict_lock; struct mtx nl_lock __aligned(CACHE_LINE_SIZE); TAILQ_HEAD(, namecache) nl_list; TAILQ_HEAD(, namecache) nl_hotlist; u_long nl_hotnum; } __aligned(CACHE_LINE_SIZE); static struct neglist neglists[numneglists]; static inline struct neglist * NCP2NEGLIST(struct namecache *ncp) { return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]); } static inline struct negstate * NCP2NEGSTATE(struct namecache *ncp) { MPASS(atomic_load_char(&ncp->nc_flag) & NCF_NEGATIVE); return (&ncp->nc_neg); } #define numbucketlocks (ncbuckethash + 1) static u_int __read_mostly ncbuckethash; static struct mtx_padalign __read_mostly *bucketlocks; #define HASH2BUCKETLOCK(hash) \ ((struct mtx *)(&bucketlocks[((hash) & ncbuckethash)])) #define numvnodelocks (ncvnodehash + 1) static u_int __read_mostly ncvnodehash; static struct mtx __read_mostly *vnodelocks; static inline struct mtx * VP2VNODELOCK(struct vnode *vp) { return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]); } /* * Search the hash table for a namecache entry. Either the corresponding bucket * must be locked, or the caller must be in an SMR read section. 
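 *
 * A condensed sketch of the lockless calling convention, shown only as an
 * illustration (it mirrors what cache_lookup() below does):
 *
 *	vfs_smr_enter();
 *	ncp = cache_ncp_find(dvp, cnp, hash);
 *	if (ncp != NULL) {
 *		(copy out the fields of interest, e.g. nc_vp and nc_flag)
 *		if (cache_ncp_canuse(ncp))
 *			(act on the copied values)
 *	}
 *	vfs_smr_exit();
 *
 * Note the copied data only becomes trustworthy after the cache_ncp_canuse()
 * check; securing the vnode itself still requires vget_prep_smr() and
 * vget_finish(), see cache_lookup().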
*/ static struct namecache * cache_ncp_find(struct vnode *dvp, struct componentname *cnp, uint32_t hash) { struct namecache *ncp; KASSERT(mtx_owned(HASH2BUCKETLOCK(hash)) || VFS_SMR_ENTERED(), ("%s: hash %u not locked", __func__, hash)); CK_SLIST_FOREACH(ncp, NCHHASH(hash), nc_hash) { if (cache_ncp_match(ncp, dvp, cnp)) break; } return (ncp); } static void cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp) { struct namecache_ts *ncp_ts; KASSERT((ncp->nc_flag & NCF_TS) != 0 || (tsp == NULL && ticksp == NULL), ("No NCF_TS")); if (tsp == NULL) return; ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); *tsp = ncp_ts->nc_time; *ticksp = ncp_ts->nc_ticks; } #ifdef DEBUG_CACHE static int __read_mostly doingcache = 1; /* 1 => enable the cache */ SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "VFS namecache enabled"); #endif /* Export size information to userland */ SYSCTL_SIZEOF_STRUCT(namecache); /* * The new name cache statistics */ static SYSCTL_NODE(_vfs_cache, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Name cache statistics"); #define STATNODE_ULONG(name, varname, descr) \ SYSCTL_ULONG(_vfs_cache_stats, OID_AUTO, name, CTLFLAG_RD, &varname, 0, descr); #define STATNODE_COUNTER(name, varname, descr) \ static COUNTER_U64_DEFINE_EARLY(varname); \ SYSCTL_COUNTER_U64(_vfs_cache_stats, OID_AUTO, name, CTLFLAG_RD, &varname, \ descr); STATNODE_ULONG(neg, numneg, "Number of negative cache entries"); STATNODE_ULONG(count, numcache, "Number of cache entries"); STATNODE_COUNTER(heldvnodes, numcachehv, "Number of namecache entries with vnodes held"); STATNODE_COUNTER(drops, numdrops, "Number of dropped entries due to reaching the limit"); STATNODE_COUNTER(miss, nummiss, "Number of cache misses"); STATNODE_COUNTER(misszap, nummisszap, "Number of cache misses we do not want to cache"); STATNODE_COUNTER(poszaps, numposzaps, "Number of cache hits (positive) we do not want to cache"); STATNODE_COUNTER(poshits, numposhits, "Number of cache hits (positive)"); STATNODE_COUNTER(negzaps, numnegzaps, "Number of cache hits (negative) we do not want to cache"); STATNODE_COUNTER(neghits, numneghits, "Number of cache hits (negative)"); /* These count for vn_getcwd(), too. */ STATNODE_COUNTER(fullpathcalls, numfullpathcalls, "Number of fullpath search calls"); STATNODE_COUNTER(fullpathfail2, numfullpathfail2, "Number of fullpath search errors (VOP_VPTOCNP failures)"); STATNODE_COUNTER(fullpathfail4, numfullpathfail4, "Number of fullpath search errors (ENOMEM)"); STATNODE_COUNTER(fullpathfound, numfullpathfound, "Number of successful fullpath calls"); STATNODE_COUNTER(symlinktoobig, symlinktoobig, "Number of times symlink did not fit the cache"); /* * Debug or developer statistics. 
 */
static SYSCTL_NODE(_vfs_cache, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Name cache debugging");
#define DEBUGNODE_ULONG(name, varname, descr)	\
	SYSCTL_ULONG(_vfs_cache_debug, OID_AUTO, name, CTLFLAG_RD, &varname, 0, descr);
static u_long zap_bucket_relock_success;
DEBUGNODE_ULONG(zap_bucket_relock_success, zap_bucket_relock_success,
    "Number of successful removals after relocking");
static u_long zap_bucket_fail;
DEBUGNODE_ULONG(zap_bucket_fail, zap_bucket_fail, "");
static u_long zap_bucket_fail2;
DEBUGNODE_ULONG(zap_bucket_fail2, zap_bucket_fail2, "");
static u_long cache_lock_vnodes_cel_3_failures;
DEBUGNODE_ULONG(vnodes_cel_3_failures, cache_lock_vnodes_cel_3_failures,
    "Number of times 3-way vnode locking failed");

static void cache_zap_locked(struct namecache *ncp);
static int vn_fullpath_any_smr(struct vnode *vp, struct vnode *rdir, char *buf,
    char **retbuf, size_t *buflen, size_t addend);
static int vn_fullpath_any(struct vnode *vp, struct vnode *rdir, char *buf,
    char **retbuf, size_t *buflen);
static int vn_fullpath_dir(struct vnode *vp, struct vnode *rdir, char *buf,
    char **retbuf, size_t *len, size_t addend);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static inline void
cache_assert_vlp_locked(struct mtx *vlp)
{
	if (vlp != NULL)
		mtx_assert(vlp, MA_OWNED);
}

static inline void
cache_assert_vnode_locked(struct vnode *vp)
{
	struct mtx *vlp;

	vlp = VP2VNODELOCK(vp);
	cache_assert_vlp_locked(vlp);
}

/*
 * Directory vnodes with entries are held for two reasons:
 * 1. make them less of a target for reclamation in vnlru
 * 2. suffer a smaller performance penalty in locked lookup as requeueing is avoided
 *
 * It will be feasible to stop doing it altogether if all filesystems start
 * supporting lockless lookup.
 */
static void
cache_hold_vnode(struct vnode *vp)
{
	cache_assert_vnode_locked(vp);
	VNPASS(LIST_EMPTY(&vp->v_cache_src), vp);
	vhold(vp);
	counter_u64_add(numcachehv, 1);
}

static void
cache_drop_vnode(struct vnode *vp)
{
	/*
	 * Called after all locks are dropped, meaning we can't assert
	 * on the state of v_cache_src.
	 */
	vdrop(vp);
	counter_u64_add(numcachehv, -1);
}

/*
 * UMA zones.
*/ static uma_zone_t __read_mostly cache_zone_small; static uma_zone_t __read_mostly cache_zone_small_ts; static uma_zone_t __read_mostly cache_zone_large; static uma_zone_t __read_mostly cache_zone_large_ts; char * cache_symlink_alloc(size_t size, int flags) { if (size < CACHE_ZONE_SMALL_SIZE) { return (uma_zalloc_smr(cache_zone_small, flags)); } if (size < CACHE_ZONE_LARGE_SIZE) { return (uma_zalloc_smr(cache_zone_large, flags)); } counter_u64_add(symlinktoobig, 1); SDT_PROBE1(vfs, namecache, symlink, alloc__fail, size); return (NULL); } void cache_symlink_free(char *string, size_t size) { MPASS(string != NULL); KASSERT(size < CACHE_ZONE_LARGE_SIZE, ("%s: size %zu too big", __func__, size)); if (size < CACHE_ZONE_SMALL_SIZE) { uma_zfree_smr(cache_zone_small, string); return; } if (size < CACHE_ZONE_LARGE_SIZE) { uma_zfree_smr(cache_zone_large, string); return; } __assert_unreachable(); } static struct namecache * cache_alloc_uma(int len, bool ts) { struct namecache_ts *ncp_ts; struct namecache *ncp; if (__predict_false(ts)) { if (len <= CACHE_PATH_CUTOFF) ncp_ts = uma_zalloc_smr(cache_zone_small_ts, M_WAITOK); else ncp_ts = uma_zalloc_smr(cache_zone_large_ts, M_WAITOK); ncp = &ncp_ts->nc_nc; } else { if (len <= CACHE_PATH_CUTOFF) ncp = uma_zalloc_smr(cache_zone_small, M_WAITOK); else ncp = uma_zalloc_smr(cache_zone_large, M_WAITOK); } return (ncp); } static void cache_free_uma(struct namecache *ncp) { struct namecache_ts *ncp_ts; if (__predict_false(ncp->nc_flag & NCF_TS)) { ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) uma_zfree_smr(cache_zone_small_ts, ncp_ts); else uma_zfree_smr(cache_zone_large_ts, ncp_ts); } else { if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) uma_zfree_smr(cache_zone_small, ncp); else uma_zfree_smr(cache_zone_large, ncp); } } static struct namecache * cache_alloc(int len, bool ts) { u_long lnumcache; /* * Avoid blowout in namecache entries. * * Bugs: * 1. filesystems may end up trying to add an already existing entry * (for example this can happen after a cache miss during concurrent * lookup), in which case we will call cache_neg_evict despite not * adding anything. * 2. the routine may fail to free anything and no provisions are made * to make it try harder (see the inside for failure modes) * 3. it only ever looks at negative entries. */ lnumcache = atomic_fetchadd_long(&numcache, 1) + 1; if (cache_neg_evict_cond(lnumcache)) { lnumcache = atomic_load_long(&numcache); } if (__predict_false(lnumcache >= ncsize)) { atomic_subtract_long(&numcache, 1); counter_u64_add(numdrops, 1); return (NULL); } return (cache_alloc_uma(len, ts)); } static void cache_free(struct namecache *ncp) { MPASS(ncp != NULL); if ((ncp->nc_flag & NCF_DVDROP) != 0) { cache_drop_vnode(ncp->nc_dvp); } cache_free_uma(ncp); atomic_subtract_long(&numcache, 1); } static void cache_free_batch(struct cache_freebatch *batch) { struct namecache *ncp, *nnp; int i; i = 0; if (TAILQ_EMPTY(batch)) goto out; TAILQ_FOREACH_SAFE(ncp, batch, nc_dst, nnp) { if ((ncp->nc_flag & NCF_DVDROP) != 0) { cache_drop_vnode(ncp->nc_dvp); } cache_free_uma(ncp); i++; } atomic_subtract_long(&numcache, i); out: SDT_PROBE1(vfs, namecache, purge, batch, i); } /* * Hashing. * * The code was made to use FNV in 2001 and this choice needs to be revisited. 
* * Short summary of the difficulty: * The longest name which can be inserted is NAME_MAX characters in length (or * 255 at the time of writing this comment), while majority of names used in * practice are significantly shorter (mostly below 10). More importantly * majority of lookups performed find names are even shorter than that. * * This poses a problem where hashes which do better than FNV past word size * (or so) tend to come with additional overhead when finalizing the result, * making them noticeably slower for the most commonly used range. * * Consider a path like: /usr/obj/usr/src/sys/amd64/GENERIC/vnode_if.c * * When looking it up the most time consuming part by a large margin (at least * on amd64) is hashing. Replacing FNV with something which pessimizes short * input would make the slowest part stand out even more. */ /* * TODO: With the value stored we can do better than computing the hash based * on the address. */ static void cache_prehash(struct vnode *vp) { vp->v_nchash = fnv_32_buf(&vp, sizeof(vp), FNV1_32_INIT); } static uint32_t cache_get_hash(char *name, u_char len, struct vnode *dvp) { return (fnv_32_buf(name, len, dvp->v_nchash)); } static uint32_t cache_get_hash_iter_start(struct vnode *dvp) { return (dvp->v_nchash); } static uint32_t cache_get_hash_iter(char c, uint32_t hash) { return (fnv_32_buf(&c, 1, hash)); } static uint32_t cache_get_hash_iter_finish(uint32_t hash) { return (hash); } static inline struct nchashhead * NCP2BUCKET(struct namecache *ncp) { uint32_t hash; hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp); return (NCHHASH(hash)); } static inline struct mtx * NCP2BUCKETLOCK(struct namecache *ncp) { uint32_t hash; hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp); return (HASH2BUCKETLOCK(hash)); } #ifdef INVARIANTS static void cache_assert_bucket_locked(struct namecache *ncp) { struct mtx *blp; blp = NCP2BUCKETLOCK(ncp); mtx_assert(blp, MA_OWNED); } static void cache_assert_bucket_unlocked(struct namecache *ncp) { struct mtx *blp; blp = NCP2BUCKETLOCK(ncp); mtx_assert(blp, MA_NOTOWNED); } #else #define cache_assert_bucket_locked(x) do { } while (0) #define cache_assert_bucket_unlocked(x) do { } while (0) #endif #define cache_sort_vnodes(x, y) _cache_sort_vnodes((void **)(x), (void **)(y)) static void _cache_sort_vnodes(void **p1, void **p2) { void *tmp; MPASS(*p1 != NULL || *p2 != NULL); if (*p1 > *p2) { tmp = *p2; *p2 = *p1; *p1 = tmp; } } static void cache_lock_all_buckets(void) { u_int i; for (i = 0; i < numbucketlocks; i++) mtx_lock(&bucketlocks[i]); } static void cache_unlock_all_buckets(void) { u_int i; for (i = 0; i < numbucketlocks; i++) mtx_unlock(&bucketlocks[i]); } static void cache_lock_all_vnodes(void) { u_int i; for (i = 0; i < numvnodelocks; i++) mtx_lock(&vnodelocks[i]); } static void cache_unlock_all_vnodes(void) { u_int i; for (i = 0; i < numvnodelocks; i++) mtx_unlock(&vnodelocks[i]); } static int cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2) { cache_sort_vnodes(&vlp1, &vlp2); if (vlp1 != NULL) { if (!mtx_trylock(vlp1)) return (EAGAIN); } if (!mtx_trylock(vlp2)) { if (vlp1 != NULL) mtx_unlock(vlp1); return (EAGAIN); } return (0); } static void cache_lock_vnodes(struct mtx *vlp1, struct mtx *vlp2) { MPASS(vlp1 != NULL || vlp2 != NULL); MPASS(vlp1 <= vlp2); if (vlp1 != NULL) mtx_lock(vlp1); if (vlp2 != NULL) mtx_lock(vlp2); } static void cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2) { MPASS(vlp1 != NULL || vlp2 != NULL); if (vlp1 != NULL) mtx_unlock(vlp1); if (vlp2 != NULL) 
mtx_unlock(vlp2); } static int sysctl_nchstats(SYSCTL_HANDLER_ARGS) { struct nchstats snap; if (req->oldptr == NULL) return (SYSCTL_OUT(req, 0, sizeof(snap))); snap = nchstats; snap.ncs_goodhits = counter_u64_fetch(numposhits); snap.ncs_neghits = counter_u64_fetch(numneghits); snap.ncs_badhits = counter_u64_fetch(numposzaps) + counter_u64_fetch(numnegzaps); snap.ncs_miss = counter_u64_fetch(nummisszap) + counter_u64_fetch(nummiss); return (SYSCTL_OUT(req, &snap, sizeof(snap))); } SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU", "VFS cache effectiveness statistics"); static int sysctl_hitpct(SYSCTL_HANDLER_ARGS) { long poshits, neghits, miss, total; long pct; poshits = counter_u64_fetch(numposhits); neghits = counter_u64_fetch(numneghits); miss = counter_u64_fetch(nummiss); total = poshits + neghits + miss; pct = 0; if (total != 0) pct = ((poshits + neghits) * 100) / total; return (sysctl_handle_int(oidp, 0, pct, req)); } SYSCTL_PROC(_vfs_cache_stats, OID_AUTO, hitpct, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_hitpct, "I", "Percentage of hits"); static void cache_recalc_neg_min(void) { neg_min = (ncsize * ncnegminpct) / 100; } static int sysctl_negminpct(SYSCTL_HANDLER_ARGS) { u_int val; int error; val = ncnegminpct; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (val == ncnegminpct) return (0); if (val < 0 || val > 99) return (EINVAL); ncnegminpct = val; cache_recalc_neg_min(); return (0); } SYSCTL_PROC(_vfs_cache_param, OID_AUTO, negminpct, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_negminpct, "I", "Negative entry \% of namecache capacity above which automatic eviction is allowed"); #ifdef DEBUG_CACHE /* * Grab an atomic snapshot of the name cache hash chain lengths */ static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "hash table stats"); static int sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS) { struct nchashhead *ncpp; struct namecache *ncp; int i, error, n_nchash, *cntbuf; retry: n_nchash = nchash + 1; /* nchash is max index, not count */ if (req->oldptr == NULL) return SYSCTL_OUT(req, 0, n_nchash * sizeof(int)); cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK); cache_lock_all_buckets(); if (n_nchash != nchash + 1) { cache_unlock_all_buckets(); free(cntbuf, M_TEMP); goto retry; } /* Scan hash tables counting entries */ for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++) CK_SLIST_FOREACH(ncp, ncpp, nc_hash) cntbuf[i]++; cache_unlock_all_buckets(); for (error = 0, i = 0; i < n_nchash; i++) if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0) break; free(cntbuf, M_TEMP); return (error); } SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD| CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int", "nchash chain lengths"); static int sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS) { int error; struct nchashhead *ncpp; struct namecache *ncp; int n_nchash; int count, maxlength, used, pct; if (!req->oldptr) return SYSCTL_OUT(req, 0, 4 * sizeof(int)); cache_lock_all_buckets(); n_nchash = nchash + 1; /* nchash is max index, not count */ used = 0; maxlength = 0; /* Scan hash tables for applicable entries */ for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) { count = 0; CK_SLIST_FOREACH(ncp, ncpp, nc_hash) { count++; } if (count) used++; if (maxlength < count) maxlength = count; } n_nchash = nchash + 1; cache_unlock_all_buckets(); pct 
= (used * 100) / (n_nchash / 100); error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash)); if (error) return (error); error = SYSCTL_OUT(req, &used, sizeof(used)); if (error) return (error); error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength)); if (error) return (error); error = SYSCTL_OUT(req, &pct, sizeof(pct)); if (error) return (error); return (0); } SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD| CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I", "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)"); #endif /* * Negative entries management * * Various workloads create plenty of negative entries and barely use them * afterwards. Moreover malicious users can keep performing bogus lookups * adding even more entries. For example "make tinderbox" as of writing this * comment ends up with 2.6M namecache entries in total, 1.2M of which are * negative. * * As such, a rather aggressive eviction method is needed. The currently * employed method is a placeholder. * * Entries are split over numneglists separate lists, each of which is further * split into hot and cold entries. Entries get promoted after getting a hit. * Eviction happens on addition of new entry. */ static SYSCTL_NODE(_vfs_cache, OID_AUTO, neg, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Name cache negative entry statistics"); SYSCTL_ULONG(_vfs_cache_neg, OID_AUTO, count, CTLFLAG_RD, &numneg, 0, "Number of negative cache entries"); static COUNTER_U64_DEFINE_EARLY(neg_created); SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, created, CTLFLAG_RD, &neg_created, "Number of created negative entries"); static COUNTER_U64_DEFINE_EARLY(neg_evicted); SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evicted, CTLFLAG_RD, &neg_evicted, "Number of evicted negative entries"); static COUNTER_U64_DEFINE_EARLY(neg_evict_skipped_empty); SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evict_skipped_empty, CTLFLAG_RD, &neg_evict_skipped_empty, "Number of times evicting failed due to lack of entries"); static COUNTER_U64_DEFINE_EARLY(neg_evict_skipped_missed); SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evict_skipped_missed, CTLFLAG_RD, &neg_evict_skipped_missed, "Number of times evicting failed due to target entry disappearing"); static COUNTER_U64_DEFINE_EARLY(neg_evict_skipped_contended); SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evict_skipped_contended, CTLFLAG_RD, &neg_evict_skipped_contended, "Number of times evicting failed due to contention"); SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, hits, CTLFLAG_RD, &numneghits, "Number of cache hits (negative)"); static int sysctl_neg_hot(SYSCTL_HANDLER_ARGS) { int i, out; out = 0; for (i = 0; i < numneglists; i++) out += neglists[i].nl_hotnum; return (SYSCTL_OUT(req, &out, sizeof(out))); } SYSCTL_PROC(_vfs_cache_neg, OID_AUTO, hot, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, sysctl_neg_hot, "I", "Number of hot negative entries"); static void cache_neg_init(struct namecache *ncp) { struct negstate *ns; ncp->nc_flag |= NCF_NEGATIVE; ns = NCP2NEGSTATE(ncp); ns->neg_flag = 0; ns->neg_hit = 0; counter_u64_add(neg_created, 1); } #define CACHE_NEG_PROMOTION_THRESH 2 static bool cache_neg_hit_prep(struct namecache *ncp) { struct negstate *ns; u_char n; ns = NCP2NEGSTATE(ncp); n = atomic_load_char(&ns->neg_hit); for (;;) { if (n >= CACHE_NEG_PROMOTION_THRESH) return (false); if (atomic_fcmpset_8(&ns->neg_hit, &n, n + 1)) break; } return (n + 1 == CACHE_NEG_PROMOTION_THRESH); } /* * Nothing to do here but it is provided for completeness as some * 
cache_neg_hit_prep callers may end up returning without even * trying to promote. */ #define cache_neg_hit_abort(ncp) do { } while (0) static void cache_neg_hit_finish(struct namecache *ncp) { SDT_PROBE2(vfs, namecache, lookup, hit__negative, ncp->nc_dvp, ncp->nc_name); counter_u64_add(numneghits, 1); } /* * Move a negative entry to the hot list. */ static void cache_neg_promote_locked(struct namecache *ncp) { struct neglist *nl; struct negstate *ns; ns = NCP2NEGSTATE(ncp); nl = NCP2NEGLIST(ncp); mtx_assert(&nl->nl_lock, MA_OWNED); if ((ns->neg_flag & NEG_HOT) == 0) { TAILQ_REMOVE(&nl->nl_list, ncp, nc_dst); TAILQ_INSERT_TAIL(&nl->nl_hotlist, ncp, nc_dst); nl->nl_hotnum++; ns->neg_flag |= NEG_HOT; } } /* * Move a hot negative entry to the cold list. */ static void cache_neg_demote_locked(struct namecache *ncp) { struct neglist *nl; struct negstate *ns; ns = NCP2NEGSTATE(ncp); nl = NCP2NEGLIST(ncp); mtx_assert(&nl->nl_lock, MA_OWNED); MPASS(ns->neg_flag & NEG_HOT); TAILQ_REMOVE(&nl->nl_hotlist, ncp, nc_dst); TAILQ_INSERT_TAIL(&nl->nl_list, ncp, nc_dst); nl->nl_hotnum--; ns->neg_flag &= ~NEG_HOT; atomic_store_char(&ns->neg_hit, 0); } /* * Move a negative entry to the hot list if it matches the lookup. * * We have to take locks, but they may be contended and in the worst * case we may need to go off CPU. We don't want to spin within the * smr section and we can't block with it. Exiting the section means * the found entry could have been evicted. We are going to look it * up again. */ static bool cache_neg_promote_cond(struct vnode *dvp, struct componentname *cnp, struct namecache *oncp, uint32_t hash) { struct namecache *ncp; struct neglist *nl; u_char nc_flag; nl = NCP2NEGLIST(oncp); mtx_lock(&nl->nl_lock); /* * For hash iteration. */ vfs_smr_enter(); /* * Avoid all surprises by only succeeding if we got the same entry and * bailing completely otherwise. * XXX There are no provisions to keep the vnode around, meaning we may * end up promoting a negative entry for a *new* vnode and returning * ENOENT on its account. This is the error we want to return anyway * and promotion is harmless. * * In particular at this point there can be a new ncp which matches the * search but hashes to a different neglist. */ CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { if (ncp == oncp) break; } /* * No match to begin with. */ if (__predict_false(ncp == NULL)) { goto out_abort; } /* * The newly found entry may be something different... */ if (!cache_ncp_match(ncp, dvp, cnp)) { goto out_abort; } /* * ... and not even negative. 
*/ nc_flag = atomic_load_char(&ncp->nc_flag); if ((nc_flag & NCF_NEGATIVE) == 0) { goto out_abort; } if (!cache_ncp_canuse(ncp)) { goto out_abort; } cache_neg_promote_locked(ncp); cache_neg_hit_finish(ncp); vfs_smr_exit(); mtx_unlock(&nl->nl_lock); return (true); out_abort: vfs_smr_exit(); mtx_unlock(&nl->nl_lock); return (false); } static void cache_neg_promote(struct namecache *ncp) { struct neglist *nl; nl = NCP2NEGLIST(ncp); mtx_lock(&nl->nl_lock); cache_neg_promote_locked(ncp); mtx_unlock(&nl->nl_lock); } static void cache_neg_insert(struct namecache *ncp) { struct neglist *nl; MPASS(ncp->nc_flag & NCF_NEGATIVE); cache_assert_bucket_locked(ncp); nl = NCP2NEGLIST(ncp); mtx_lock(&nl->nl_lock); TAILQ_INSERT_TAIL(&nl->nl_list, ncp, nc_dst); mtx_unlock(&nl->nl_lock); atomic_add_long(&numneg, 1); } static void cache_neg_remove(struct namecache *ncp) { struct neglist *nl; struct negstate *ns; cache_assert_bucket_locked(ncp); nl = NCP2NEGLIST(ncp); ns = NCP2NEGSTATE(ncp); mtx_lock(&nl->nl_lock); if ((ns->neg_flag & NEG_HOT) != 0) { TAILQ_REMOVE(&nl->nl_hotlist, ncp, nc_dst); nl->nl_hotnum--; } else { TAILQ_REMOVE(&nl->nl_list, ncp, nc_dst); } mtx_unlock(&nl->nl_lock); atomic_subtract_long(&numneg, 1); } static struct neglist * cache_neg_evict_select_list(void) { struct neglist *nl; u_int c; c = atomic_fetchadd_int(&neg_cycle, 1) + 1; nl = &neglists[c % numneglists]; if (!mtx_trylock(&nl->nl_evict_lock)) { counter_u64_add(neg_evict_skipped_contended, 1); return (NULL); } return (nl); } static struct namecache * cache_neg_evict_select_entry(struct neglist *nl) { struct namecache *ncp, *lncp; struct negstate *ns, *lns; int i; mtx_assert(&nl->nl_evict_lock, MA_OWNED); mtx_assert(&nl->nl_lock, MA_OWNED); ncp = TAILQ_FIRST(&nl->nl_list); if (ncp == NULL) return (NULL); lncp = ncp; lns = NCP2NEGSTATE(lncp); for (i = 1; i < 4; i++) { ncp = TAILQ_NEXT(ncp, nc_dst); if (ncp == NULL) break; ns = NCP2NEGSTATE(ncp); if (ns->neg_hit < lns->neg_hit) { lncp = ncp; lns = ns; } } return (lncp); } static bool cache_neg_evict(void) { struct namecache *ncp, *ncp2; struct neglist *nl; struct vnode *dvp; struct mtx *dvlp; struct mtx *blp; uint32_t hash; u_char nlen; bool evicted; nl = cache_neg_evict_select_list(); if (nl == NULL) { return (false); } mtx_lock(&nl->nl_lock); ncp = TAILQ_FIRST(&nl->nl_hotlist); if (ncp != NULL) { cache_neg_demote_locked(ncp); } ncp = cache_neg_evict_select_entry(nl); if (ncp == NULL) { counter_u64_add(neg_evict_skipped_empty, 1); mtx_unlock(&nl->nl_lock); mtx_unlock(&nl->nl_evict_lock); return (false); } nlen = ncp->nc_nlen; dvp = ncp->nc_dvp; hash = cache_get_hash(ncp->nc_name, nlen, dvp); dvlp = VP2VNODELOCK(dvp); blp = HASH2BUCKETLOCK(hash); mtx_unlock(&nl->nl_lock); mtx_unlock(&nl->nl_evict_lock); mtx_lock(dvlp); mtx_lock(blp); /* * Note that since all locks were dropped above, the entry may be * gone or reallocated to be something else. */ CK_SLIST_FOREACH(ncp2, (NCHHASH(hash)), nc_hash) { if (ncp2 == ncp && ncp2->nc_dvp == dvp && ncp2->nc_nlen == nlen && (ncp2->nc_flag & NCF_NEGATIVE) != 0) break; } if (ncp2 == NULL) { counter_u64_add(neg_evict_skipped_missed, 1); ncp = NULL; evicted = false; } else { MPASS(dvlp == VP2VNODELOCK(ncp->nc_dvp)); MPASS(blp == NCP2BUCKETLOCK(ncp)); SDT_PROBE2(vfs, namecache, evict_negative, done, ncp->nc_dvp, ncp->nc_name); cache_zap_locked(ncp); counter_u64_add(neg_evicted, 1); evicted = true; } mtx_unlock(blp); mtx_unlock(dvlp); if (ncp != NULL) cache_free(ncp); return (evicted); } /* * Maybe evict a negative entry to create more room. 
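 *
 * In shorthand, an eviction attempt is made when (purely an illustration of
 * the code below, written in terms of the global counter names):
 *
 *	numcache > ncsize - 1000 ||
 *	    (numneg >= neg_min && numneg * ncnegfactor >= numcache)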
* * The ncnegfactor parameter limits what fraction of the total count * can comprise of negative entries. However, if the cache is just * warming up this leads to excessive evictions. As such, ncnegminpct * (recomputed to neg_min) dictates whether the above should be * applied. * * Try evicting if the cache is close to full capacity regardless of * other considerations. */ static bool cache_neg_evict_cond(u_long lnumcache) { u_long lnumneg; if (ncsize - 1000 < lnumcache) goto out_evict; lnumneg = atomic_load_long(&numneg); if (lnumneg < neg_min) return (false); if (lnumneg * ncnegfactor < lnumcache) return (false); out_evict: return (cache_neg_evict()); } /* * cache_zap_locked(): * * Removes a namecache entry from cache, whether it contains an actual * pointer to a vnode or if it is just a negative cache entry. */ static void cache_zap_locked(struct namecache *ncp) { struct nchashhead *ncpp; struct vnode *dvp, *vp; dvp = ncp->nc_dvp; vp = ncp->nc_vp; if (!(ncp->nc_flag & NCF_NEGATIVE)) cache_assert_vnode_locked(vp); cache_assert_vnode_locked(dvp); cache_assert_bucket_locked(ncp); cache_ncp_invalidate(ncp); ncpp = NCP2BUCKET(ncp); CK_SLIST_REMOVE(ncpp, ncp, namecache, nc_hash); if (!(ncp->nc_flag & NCF_NEGATIVE)) { SDT_PROBE3(vfs, namecache, zap, done, dvp, ncp->nc_name, vp); TAILQ_REMOVE(&vp->v_cache_dst, ncp, nc_dst); if (ncp == vp->v_cache_dd) { atomic_store_ptr(&vp->v_cache_dd, NULL); } } else { SDT_PROBE2(vfs, namecache, zap_negative, done, dvp, ncp->nc_name); cache_neg_remove(ncp); } if (ncp->nc_flag & NCF_ISDOTDOT) { if (ncp == dvp->v_cache_dd) { atomic_store_ptr(&dvp->v_cache_dd, NULL); } } else { LIST_REMOVE(ncp, nc_src); if (LIST_EMPTY(&dvp->v_cache_src)) { ncp->nc_flag |= NCF_DVDROP; } } } static void cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp) { struct mtx *blp; MPASS(ncp->nc_dvp == vp); MPASS(ncp->nc_flag & NCF_NEGATIVE); cache_assert_vnode_locked(vp); blp = NCP2BUCKETLOCK(ncp); mtx_lock(blp); cache_zap_locked(ncp); mtx_unlock(blp); } static bool cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp, struct mtx **vlpp) { struct mtx *pvlp, *vlp1, *vlp2, *to_unlock; struct mtx *blp; MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp); cache_assert_vnode_locked(vp); if (ncp->nc_flag & NCF_NEGATIVE) { if (*vlpp != NULL) { mtx_unlock(*vlpp); *vlpp = NULL; } cache_zap_negative_locked_vnode_kl(ncp, vp); return (true); } pvlp = VP2VNODELOCK(vp); blp = NCP2BUCKETLOCK(ncp); vlp1 = VP2VNODELOCK(ncp->nc_dvp); vlp2 = VP2VNODELOCK(ncp->nc_vp); if (*vlpp == vlp1 || *vlpp == vlp2) { to_unlock = *vlpp; *vlpp = NULL; } else { if (*vlpp != NULL) { mtx_unlock(*vlpp); *vlpp = NULL; } cache_sort_vnodes(&vlp1, &vlp2); if (vlp1 == pvlp) { mtx_lock(vlp2); to_unlock = vlp2; } else { if (!mtx_trylock(vlp1)) goto out_relock; to_unlock = vlp1; } } mtx_lock(blp); cache_zap_locked(ncp); mtx_unlock(blp); if (to_unlock != NULL) mtx_unlock(to_unlock); return (true); out_relock: mtx_unlock(vlp2); mtx_lock(vlp1); mtx_lock(vlp2); MPASS(*vlpp == NULL); *vlpp = vlp1; return (false); } /* * If trylocking failed we can get here. We know enough to take all needed locks * in the right order and re-lookup the entry. 
*/ static int cache_zap_unlocked_bucket(struct namecache *ncp, struct componentname *cnp, struct vnode *dvp, struct mtx *dvlp, struct mtx *vlp, uint32_t hash, struct mtx *blp) { struct namecache *rncp; struct mtx *rvlp; cache_assert_bucket_unlocked(ncp); cache_sort_vnodes(&dvlp, &vlp); cache_lock_vnodes(dvlp, vlp); mtx_lock(blp); CK_SLIST_FOREACH(rncp, (NCHHASH(hash)), nc_hash) { if (rncp == ncp && cache_ncp_match(rncp, dvp, cnp)) break; } if (rncp == NULL) goto out_mismatch; if (!(ncp->nc_flag & NCF_NEGATIVE)) rvlp = VP2VNODELOCK(rncp->nc_vp); else rvlp = NULL; if (rvlp != vlp) goto out_mismatch; cache_zap_locked(rncp); mtx_unlock(blp); cache_unlock_vnodes(dvlp, vlp); atomic_add_long(&zap_bucket_relock_success, 1); return (0); out_mismatch: mtx_unlock(blp); cache_unlock_vnodes(dvlp, vlp); return (EAGAIN); } static int __noinline cache_zap_locked_bucket(struct namecache *ncp, struct componentname *cnp, uint32_t hash, struct mtx *blp) { struct mtx *dvlp, *vlp; struct vnode *dvp; cache_assert_bucket_locked(ncp); dvlp = VP2VNODELOCK(ncp->nc_dvp); vlp = NULL; if (!(ncp->nc_flag & NCF_NEGATIVE)) vlp = VP2VNODELOCK(ncp->nc_vp); if (cache_trylock_vnodes(dvlp, vlp) == 0) { cache_zap_locked(ncp); mtx_unlock(blp); cache_unlock_vnodes(dvlp, vlp); return (0); } dvp = ncp->nc_dvp; mtx_unlock(blp); return (cache_zap_unlocked_bucket(ncp, cnp, dvp, dvlp, vlp, hash, blp)); } static __noinline int cache_remove_cnp(struct vnode *dvp, struct componentname *cnp) { struct namecache *ncp; struct mtx *blp; struct mtx *dvlp, *dvlp2; uint32_t hash; int error; if (cnp->cn_namelen == 2 && cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') { dvlp = VP2VNODELOCK(dvp); dvlp2 = NULL; mtx_lock(dvlp); retry_dotdot: ncp = dvp->v_cache_dd; if (ncp == NULL) { mtx_unlock(dvlp); if (dvlp2 != NULL) mtx_unlock(dvlp2); SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp); return (0); } if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) { if (!cache_zap_locked_vnode_kl2(ncp, dvp, &dvlp2)) goto retry_dotdot; MPASS(dvp->v_cache_dd == NULL); mtx_unlock(dvlp); if (dvlp2 != NULL) mtx_unlock(dvlp2); cache_free(ncp); } else { atomic_store_ptr(&dvp->v_cache_dd, NULL); mtx_unlock(dvlp); if (dvlp2 != NULL) mtx_unlock(dvlp2); } SDT_PROBE2(vfs, namecache, removecnp, hit, dvp, cnp); return (1); } /* * XXX note that access here is completely unlocked with no provisions * to keep the hash allocated. If one is sufficiently unlucky a * parallel cache resize can reallocate the hash, unmap backing pages * and cause the empty check below to fault. * * Fixing this has epsilon priority, but can be done with no overhead * for this codepath with sufficient effort. 
*/ hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); blp = HASH2BUCKETLOCK(hash); retry: if (CK_SLIST_EMPTY(NCHHASH(hash))) goto out_no_entry; mtx_lock(blp); ncp = cache_ncp_find(dvp, cnp, hash); if (ncp == NULL) { mtx_unlock(blp); goto out_no_entry; } error = cache_zap_locked_bucket(ncp, cnp, hash, blp); if (__predict_false(error != 0)) { atomic_add_long(&zap_bucket_fail, 1); goto retry; } counter_u64_add(numposzaps, 1); SDT_PROBE2(vfs, namecache, removecnp, hit, dvp, cnp); cache_free(ncp); return (1); out_no_entry: counter_u64_add(nummisszap, 1); SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp); return (0); } static int __noinline cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct timespec *tsp, int *ticksp) { int ltype; *vpp = dvp; SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp); if (tsp != NULL) timespecclear(tsp); if (ticksp != NULL) *ticksp = ticks; vrefact(*vpp); /* * When we lookup "." we still can be asked to lock it * differently... */ ltype = cnp->cn_lkflags & LK_TYPE_MASK; if (ltype != VOP_ISLOCKED(*vpp)) { if (ltype == LK_EXCLUSIVE) { vn_lock(*vpp, LK_UPGRADE | LK_RETRY); if (VN_IS_DOOMED((*vpp))) { /* forced unmount */ vrele(*vpp); *vpp = NULL; return (ENOENT); } } else vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY); } return (-1); } static int __noinline cache_lookup_dotdot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct timespec *tsp, int *ticksp) { struct namecache_ts *ncp_ts; struct namecache *ncp; struct mtx *dvlp; enum vgetstate vs; int error, ltype; bool whiteout; MPASS((cnp->cn_flags & ISDOTDOT) != 0); if ((cnp->cn_flags & MAKEENTRY) == 0) { cache_remove_cnp(dvp, cnp); return (0); } retry: dvlp = VP2VNODELOCK(dvp); mtx_lock(dvlp); ncp = dvp->v_cache_dd; if (ncp == NULL) { SDT_PROBE2(vfs, namecache, lookup, miss, dvp, ".."); mtx_unlock(dvlp); return (0); } if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) { if (ncp->nc_flag & NCF_NEGATIVE) *vpp = NULL; else *vpp = ncp->nc_vp; } else *vpp = ncp->nc_dvp; if (*vpp == NULL) goto negative_success; SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..", *vpp); cache_out_ts(ncp, tsp, ticksp); if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) == NCF_DTS && tsp != NULL) { ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); *tsp = ncp_ts->nc_dotdottime; } MPASS(dvp != *vpp); ltype = VOP_ISLOCKED(dvp); VOP_UNLOCK(dvp); vs = vget_prep(*vpp); mtx_unlock(dvlp); error = vget_finish(*vpp, cnp->cn_lkflags, vs); vn_lock(dvp, ltype | LK_RETRY); if (VN_IS_DOOMED(dvp)) { if (error == 0) vput(*vpp); *vpp = NULL; return (ENOENT); } if (error) { *vpp = NULL; goto retry; } return (-1); negative_success: if (__predict_false(cnp->cn_nameiop == CREATE)) { if (cnp->cn_flags & ISLASTCN) { counter_u64_add(numnegzaps, 1); cache_zap_negative_locked_vnode_kl(ncp, dvp); mtx_unlock(dvlp); cache_free(ncp); return (0); } } whiteout = (ncp->nc_flag & NCF_WHITE); cache_out_ts(ncp, tsp, ticksp); if (cache_neg_hit_prep(ncp)) cache_neg_promote(ncp); else cache_neg_hit_finish(ncp); mtx_unlock(dvlp); if (whiteout) cnp->cn_flags |= ISWHITEOUT; return (ENOENT); } /** * Lookup a name in the name cache * * # Arguments * * - dvp: Parent directory in which to search. * - vpp: Return argument. Will contain desired vnode on cache hit. * - cnp: Parameters of the name search. The most interesting bits of * the cn_flags field have the following meanings: * - MAKEENTRY: If clear, free an entry from the cache rather than look * it up. * - ISDOTDOT: Must be set if and only if cn_nameptr == ".." 
* - tsp: Return storage for cache timestamp. On a successful (positive * or negative) lookup, tsp will be filled with any timespec that * was stored when this cache entry was created. However, it will * be clear for "." entries. * - ticks: Return storage for alternate cache timestamp. On a successful * (positive or negative) lookup, it will contain the ticks value * that was current when the cache entry was created, unless cnp * was ".". * * Either both tsp and ticks have to be provided or neither of them. * * # Returns * * - -1: A positive cache hit. vpp will contain the desired vnode. * - ENOENT: A negative cache hit, or dvp was recycled out from under us due * to a forced unmount. vpp will not be modified. If the entry * is a whiteout, then the ISWHITEOUT flag will be set in * cnp->cn_flags. * - 0: A cache miss. vpp will not be modified. * * # Locking * * On a cache hit, vpp will be returned locked and ref'd. If we're looking up * .., dvp is unlocked. If we're looking up . an extra ref is taken, but the * lock is not recursively acquired. */ static int __noinline cache_lookup_fallback(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct timespec *tsp, int *ticksp) { struct namecache *ncp; struct mtx *blp; uint32_t hash; enum vgetstate vs; int error; bool whiteout; MPASS((cnp->cn_flags & ISDOTDOT) == 0); MPASS((cnp->cn_flags & (MAKEENTRY | NC_KEEPPOSENTRY)) != 0); retry: hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); blp = HASH2BUCKETLOCK(hash); mtx_lock(blp); ncp = cache_ncp_find(dvp, cnp, hash); if (__predict_false(ncp == NULL)) { mtx_unlock(blp); SDT_PROBE2(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr); counter_u64_add(nummiss, 1); return (0); } if (ncp->nc_flag & NCF_NEGATIVE) goto negative_success; counter_u64_add(numposhits, 1); *vpp = ncp->nc_vp; SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, *vpp); cache_out_ts(ncp, tsp, ticksp); MPASS(dvp != *vpp); vs = vget_prep(*vpp); mtx_unlock(blp); error = vget_finish(*vpp, cnp->cn_lkflags, vs); if (error) { *vpp = NULL; goto retry; } return (-1); negative_success: /* * We don't get here with regular lookup apart from corner cases. 
*/ if (__predict_true(cnp->cn_nameiop == CREATE)) { if (cnp->cn_flags & ISLASTCN) { counter_u64_add(numnegzaps, 1); error = cache_zap_locked_bucket(ncp, cnp, hash, blp); if (__predict_false(error != 0)) { atomic_add_long(&zap_bucket_fail2, 1); goto retry; } cache_free(ncp); return (0); } } whiteout = (ncp->nc_flag & NCF_WHITE); cache_out_ts(ncp, tsp, ticksp); if (cache_neg_hit_prep(ncp)) cache_neg_promote(ncp); else cache_neg_hit_finish(ncp); mtx_unlock(blp); if (whiteout) cnp->cn_flags |= ISWHITEOUT; return (ENOENT); } int cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct timespec *tsp, int *ticksp) { struct namecache *ncp; uint32_t hash; enum vgetstate vs; int error; bool whiteout, neg_promote; u_short nc_flag; MPASS((tsp == NULL && ticksp == NULL) || (tsp != NULL && ticksp != NULL)); #ifdef DEBUG_CACHE if (__predict_false(!doingcache)) { cnp->cn_flags &= ~MAKEENTRY; return (0); } #endif if (__predict_false(cnp->cn_nameptr[0] == '.')) { if (cnp->cn_namelen == 1) return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp)); if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') return (cache_lookup_dotdot(dvp, vpp, cnp, tsp, ticksp)); } MPASS((cnp->cn_flags & ISDOTDOT) == 0); if ((cnp->cn_flags & (MAKEENTRY | NC_KEEPPOSENTRY)) == 0) { cache_remove_cnp(dvp, cnp); return (0); } hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); vfs_smr_enter(); ncp = cache_ncp_find(dvp, cnp, hash); if (__predict_false(ncp == NULL)) { vfs_smr_exit(); SDT_PROBE2(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr); counter_u64_add(nummiss, 1); return (0); } nc_flag = atomic_load_char(&ncp->nc_flag); if (nc_flag & NCF_NEGATIVE) goto negative_success; counter_u64_add(numposhits, 1); *vpp = ncp->nc_vp; SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, *vpp); cache_out_ts(ncp, tsp, ticksp); MPASS(dvp != *vpp); if (!cache_ncp_canuse(ncp)) { vfs_smr_exit(); *vpp = NULL; goto out_fallback; } vs = vget_prep_smr(*vpp); vfs_smr_exit(); if (__predict_false(vs == VGET_NONE)) { *vpp = NULL; goto out_fallback; } error = vget_finish(*vpp, cnp->cn_lkflags, vs); if (error) { *vpp = NULL; goto out_fallback; } return (-1); negative_success: if (cnp->cn_nameiop == CREATE) { if (cnp->cn_flags & ISLASTCN) { vfs_smr_exit(); goto out_fallback; } } cache_out_ts(ncp, tsp, ticksp); whiteout = (atomic_load_char(&ncp->nc_flag) & NCF_WHITE); neg_promote = cache_neg_hit_prep(ncp); if (!cache_ncp_canuse(ncp)) { cache_neg_hit_abort(ncp); vfs_smr_exit(); goto out_fallback; } if (neg_promote) { vfs_smr_exit(); if (!cache_neg_promote_cond(dvp, cnp, ncp, hash)) goto out_fallback; } else { cache_neg_hit_finish(ncp); vfs_smr_exit(); } if (whiteout) cnp->cn_flags |= ISWHITEOUT; return (ENOENT); out_fallback: return (cache_lookup_fallback(dvp, vpp, cnp, tsp, ticksp)); } struct celockstate { struct mtx *vlp[3]; struct mtx *blp[2]; }; CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3)); CTASSERT((nitems(((struct celockstate *)0)->blp) == 2)); static inline void cache_celockstate_init(struct celockstate *cel) { bzero(cel, sizeof(*cel)); } static void cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp, struct vnode *dvp) { struct mtx *vlp1, *vlp2; MPASS(cel->vlp[0] == NULL); MPASS(cel->vlp[1] == NULL); MPASS(cel->vlp[2] == NULL); MPASS(vp != NULL || dvp != NULL); vlp1 = VP2VNODELOCK(vp); vlp2 = VP2VNODELOCK(dvp); cache_sort_vnodes(&vlp1, &vlp2); if (vlp1 != NULL) { mtx_lock(vlp1); cel->vlp[0] = vlp1; } mtx_lock(vlp2); cel->vlp[1] = vlp2; } static void cache_unlock_vnodes_cel(struct 
celockstate *cel) { MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL); if (cel->vlp[0] != NULL) mtx_unlock(cel->vlp[0]); if (cel->vlp[1] != NULL) mtx_unlock(cel->vlp[1]); if (cel->vlp[2] != NULL) mtx_unlock(cel->vlp[2]); } static bool cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp) { struct mtx *vlp; bool ret; cache_assert_vlp_locked(cel->vlp[0]); cache_assert_vlp_locked(cel->vlp[1]); MPASS(cel->vlp[2] == NULL); MPASS(vp != NULL); vlp = VP2VNODELOCK(vp); ret = true; if (vlp >= cel->vlp[1]) { mtx_lock(vlp); } else { if (mtx_trylock(vlp)) goto out; cache_unlock_vnodes_cel(cel); atomic_add_long(&cache_lock_vnodes_cel_3_failures, 1); if (vlp < cel->vlp[0]) { mtx_lock(vlp); mtx_lock(cel->vlp[0]); mtx_lock(cel->vlp[1]); } else { if (cel->vlp[0] != NULL) mtx_lock(cel->vlp[0]); mtx_lock(vlp); mtx_lock(cel->vlp[1]); } ret = false; } out: cel->vlp[2] = vlp; return (ret); } static void cache_lock_buckets_cel(struct celockstate *cel, struct mtx *blp1, struct mtx *blp2) { MPASS(cel->blp[0] == NULL); MPASS(cel->blp[1] == NULL); cache_sort_vnodes(&blp1, &blp2); if (blp1 != NULL) { mtx_lock(blp1); cel->blp[0] = blp1; } mtx_lock(blp2); cel->blp[1] = blp2; } static void cache_unlock_buckets_cel(struct celockstate *cel) { if (cel->blp[0] != NULL) mtx_unlock(cel->blp[0]); mtx_unlock(cel->blp[1]); } /* * Lock part of the cache affected by the insertion. * * This means vnodelocks for dvp, vp and the relevant bucketlock. * However, insertion can result in removal of an old entry. In this * case we have an additional vnode and bucketlock pair to lock. * * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while * preserving the locking order (smaller address first). */ static void cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp, uint32_t hash) { struct namecache *ncp; struct mtx *blps[2]; u_char nc_flag; blps[0] = HASH2BUCKETLOCK(hash); for (;;) { blps[1] = NULL; cache_lock_vnodes_cel(cel, dvp, vp); if (vp == NULL || vp->v_type != VDIR) break; ncp = atomic_load_consume_ptr(&vp->v_cache_dd); if (ncp == NULL) break; nc_flag = atomic_load_char(&ncp->nc_flag); if ((nc_flag & NCF_ISDOTDOT) == 0) break; MPASS(ncp->nc_dvp == vp); blps[1] = NCP2BUCKETLOCK(ncp); if ((nc_flag & NCF_NEGATIVE) != 0) break; if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp)) break; /* * All vnodes got re-locked. Re-validate the state and if * nothing changed we are done. Otherwise restart. 
*/ if (ncp == vp->v_cache_dd && (ncp->nc_flag & NCF_ISDOTDOT) != 0 && blps[1] == NCP2BUCKETLOCK(ncp) && VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2]) break; cache_unlock_vnodes_cel(cel); cel->vlp[0] = NULL; cel->vlp[1] = NULL; cel->vlp[2] = NULL; } cache_lock_buckets_cel(cel, blps[0], blps[1]); } static void cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp, uint32_t hash) { struct namecache *ncp; struct mtx *blps[2]; u_char nc_flag; blps[0] = HASH2BUCKETLOCK(hash); for (;;) { blps[1] = NULL; cache_lock_vnodes_cel(cel, dvp, vp); ncp = atomic_load_consume_ptr(&dvp->v_cache_dd); if (ncp == NULL) break; nc_flag = atomic_load_char(&ncp->nc_flag); if ((nc_flag & NCF_ISDOTDOT) == 0) break; MPASS(ncp->nc_dvp == dvp); blps[1] = NCP2BUCKETLOCK(ncp); if ((nc_flag & NCF_NEGATIVE) != 0) break; if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp)) break; if (ncp == dvp->v_cache_dd && (ncp->nc_flag & NCF_ISDOTDOT) != 0 && blps[1] == NCP2BUCKETLOCK(ncp) && VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2]) break; cache_unlock_vnodes_cel(cel); cel->vlp[0] = NULL; cel->vlp[1] = NULL; cel->vlp[2] = NULL; } cache_lock_buckets_cel(cel, blps[0], blps[1]); } static void cache_enter_unlock(struct celockstate *cel) { cache_unlock_buckets_cel(cel); cache_unlock_vnodes_cel(cel); } static void __noinline cache_enter_dotdot_prep(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) { struct celockstate cel; struct namecache *ncp; uint32_t hash; int len; if (atomic_load_ptr(&dvp->v_cache_dd) == NULL) return; len = cnp->cn_namelen; cache_celockstate_init(&cel); hash = cache_get_hash(cnp->cn_nameptr, len, dvp); cache_enter_lock_dd(&cel, dvp, vp, hash); ncp = dvp->v_cache_dd; if (ncp != NULL && (ncp->nc_flag & NCF_ISDOTDOT)) { KASSERT(ncp->nc_dvp == dvp, ("wrong isdotdot parent")); cache_zap_locked(ncp); } else { ncp = NULL; } atomic_store_ptr(&dvp->v_cache_dd, NULL); cache_enter_unlock(&cel); if (ncp != NULL) cache_free(ncp); } /* * Add an entry to the cache. */ void cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, struct timespec *tsp, struct timespec *dtsp) { struct celockstate cel; struct namecache *ncp, *n2, *ndd; struct namecache_ts *ncp_ts; uint32_t hash; int flag; int len; KASSERT(cnp->cn_namelen <= NAME_MAX, ("%s: passed len %ld exceeds NAME_MAX (%d)", __func__, cnp->cn_namelen, NAME_MAX)); VNPASS(!VN_IS_DOOMED(dvp), dvp); VNPASS(dvp->v_type != VNON, dvp); if (vp != NULL) { VNPASS(!VN_IS_DOOMED(vp), vp); VNPASS(vp->v_type != VNON, vp); } if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') { KASSERT(dvp == vp, ("%s: different vnodes for dot entry (%p; %p)\n", __func__, dvp, vp)); } else { KASSERT(dvp != vp, ("%s: same vnode for non-dot entry [%s] (%p)\n", __func__, cnp->cn_nameptr, dvp)); } #ifdef DEBUG_CACHE if (__predict_false(!doingcache)) return; #endif flag = 0; if (__predict_false(cnp->cn_nameptr[0] == '.')) { if (cnp->cn_namelen == 1) return; if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { cache_enter_dotdot_prep(dvp, vp, cnp); flag = NCF_ISDOTDOT; } } ncp = cache_alloc(cnp->cn_namelen, tsp != NULL); if (ncp == NULL) return; cache_celockstate_init(&cel); ndd = NULL; ncp_ts = NULL; /* * Calculate the hash key and setup as much of the new * namecache entry as possible before acquiring the lock. 
*/ ncp->nc_flag = flag | NCF_WIP; ncp->nc_vp = vp; if (vp == NULL) cache_neg_init(ncp); ncp->nc_dvp = dvp; if (tsp != NULL) { ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); ncp_ts->nc_time = *tsp; ncp_ts->nc_ticks = ticks; ncp_ts->nc_nc.nc_flag |= NCF_TS; if (dtsp != NULL) { ncp_ts->nc_dotdottime = *dtsp; ncp_ts->nc_nc.nc_flag |= NCF_DTS; } } len = ncp->nc_nlen = cnp->cn_namelen; hash = cache_get_hash(cnp->cn_nameptr, len, dvp); memcpy(ncp->nc_name, cnp->cn_nameptr, len); ncp->nc_name[len] = '\0'; cache_enter_lock(&cel, dvp, vp, hash); /* * See if this vnode or negative entry is already in the cache * with this name. This can happen with concurrent lookups of * the same path name. */ n2 = cache_ncp_find(dvp, cnp, hash); if (n2 != NULL) { MPASS(cache_ncp_canuse(n2)); if ((n2->nc_flag & NCF_NEGATIVE) != 0) KASSERT(vp == NULL, ("%s: found entry pointing to a different vnode " "(%p != %p); name [%s]", __func__, NULL, vp, cnp->cn_nameptr)); else KASSERT(n2->nc_vp == vp, ("%s: found entry pointing to a different vnode " "(%p != %p); name [%s]", __func__, n2->nc_vp, vp, cnp->cn_nameptr)); /* * Entries are supposed to be immutable unless in the * process of getting destroyed. Accommodating for * changing timestamps is possible but not worth it. * This should be harmless in terms of correctness, in * the worst case resulting in an earlier expiration. * Alternatively, the found entry can be replaced * altogether. */ MPASS((n2->nc_flag & (NCF_TS | NCF_DTS)) == (ncp->nc_flag & (NCF_TS | NCF_DTS))); #if 0 if (tsp != NULL) { KASSERT((n2->nc_flag & NCF_TS) != 0, ("no NCF_TS")); n2_ts = __containerof(n2, struct namecache_ts, nc_nc); n2_ts->nc_time = ncp_ts->nc_time; n2_ts->nc_ticks = ncp_ts->nc_ticks; if (dtsp != NULL) { n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime; n2_ts->nc_nc.nc_flag |= NCF_DTS; } } #endif SDT_PROBE3(vfs, namecache, enter, duplicate, dvp, ncp->nc_name, vp); goto out_unlock_free; } if (flag == NCF_ISDOTDOT) { /* * See if we are trying to add .. entry, but some other lookup * has populated v_cache_dd pointer already. */ if (dvp->v_cache_dd != NULL) goto out_unlock_free; KASSERT(vp == NULL || vp->v_type == VDIR, ("wrong vnode type %p", vp)); atomic_thread_fence_rel(); atomic_store_ptr(&dvp->v_cache_dd, ncp); } else if (vp != NULL) { /* * For this case, the cache entry maps both the * directory name in it and the name ".." for the * directory's parent. */ if ((ndd = vp->v_cache_dd) != NULL) { if ((ndd->nc_flag & NCF_ISDOTDOT) != 0) cache_zap_locked(ndd); else ndd = NULL; } atomic_thread_fence_rel(); atomic_store_ptr(&vp->v_cache_dd, ncp); } if (flag != NCF_ISDOTDOT) { if (LIST_EMPTY(&dvp->v_cache_src)) { cache_hold_vnode(dvp); } LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src); } /* * If the entry is "negative", we place it into the * "negative" cache queue, otherwise, we place it into the * destination vnode's cache entries queue. */ if (vp != NULL) { TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name, vp); } else { if (cnp->cn_flags & ISWHITEOUT) atomic_store_char(&ncp->nc_flag, ncp->nc_flag | NCF_WHITE); cache_neg_insert(ncp); SDT_PROBE2(vfs, namecache, enter_negative, done, dvp, ncp->nc_name); } /* * Insert the new namecache entry into the appropriate chain * within the cache entries table. */ CK_SLIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash); atomic_thread_fence_rel(); /* * Mark the entry as fully constructed. * It is immutable past this point until its removal. 
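 *
 * Informal summary of the publication protocol as implemented above: the
 * entry was hashed with NCF_WIP still set, so lockless consumers only
 * start trusting it once the flag is cleared below.  cache_lookup() copies
 * the fields it needs and then re-checks the entry with cache_ncp_canuse(),
 * falling back to the locked path if the entry was still in flight.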
*/ atomic_store_char(&ncp->nc_flag, ncp->nc_flag & ~NCF_WIP); cache_enter_unlock(&cel); if (ndd != NULL) cache_free(ndd); return; out_unlock_free: cache_enter_unlock(&cel); cache_free(ncp); return; } /* * A variant of the above accepting flags. * * - VFS_CACHE_DROPOLD -- if a conflicting entry is found, drop it. * * TODO: this routine is a hack. It blindly removes the old entry, even if it * happens to match and it is doing it in an inefficient manner. It was added * to accommodate NFS which runs into a case where the target for a given name * may change from under it. Note this does nothing to solve the following * race: 2 callers of cache_enter_time_flags pass a different target vnode for * the same [dvp, cnp]. It may be argued that code doing this is broken. */ void cache_enter_time_flags(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, struct timespec *tsp, struct timespec *dtsp, int flags) { MPASS((flags & ~(VFS_CACHE_DROPOLD)) == 0); if (flags & VFS_CACHE_DROPOLD) cache_remove_cnp(dvp, cnp); cache_enter_time(dvp, vp, cnp, tsp, dtsp); } static u_long cache_roundup_2(u_long val) { u_long res; for (res = 1; res <= val; res <<= 1) continue; return (res); } static struct nchashhead * nchinittbl(u_long elements, u_long *hashmask) { struct nchashhead *hashtbl; u_long hashsize, i; hashsize = cache_roundup_2(elements) / 2; hashtbl = malloc(hashsize * sizeof(*hashtbl), M_VFSCACHE, M_WAITOK); for (i = 0; i < hashsize; i++) CK_SLIST_INIT(&hashtbl[i]); *hashmask = hashsize - 1; return (hashtbl); } static void ncfreetbl(struct nchashhead *hashtbl) { free(hashtbl, M_VFSCACHE); } /* * Name cache initialization, from vfs_init() when we are booting */ static void nchinit(void *dummy __unused) { u_int i; cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL_SIZE, NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); cache_zone_small_ts = uma_zcreate("STS VFS Cache", CACHE_ZONE_SMALL_TS_SIZE, NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE_SIZE, NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); cache_zone_large_ts = uma_zcreate("LTS VFS Cache", CACHE_ZONE_LARGE_TS_SIZE, NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); VFS_SMR_ZONE_SET(cache_zone_small); VFS_SMR_ZONE_SET(cache_zone_small_ts); VFS_SMR_ZONE_SET(cache_zone_large); VFS_SMR_ZONE_SET(cache_zone_large_ts); ncsize = desiredvnodes * ncsizefactor; cache_recalc_neg_min(); nchashtbl = nchinittbl(ncsize, &nchash); ncbuckethash = cache_roundup_2(mp_ncpus * mp_ncpus) - 1; if (ncbuckethash < 7) /* arbitrarily chosen to avoid having one lock */ ncbuckethash = 7; if (ncbuckethash > nchash) ncbuckethash = nchash; bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE, M_WAITOK | M_ZERO); for (i = 0; i < numbucketlocks; i++) mtx_init(&bucketlocks[i], "ncbuc", NULL, MTX_DUPOK | MTX_RECURSE); ncvnodehash = ncbuckethash; vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE, M_WAITOK | M_ZERO); for (i = 0; i < numvnodelocks; i++) mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE); for (i = 0; i < numneglists; i++) { mtx_init(&neglists[i].nl_evict_lock, "ncnege", NULL, MTX_DEF); mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF); TAILQ_INIT(&neglists[i].nl_list); TAILQ_INIT(&neglists[i].nl_hotlist); } } SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); void cache_vnode_init(struct vnode *vp) { LIST_INIT(&vp->v_cache_src); TAILQ_INIT(&vp->v_cache_dst); vp->v_cache_dd = 
NULL; cache_prehash(vp); } /* * Induce transient cache misses for lockless operation in cache_lookup() by * using a temporary hash table. * * This will force a fs lookup. * * Synchronisation is done in 2 steps, calling vfs_smr_synchronize each time * to observe all CPUs not performing the lookup. */ static void cache_changesize_set_temp(struct nchashhead *temptbl, u_long temphash) { MPASS(temphash < nchash); /* * Change the size. The new size is smaller and can safely be used * against the existing table. All lookups which now hash wrong will * result in a cache miss, which all callers are supposed to know how * to handle. */ atomic_store_long(&nchash, temphash); atomic_thread_fence_rel(); vfs_smr_synchronize(); /* * At this point everyone sees the updated hash value, but they still * see the old table. */ atomic_store_ptr(&nchashtbl, temptbl); atomic_thread_fence_rel(); vfs_smr_synchronize(); /* * At this point everyone sees the updated table pointer and size pair. */ } /* * Set the new hash table. * * Similarly to cache_changesize_set_temp(), this has to synchronize against * lockless operation in cache_lookup(). */ static void cache_changesize_set_new(struct nchashhead *new_tbl, u_long new_hash) { MPASS(nchash < new_hash); /* * Change the pointer first. This wont result in out of bounds access * since the temporary table is guaranteed to be smaller. */ atomic_store_ptr(&nchashtbl, new_tbl); atomic_thread_fence_rel(); vfs_smr_synchronize(); /* * At this point everyone sees the updated pointer value, but they * still see the old size. */ atomic_store_long(&nchash, new_hash); atomic_thread_fence_rel(); vfs_smr_synchronize(); /* * At this point everyone sees the updated table pointer and size pair. */ } void cache_changesize(u_long newmaxvnodes) { struct nchashhead *new_nchashtbl, *old_nchashtbl, *temptbl; u_long new_nchash, old_nchash, temphash; struct namecache *ncp; uint32_t hash; u_long newncsize; u_long i; newncsize = newmaxvnodes * ncsizefactor; newmaxvnodes = cache_roundup_2(newmaxvnodes * 2); if (newmaxvnodes < numbucketlocks) newmaxvnodes = numbucketlocks; new_nchashtbl = nchinittbl(newmaxvnodes, &new_nchash); /* If same hash table size, nothing to do */ if (nchash == new_nchash) { ncfreetbl(new_nchashtbl); return; } temptbl = nchinittbl(1, &temphash); /* * Move everything from the old hash table to the new table. * None of the namecache entries in the table can be removed * because to do so, they have to be removed from the hash table. */ cache_lock_all_vnodes(); cache_lock_all_buckets(); old_nchashtbl = nchashtbl; old_nchash = nchash; cache_changesize_set_temp(temptbl, temphash); for (i = 0; i <= old_nchash; i++) { while ((ncp = CK_SLIST_FIRST(&old_nchashtbl[i])) != NULL) { hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp); CK_SLIST_REMOVE(&old_nchashtbl[i], ncp, namecache, nc_hash); CK_SLIST_INSERT_HEAD(&new_nchashtbl[hash & new_nchash], ncp, nc_hash); } } ncsize = newncsize; cache_recalc_neg_min(); cache_changesize_set_new(new_nchashtbl, new_nchash); cache_unlock_all_buckets(); cache_unlock_all_vnodes(); ncfreetbl(old_nchashtbl); ncfreetbl(temptbl); } /* * Remove all entries from and to a particular vnode. 
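 *
 * Callers normally go through the cache_purge() wrapper further below,
 * which skips the work when the vnode has no entries.  A minimal usage
 * sketch (illustrative only, mirroring what cache_vop_rmdir() does):
 *
 *	cache_purge(vp);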
*/ static void cache_purge_impl(struct vnode *vp) { struct cache_freebatch batch; struct namecache *ncp; struct mtx *vlp, *vlp2; TAILQ_INIT(&batch); vlp = VP2VNODELOCK(vp); vlp2 = NULL; mtx_lock(vlp); retry: while (!LIST_EMPTY(&vp->v_cache_src)) { ncp = LIST_FIRST(&vp->v_cache_src); if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) goto retry; TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); } while (!TAILQ_EMPTY(&vp->v_cache_dst)) { ncp = TAILQ_FIRST(&vp->v_cache_dst); if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) goto retry; TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); } ncp = vp->v_cache_dd; if (ncp != NULL) { KASSERT(ncp->nc_flag & NCF_ISDOTDOT, ("lost dotdot link")); if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) goto retry; TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); } KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); mtx_unlock(vlp); if (vlp2 != NULL) mtx_unlock(vlp2); cache_free_batch(&batch); } /* * Opportunistic check to see if there is anything to do. */ static bool cache_has_entries(struct vnode *vp) { if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) && atomic_load_ptr(&vp->v_cache_dd) == NULL) return (false); return (true); } void cache_purge(struct vnode *vp) { SDT_PROBE1(vfs, namecache, purge, done, vp); if (!cache_has_entries(vp)) return; cache_purge_impl(vp); } /* * Only to be used by vgone. */ void cache_purge_vgone(struct vnode *vp) { struct mtx *vlp; VNPASS(VN_IS_DOOMED(vp), vp); if (cache_has_entries(vp)) { cache_purge_impl(vp); return; } /* * Serialize against a potential thread doing cache_purge. */ vlp = VP2VNODELOCK(vp); mtx_wait_unlocked(vlp); if (cache_has_entries(vp)) { cache_purge_impl(vp); return; } return; } /* * Remove all negative entries for a particular directory vnode. */ void cache_purge_negative(struct vnode *vp) { struct cache_freebatch batch; struct namecache *ncp, *nnp; struct mtx *vlp; SDT_PROBE1(vfs, namecache, purge_negative, done, vp); if (LIST_EMPTY(&vp->v_cache_src)) return; TAILQ_INIT(&batch); vlp = VP2VNODELOCK(vp); mtx_lock(vlp); LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) { if (!(ncp->nc_flag & NCF_NEGATIVE)) continue; cache_zap_negative_locked_vnode_kl(ncp, vp); TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); } mtx_unlock(vlp); cache_free_batch(&batch); } /* * Entry points for modifying VOP operations. */ void cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp) { ASSERT_VOP_IN_SEQC(fdvp); ASSERT_VOP_IN_SEQC(fvp); ASSERT_VOP_IN_SEQC(tdvp); if (tvp != NULL) ASSERT_VOP_IN_SEQC(tvp); cache_purge(fvp); if (tvp != NULL) { cache_purge(tvp); KASSERT(!cache_remove_cnp(tdvp, tcnp), ("%s: lingering negative entry", __func__)); } else { cache_remove_cnp(tdvp, tcnp); } /* * TODO * * Historically renaming was always purging all revelang entries, * but that's quite wasteful. In particular turns out that in many cases * the target file is immediately accessed after rename, inducing a cache * miss. * * Recode this to reduce relocking and reuse the existing entry (if any) * instead of just removing it above and allocating a new one here. */ cache_enter(tdvp, fvp, tcnp); } void cache_vop_rmdir(struct vnode *dvp, struct vnode *vp) { ASSERT_VOP_IN_SEQC(dvp); ASSERT_VOP_IN_SEQC(vp); cache_purge(vp); } #ifdef INVARIANTS /* * Validate that if an entry exists it matches. 
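 *
 * A hypothetical use, assuming a lookup just resolved cnp within dvp to
 * vp (illustrative sketch only):
 *
 *	cache_validate(dvp, vp, cnp);
 *
 * The call returns quietly unless a conflicting entry is found, in which
 * case it panics with the offending entry.  Only built with INVARIANTS.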
*/ void cache_validate(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) { struct namecache *ncp; struct mtx *blp; uint32_t hash; hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); if (CK_SLIST_EMPTY(NCHHASH(hash))) return; blp = HASH2BUCKETLOCK(hash); mtx_lock(blp); ncp = cache_ncp_find(dvp, cnp, hash); if (ncp != NULL && ncp->nc_vp != vp) { panic("%s: mismatch (%p != %p); ncp %p [%s] dvp %p\n", __func__, vp, ncp->nc_vp, ncp, ncp->nc_name, ncp->nc_dvp); } mtx_unlock(blp); } void cache_assert_no_entries(struct vnode *vp) { VNPASS(TAILQ_EMPTY(&vp->v_cache_dst), vp); VNPASS(LIST_EMPTY(&vp->v_cache_src), vp); VNPASS(vp->v_cache_dd == NULL, vp); } #endif /* * Flush all entries referencing a particular filesystem. */ void cache_purgevfs(struct mount *mp) { struct vnode *vp, *mvp; size_t visited __sdt_used, purged __sdt_used; visited = purged = 0; /* * Somewhat wasteful iteration over all vnodes. Would be better to * support filtering and avoid the interlock to begin with. */ MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { visited++; if (!cache_has_entries(vp)) { VI_UNLOCK(vp); continue; } vholdl(vp); VI_UNLOCK(vp); cache_purge(vp); purged++; vdrop(vp); } SDT_PROBE3(vfs, namecache, purgevfs, done, mp, visited, purged); } /* * Perform canonical checks and cache lookup and pass on to filesystem * through the vop_cachedlookup only if needed. */ int vfs_cache_lookup(struct vop_lookup_args *ap) { struct vnode *dvp; int error; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; int flags = cnp->cn_flags; *vpp = NULL; dvp = ap->a_dvp; if (dvp->v_type != VDIR) return (ENOTDIR); if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) return (EROFS); error = vn_dir_check_exec(dvp, cnp); if (error != 0) return (error); error = cache_lookup(dvp, vpp, cnp, NULL, NULL); if (error == 0) return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); if (error == -1) return (0); return (error); } /* Implementation of the getcwd syscall. */ int sys___getcwd(struct thread *td, struct __getcwd_args *uap) { char *buf, *retbuf; size_t buflen; int error; buflen = uap->buflen; if (__predict_false(buflen < 2)) return (EINVAL); if (buflen > MAXPATHLEN) buflen = MAXPATHLEN; buf = uma_zalloc(namei_zone, M_WAITOK); error = vn_getcwd(buf, &retbuf, &buflen); if (error == 0) error = copyout(retbuf, uap->buf, buflen); uma_zfree(namei_zone, buf); return (error); } int vn_getcwd(char *buf, char **retbuf, size_t *buflen) { struct pwd *pwd; int error; vfs_smr_enter(); pwd = pwd_get_smr(); error = vn_fullpath_any_smr(pwd->pwd_cdir, pwd->pwd_rdir, buf, retbuf, buflen, 0); VFS_SMR_ASSERT_NOT_ENTERED(); if (error < 0) { pwd = pwd_hold(curthread); error = vn_fullpath_any(pwd->pwd_cdir, pwd->pwd_rdir, buf, retbuf, buflen); pwd_drop(pwd); } #ifdef KTRACE if (KTRPOINT(curthread, KTR_NAMEI) && error == 0) ktrnamei(*retbuf); #endif return (error); } /* * Canonicalize a path by walking it forward and back. * * BUGS: * - Nothing guarantees the integrity of the entire chain. Consider the case * where the path "foo/bar/baz/qux" is passed, but "bar" is moved out of * "foo" into "quux" during the backwards walk. The result will be * "quux/bar/baz/qux", which could not have been obtained by an incremental * walk in userspace. Moreover, the path we return is inaccessible if the * calling thread lacks permission to traverse "quux". 
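 *
 *   A possible interleaving for the example above (hypothetical timeline):
 *	thread A: resolves "foo/bar/baz/qux" to its final vnode
 *	thread B: moves "foo/bar" into "quux"
 *	thread A: walks back up from "qux" and assembles "quux/bar/baz/qux"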
*/ static int kern___realpathat(struct thread *td, int fd, const char *path, char *buf, size_t size, int flags, enum uio_seg pathseg) { struct nameidata nd; char *retbuf, *freebuf; int error; if (flags != 0) return (EINVAL); NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | WANTPARENT | AUDITVNODE1, pathseg, path, fd, &cap_fstat_rights); if ((error = namei(&nd)) != 0) return (error); if (nd.ni_vp->v_type == VREG && nd.ni_dvp->v_type != VDIR && (nd.ni_vp->v_vflag & VV_ROOT) != 0) { struct vnode *covered_vp; /* * This happens if vp is a file mount. The call to * vn_fullpath_hardlink can panic if path resolution can't be * handled without the directory. * * To resolve this, we find the vnode which was mounted on - * this should have a unique global path since we disallow * mounting on linked files. */ error = vn_lock(nd.ni_vp, LK_SHARED); if (error != 0) goto out; covered_vp = nd.ni_vp->v_mount->mnt_vnodecovered; vref(covered_vp); VOP_UNLOCK(nd.ni_vp); error = vn_fullpath(covered_vp, &retbuf, &freebuf); vrele(covered_vp); } else { error = vn_fullpath_hardlink(nd.ni_vp, nd.ni_dvp, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, &retbuf, &freebuf, &size); } if (error == 0) { size_t len; len = strlen(retbuf) + 1; if (size < len) error = ENAMETOOLONG; else if (pathseg == UIO_USERSPACE) error = copyout(retbuf, buf, len); else memcpy(buf, retbuf, len); free(freebuf, M_TEMP); } out: vrele(nd.ni_vp); vrele(nd.ni_dvp); NDFREE_PNBUF(&nd); return (error); } int sys___realpathat(struct thread *td, struct __realpathat_args *uap) { return (kern___realpathat(td, uap->fd, uap->path, uap->buf, uap->size, uap->flags, UIO_USERSPACE)); } /* * Retrieve the full filesystem path that correspond to a vnode from the name * cache (if available) */ int vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf) { struct pwd *pwd; char *buf; size_t buflen; int error; if (__predict_false(vp == NULL)) return (EINVAL); buflen = MAXPATHLEN; buf = malloc(buflen, M_TEMP, M_WAITOK); vfs_smr_enter(); pwd = pwd_get_smr(); error = vn_fullpath_any_smr(vp, pwd->pwd_rdir, buf, retbuf, &buflen, 0); VFS_SMR_ASSERT_NOT_ENTERED(); if (error < 0) { pwd = pwd_hold(curthread); error = vn_fullpath_any(vp, pwd->pwd_rdir, buf, retbuf, &buflen); pwd_drop(pwd); } if (error == 0) *freebuf = buf; else free(buf, M_TEMP); return (error); } /* * This function is similar to vn_fullpath, but it attempts to lookup the * pathname relative to the global root mount point. This is required for the * auditing sub-system, as audited pathnames must be absolute, relative to the * global root mount point. 
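 *
 * A minimal calling sketch (illustrative only, error handling trimmed);
 * the local names are hypothetical and on success the storage returned
 * via freebuf must be released by the caller:
 *
 *	char *retbuf, *freebuf;
 *
 *	if (vn_fullpath_global(vp, &retbuf, &freebuf) == 0) {
 *		printf("global path: %s\n", retbuf);
 *		free(freebuf, M_TEMP);
 *	}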
*/ int vn_fullpath_global(struct vnode *vp, char **retbuf, char **freebuf) { char *buf; size_t buflen; int error; if (__predict_false(vp == NULL)) return (EINVAL); buflen = MAXPATHLEN; buf = malloc(buflen, M_TEMP, M_WAITOK); vfs_smr_enter(); error = vn_fullpath_any_smr(vp, rootvnode, buf, retbuf, &buflen, 0); VFS_SMR_ASSERT_NOT_ENTERED(); if (error < 0) { error = vn_fullpath_any(vp, rootvnode, buf, retbuf, &buflen); } if (error == 0) *freebuf = buf; else free(buf, M_TEMP); return (error); } static struct namecache * vn_dd_from_dst(struct vnode *vp) { struct namecache *ncp; cache_assert_vnode_locked(vp); TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) { if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) return (ncp); } return (NULL); } int vn_vptocnp(struct vnode **vp, char *buf, size_t *buflen) { struct vnode *dvp; struct namecache *ncp; struct mtx *vlp; int error; vlp = VP2VNODELOCK(*vp); mtx_lock(vlp); ncp = (*vp)->v_cache_dd; if (ncp != NULL && (ncp->nc_flag & NCF_ISDOTDOT) == 0) { KASSERT(ncp == vn_dd_from_dst(*vp), ("%s: mismatch for dd entry (%p != %p)", __func__, ncp, vn_dd_from_dst(*vp))); } else { ncp = vn_dd_from_dst(*vp); } if (ncp != NULL) { if (*buflen < ncp->nc_nlen) { mtx_unlock(vlp); vrele(*vp); counter_u64_add(numfullpathfail4, 1); error = ENOMEM; SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); return (error); } *buflen -= ncp->nc_nlen; memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen); SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp, ncp->nc_name, vp); dvp = *vp; *vp = ncp->nc_dvp; vref(*vp); mtx_unlock(vlp); vrele(dvp); return (0); } SDT_PROBE1(vfs, namecache, fullpath, miss, vp); mtx_unlock(vlp); vn_lock(*vp, LK_SHARED | LK_RETRY); error = VOP_VPTOCNP(*vp, &dvp, buf, buflen); vput(*vp); if (error) { counter_u64_add(numfullpathfail2, 1); SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); return (error); } *vp = dvp; if (VN_IS_DOOMED(dvp)) { /* forced unmount */ vrele(dvp); error = ENOENT; SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); return (error); } /* * *vp has its use count incremented still. */ return (0); } /* * Resolve a directory to a pathname. * * The name of the directory can always be found in the namecache or fetched * from the filesystem. There is also guaranteed to be only one parent, meaning * we can just follow vnodes up until we find the root. * * The vnode must be referenced. */ static int vn_fullpath_dir(struct vnode *vp, struct vnode *rdir, char *buf, char **retbuf, size_t *len, size_t addend) { #ifdef KDTRACE_HOOKS struct vnode *startvp = vp; #endif struct vnode *vp1; size_t buflen; int error; bool slash_prefixed; VNPASS(vp->v_type == VDIR || VN_IS_DOOMED(vp), vp); VNPASS(vp->v_usecount > 0, vp); buflen = *len; slash_prefixed = true; if (addend == 0) { MPASS(*len >= 2); buflen--; buf[buflen] = '\0'; slash_prefixed = false; } error = 0; SDT_PROBE1(vfs, namecache, fullpath, entry, vp); counter_u64_add(numfullpathcalls, 1); while (vp != rdir && vp != rootvnode) { /* * The vp vnode must be already fully constructed, * since it is either found in namecache or obtained * from VOP_VPTOCNP(). We may test for VV_ROOT safely * without obtaining the vnode lock. */ if ((vp->v_vflag & VV_ROOT) != 0) { vn_lock(vp, LK_RETRY | LK_SHARED); /* * With the vnode locked, check for races with * unmount, forced or not. Note that we * already verified that vp is not equal to * the root vnode, which means that * mnt_vnodecovered can be NULL only for the * case of unmount. 
*/ if (VN_IS_DOOMED(vp) || (vp1 = vp->v_mount->mnt_vnodecovered) == NULL || vp1->v_mountedhere != vp->v_mount) { vput(vp); error = ENOENT; SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); break; } vref(vp1); vput(vp); vp = vp1; continue; } VNPASS(vp->v_type == VDIR || VN_IS_DOOMED(vp), vp); error = vn_vptocnp(&vp, buf, &buflen); if (error) break; if (buflen == 0) { vrele(vp); error = ENOMEM; SDT_PROBE3(vfs, namecache, fullpath, return, error, startvp, NULL); break; } buf[--buflen] = '/'; slash_prefixed = true; } if (error) return (error); if (!slash_prefixed) { if (buflen == 0) { vrele(vp); counter_u64_add(numfullpathfail4, 1); SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM, startvp, NULL); return (ENOMEM); } buf[--buflen] = '/'; } counter_u64_add(numfullpathfound, 1); vrele(vp); *retbuf = buf + buflen; SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, *retbuf); *len -= buflen; *len += addend; return (0); } /* * Resolve an arbitrary vnode to a pathname. * * Note 2 caveats: * - hardlinks are not tracked, thus if the vnode is not a directory this can * resolve to a different path than the one used to find it * - namecache is not mandatory, meaning names are not guaranteed to be added * (in which case resolving fails) */ static void __inline cache_rev_failed_impl(int *reason, int line) { *reason = line; } #define cache_rev_failed(var) cache_rev_failed_impl((var), __LINE__) static int vn_fullpath_any_smr(struct vnode *vp, struct vnode *rdir, char *buf, char **retbuf, size_t *buflen, size_t addend) { #ifdef KDTRACE_HOOKS struct vnode *startvp = vp; #endif struct vnode *tvp; struct mount *mp; struct namecache *ncp; size_t orig_buflen; int reason; int error; #ifdef KDTRACE_HOOKS int i; #endif seqc_t vp_seqc, tvp_seqc; u_char nc_flag; VFS_SMR_ASSERT_ENTERED(); if (!atomic_load_char(&cache_fast_lookup_enabled)) { vfs_smr_exit(); return (-1); } orig_buflen = *buflen; if (addend == 0) { MPASS(*buflen >= 2); *buflen -= 1; buf[*buflen] = '\0'; } if (vp == rdir || vp == rootvnode) { if (addend == 0) { *buflen -= 1; buf[*buflen] = '/'; } goto out_ok; } #ifdef KDTRACE_HOOKS i = 0; #endif error = -1; ncp = NULL; /* for sdt probe down below */ vp_seqc = vn_seqc_read_any(vp); if (seqc_in_modify(vp_seqc)) { cache_rev_failed(&reason); goto out_abort; } for (;;) { #ifdef KDTRACE_HOOKS i++; #endif if ((vp->v_vflag & VV_ROOT) != 0) { mp = atomic_load_ptr(&vp->v_mount); if (mp == NULL) { cache_rev_failed(&reason); goto out_abort; } tvp = atomic_load_ptr(&mp->mnt_vnodecovered); tvp_seqc = vn_seqc_read_any(tvp); if (seqc_in_modify(tvp_seqc)) { cache_rev_failed(&reason); goto out_abort; } if (!vn_seqc_consistent(vp, vp_seqc)) { cache_rev_failed(&reason); goto out_abort; } vp = tvp; vp_seqc = tvp_seqc; continue; } ncp = atomic_load_consume_ptr(&vp->v_cache_dd); if (ncp == NULL) { cache_rev_failed(&reason); goto out_abort; } nc_flag = atomic_load_char(&ncp->nc_flag); if ((nc_flag & NCF_ISDOTDOT) != 0) { cache_rev_failed(&reason); goto out_abort; } if (ncp->nc_nlen >= *buflen) { cache_rev_failed(&reason); error = ENOMEM; goto out_abort; } *buflen -= ncp->nc_nlen; memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen); *buflen -= 1; buf[*buflen] = '/'; tvp = ncp->nc_dvp; tvp_seqc = vn_seqc_read_any(tvp); if (seqc_in_modify(tvp_seqc)) { cache_rev_failed(&reason); goto out_abort; } if (!vn_seqc_consistent(vp, vp_seqc)) { cache_rev_failed(&reason); goto out_abort; } /* * Acquire fence provided by vn_seqc_read_any above. 
*/ if (__predict_false(atomic_load_ptr(&vp->v_cache_dd) != ncp)) { cache_rev_failed(&reason); goto out_abort; } if (!cache_ncp_canuse(ncp)) { cache_rev_failed(&reason); goto out_abort; } vp = tvp; vp_seqc = tvp_seqc; if (vp == rdir || vp == rootvnode) break; } out_ok: vfs_smr_exit(); *retbuf = buf + *buflen; *buflen = orig_buflen - *buflen + addend; SDT_PROBE2(vfs, namecache, fullpath_smr, hit, startvp, *retbuf); return (0); out_abort: *buflen = orig_buflen; SDT_PROBE4(vfs, namecache, fullpath_smr, miss, startvp, ncp, reason, i); vfs_smr_exit(); return (error); } static int vn_fullpath_any(struct vnode *vp, struct vnode *rdir, char *buf, char **retbuf, size_t *buflen) { size_t orig_buflen, addend; int error; if (*buflen < 2) return (EINVAL); orig_buflen = *buflen; vref(vp); addend = 0; if (vp->v_type != VDIR) { *buflen -= 1; buf[*buflen] = '\0'; error = vn_vptocnp(&vp, buf, buflen); if (error) return (error); if (*buflen == 0) { vrele(vp); return (ENOMEM); } *buflen -= 1; buf[*buflen] = '/'; addend = orig_buflen - *buflen; } return (vn_fullpath_dir(vp, rdir, buf, retbuf, buflen, addend)); } /* * Resolve an arbitrary vnode to a pathname (taking care of hardlinks). * * Since the namecache does not track hardlinks, the caller is expected to * first look up the target vnode with WANTPARENT flag passed to namei to get * dvp and vp. * * Then we have 2 cases: * - if the found vnode is a directory, the path can be constructed just by * following names up the chain * - otherwise we populate the buffer with the saved name and start resolving * from the parent */ int vn_fullpath_hardlink(struct vnode *vp, struct vnode *dvp, const char *hrdl_name, size_t hrdl_name_length, char **retbuf, char **freebuf, size_t *buflen) { char *buf, *tmpbuf; struct pwd *pwd; size_t addend; int error; __enum_uint8(vtype) type; if (*buflen < 2) return (EINVAL); if (*buflen > MAXPATHLEN) *buflen = MAXPATHLEN; buf = malloc(*buflen, M_TEMP, M_WAITOK); addend = 0; /* * Check for VBAD to work around the vp_crossmp bug in lookup(). * * For example consider tmpfs on /tmp and realpath /tmp. ni_vp will be * set to mount point's root vnode while ni_dvp will be vp_crossmp. * If the type is VDIR (like in this very case) we can skip looking * at ni_dvp in the first place. However, since vnodes get passed here * unlocked the target may transition to doomed state (type == VBAD) * before we get to evaluate the condition. If this happens, we will * populate part of the buffer and descend to vn_fullpath_dir with * vp == vp_crossmp. Prevent the problem by checking for VBAD. 
*/ type = atomic_load_8(&vp->v_type); if (type == VBAD) { error = ENOENT; goto out_bad; } if (type != VDIR) { addend = hrdl_name_length + 2; if (*buflen < addend) { error = ENOMEM; goto out_bad; } *buflen -= addend; tmpbuf = buf + *buflen; tmpbuf[0] = '/'; memcpy(&tmpbuf[1], hrdl_name, hrdl_name_length); tmpbuf[addend - 1] = '\0'; vp = dvp; } vfs_smr_enter(); pwd = pwd_get_smr(); error = vn_fullpath_any_smr(vp, pwd->pwd_rdir, buf, retbuf, buflen, addend); VFS_SMR_ASSERT_NOT_ENTERED(); if (error < 0) { pwd = pwd_hold(curthread); vref(vp); error = vn_fullpath_dir(vp, pwd->pwd_rdir, buf, retbuf, buflen, addend); pwd_drop(pwd); } if (error != 0) goto out_bad; *freebuf = buf; return (0); out_bad: free(buf, M_TEMP); return (error); } struct vnode * vn_dir_dd_ino(struct vnode *vp) { struct namecache *ncp; struct vnode *ddvp; struct mtx *vlp; enum vgetstate vs; ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino"); vlp = VP2VNODELOCK(vp); mtx_lock(vlp); TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) { if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) continue; ddvp = ncp->nc_dvp; vs = vget_prep(ddvp); mtx_unlock(vlp); if (vget_finish(ddvp, LK_SHARED | LK_NOWAIT, vs)) return (NULL); return (ddvp); } mtx_unlock(vlp); return (NULL); } int vn_commname(struct vnode *vp, char *buf, u_int buflen) { struct namecache *ncp; struct mtx *vlp; int l; vlp = VP2VNODELOCK(vp); mtx_lock(vlp); TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) break; if (ncp == NULL) { mtx_unlock(vlp); return (ENOENT); } l = min(ncp->nc_nlen, buflen - 1); memcpy(buf, ncp->nc_name, l); mtx_unlock(vlp); buf[l] = '\0'; return (0); } /* * This function updates path string to vnode's full global path * and checks the size of the new path string against the pathlen argument. * * Requires a locked, referenced vnode. * Vnode is re-locked on success or ENODEV, otherwise unlocked. * * If vp is a directory, the call to vn_fullpath_global() always succeeds * because it falls back to the ".." lookup if the namecache lookup fails. */ int vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, u_int pathlen) { struct nameidata nd; struct vnode *vp1; char *rpath, *fbuf; int error; ASSERT_VOP_ELOCKED(vp, __func__); /* Construct global filesystem path from vp. */ VOP_UNLOCK(vp); error = vn_fullpath_global(vp, &rpath, &fbuf); if (error != 0) { vrele(vp); return (error); } if (strlen(rpath) >= pathlen) { vrele(vp); error = ENAMETOOLONG; goto out; } /* * Re-lookup the vnode by path to detect a possible rename. * As a side effect, the vnode is relocked. * If vnode was renamed, return ENOENT. */ NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, UIO_SYSSPACE, path); error = namei(&nd); if (error != 0) { vrele(vp); goto out; } NDFREE_PNBUF(&nd); vp1 = nd.ni_vp; vrele(vp); if (vp1 == vp) strcpy(path, rpath); else { vput(vp1); error = ENOENT; } out: free(fbuf, M_TEMP); return (error); } /* * This is similar to vn_path_to_global_path but allows for regular * files which may not be present in the cache. * * Requires a locked, referenced vnode. * Vnode is re-locked on success or ENODEV, otherwise unlocked. */ int vn_path_to_global_path_hardlink(struct thread *td, struct vnode *vp, struct vnode *dvp, char *path, u_int pathlen, const char *leaf_name, size_t leaf_length) { struct nameidata nd; struct vnode *vp1; char *rpath, *fbuf; size_t len; int error; ASSERT_VOP_ELOCKED(vp, __func__); /* * Construct global filesystem path from dvp, vp and leaf * name. 
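 * Since the namecache does not track hardlinks, leaf_name/leaf_length
 * (typically the component that was used to look vp up in dvp) allow
 * vn_fullpath_hardlink() to terminate the path even when vp itself has
 * no cached name; see the comment above vn_fullpath_hardlink().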
*/ VOP_UNLOCK(vp); len = pathlen; error = vn_fullpath_hardlink(vp, dvp, leaf_name, leaf_length, &rpath, &fbuf, &len); if (error != 0) { vrele(vp); return (error); } if (strlen(rpath) >= pathlen) { vrele(vp); error = ENAMETOOLONG; goto out; } /* * Re-lookup the vnode by path to detect a possible rename. * As a side effect, the vnode is relocked. * If vnode was renamed, return ENOENT. */ NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, UIO_SYSSPACE, path); error = namei(&nd); if (error != 0) { vrele(vp); goto out; } NDFREE_PNBUF(&nd); vp1 = nd.ni_vp; vrele(vp); if (vp1 == vp) strcpy(path, rpath); else { vput(vp1); error = ENOENT; } out: free(fbuf, M_TEMP); return (error); } #ifdef DDB static void db_print_vpath(struct vnode *vp) { while (vp != NULL) { db_printf("%p: ", vp); if (vp == rootvnode) { db_printf("/"); vp = NULL; } else { if (vp->v_vflag & VV_ROOT) { db_printf(""); vp = vp->v_mount->mnt_vnodecovered; } else { struct namecache *ncp; char *ncn; int i; ncp = TAILQ_FIRST(&vp->v_cache_dst); if (ncp != NULL) { ncn = ncp->nc_name; for (i = 0; i < ncp->nc_nlen; i++) db_printf("%c", *ncn++); vp = ncp->nc_dvp; } else { vp = NULL; } } } db_printf("\n"); } return; } DB_SHOW_COMMAND(vpath, db_show_vpath) { struct vnode *vp; if (!have_addr) { db_printf("usage: show vpath \n"); return; } vp = (struct vnode *)addr; db_print_vpath(vp); } #endif static int cache_fast_lookup = 1; #define CACHE_FPL_FAILED -2020 static int cache_vop_bad_vexec(struct vop_fplookup_vexec_args *v) { vn_printf(v->a_vp, "no proper vop_fplookup_vexec\n"); panic("no proper vop_fplookup_vexec"); } static int cache_vop_bad_symlink(struct vop_fplookup_symlink_args *v) { vn_printf(v->a_vp, "no proper vop_fplookup_symlink\n"); panic("no proper vop_fplookup_symlink"); } void cache_vop_vector_register(struct vop_vector *v) { size_t ops; ops = 0; if (v->vop_fplookup_vexec != NULL) { ops++; } if (v->vop_fplookup_symlink != NULL) { ops++; } if (ops == 2) { return; } if (ops == 0) { v->vop_fplookup_vexec = cache_vop_bad_vexec; v->vop_fplookup_symlink = cache_vop_bad_symlink; return; } printf("%s: invalid vop vector %p -- either all or none fplookup vops " "need to be provided", __func__, v); if (v->vop_fplookup_vexec == NULL) { printf("%s: missing vop_fplookup_vexec\n", __func__); } if (v->vop_fplookup_symlink == NULL) { printf("%s: missing vop_fplookup_symlink\n", __func__); } panic("bad vop vector %p", v); } #ifdef INVARIANTS void cache_validate_vop_vector(struct mount *mp, struct vop_vector *vops) { if (mp == NULL) return; if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) == 0) return; if (vops->vop_fplookup_vexec == NULL || vops->vop_fplookup_vexec == cache_vop_bad_vexec) panic("bad vop_fplookup_vexec on vector %p for filesystem %s", vops, mp->mnt_vfc->vfc_name); if (vops->vop_fplookup_symlink == NULL || vops->vop_fplookup_symlink == cache_vop_bad_symlink) panic("bad vop_fplookup_symlink on vector %p for filesystem %s", vops, mp->mnt_vfc->vfc_name); } #endif void cache_fast_lookup_enabled_recalc(void) { int lookup_flag; int mac_on; #ifdef MAC mac_on = mac_vnode_check_lookup_enabled(); mac_on |= mac_vnode_check_readlink_enabled(); #else mac_on = 0; #endif lookup_flag = atomic_load_int(&cache_fast_lookup); if (lookup_flag && !mac_on) { atomic_store_char(&cache_fast_lookup_enabled, true); } else { atomic_store_char(&cache_fast_lookup_enabled, false); } } static int syscal_vfs_cache_fast_lookup(SYSCTL_HANDLER_ARGS) { int error, old; old = atomic_load_int(&cache_fast_lookup); error = sysctl_handle_int(oidp, arg1, arg2, req); if (error == 0 && 
req->newptr && old != atomic_load_int(&cache_fast_lookup)) cache_fast_lookup_enabled_recalc(); return (error); } SYSCTL_PROC(_vfs_cache_param, OID_AUTO, fast_lookup, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE, &cache_fast_lookup, 0, syscal_vfs_cache_fast_lookup, "IU", ""); /* * Components of nameidata (or objects it can point to) which may * need restoring in case fast path lookup fails. */ struct nameidata_outer { size_t ni_pathlen; int cn_flags; }; struct nameidata_saved { #ifdef INVARIANTS char *cn_nameptr; size_t ni_pathlen; #endif }; #ifdef INVARIANTS struct cache_fpl_debug { size_t ni_pathlen; }; #endif struct cache_fpl { struct nameidata *ndp; struct componentname *cnp; char *nulchar; struct vnode *dvp; struct vnode *tvp; seqc_t dvp_seqc; seqc_t tvp_seqc; uint32_t hash; struct nameidata_saved snd; struct nameidata_outer snd_outer; int line; enum cache_fpl_status status:8; bool in_smr; bool fsearch; struct pwd **pwd; #ifdef INVARIANTS struct cache_fpl_debug debug; #endif }; static bool cache_fplookup_mp_supported(struct mount *mp); static bool cache_fplookup_is_mp(struct cache_fpl *fpl); static int cache_fplookup_cross_mount(struct cache_fpl *fpl); static int cache_fplookup_partial_setup(struct cache_fpl *fpl); static int cache_fplookup_skip_slashes(struct cache_fpl *fpl); static int cache_fplookup_trailingslash(struct cache_fpl *fpl); static void cache_fpl_pathlen_dec(struct cache_fpl *fpl); static void cache_fpl_pathlen_inc(struct cache_fpl *fpl); static void cache_fpl_pathlen_add(struct cache_fpl *fpl, size_t n); static void cache_fpl_pathlen_sub(struct cache_fpl *fpl, size_t n); static void cache_fpl_cleanup_cnp(struct componentname *cnp) { uma_zfree(namei_zone, cnp->cn_pnbuf); cnp->cn_pnbuf = NULL; cnp->cn_nameptr = NULL; } static struct vnode * cache_fpl_handle_root(struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; ndp = fpl->ndp; cnp = fpl->cnp; MPASS(*(cnp->cn_nameptr) == '/'); cnp->cn_nameptr++; cache_fpl_pathlen_dec(fpl); if (__predict_false(*(cnp->cn_nameptr) == '/')) { do { cnp->cn_nameptr++; cache_fpl_pathlen_dec(fpl); } while (*(cnp->cn_nameptr) == '/'); } return (ndp->ni_rootdir); } static void cache_fpl_checkpoint_outer(struct cache_fpl *fpl) { fpl->snd_outer.ni_pathlen = fpl->ndp->ni_pathlen; fpl->snd_outer.cn_flags = fpl->ndp->ni_cnd.cn_flags; } static void cache_fpl_checkpoint(struct cache_fpl *fpl) { #ifdef INVARIANTS fpl->snd.cn_nameptr = fpl->ndp->ni_cnd.cn_nameptr; fpl->snd.ni_pathlen = fpl->debug.ni_pathlen; #endif } static void cache_fpl_restore_partial(struct cache_fpl *fpl) { fpl->ndp->ni_cnd.cn_flags = fpl->snd_outer.cn_flags; #ifdef INVARIANTS fpl->debug.ni_pathlen = fpl->snd.ni_pathlen; #endif } static void cache_fpl_restore_abort(struct cache_fpl *fpl) { cache_fpl_restore_partial(fpl); /* * It is 0 on entry by API contract. 
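 * (That is, ni_resflags was clear when the fast path was entered, so
 * aborting just puts it back to 0 below in case it was partially
 * populated here.)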
*/ fpl->ndp->ni_resflags = 0; fpl->ndp->ni_cnd.cn_nameptr = fpl->ndp->ni_cnd.cn_pnbuf; fpl->ndp->ni_pathlen = fpl->snd_outer.ni_pathlen; } #ifdef INVARIANTS #define cache_fpl_smr_assert_entered(fpl) ({ \ struct cache_fpl *_fpl = (fpl); \ MPASS(_fpl->in_smr == true); \ VFS_SMR_ASSERT_ENTERED(); \ }) #define cache_fpl_smr_assert_not_entered(fpl) ({ \ struct cache_fpl *_fpl = (fpl); \ MPASS(_fpl->in_smr == false); \ VFS_SMR_ASSERT_NOT_ENTERED(); \ }) static void cache_fpl_assert_status(struct cache_fpl *fpl) { switch (fpl->status) { case CACHE_FPL_STATUS_UNSET: __assert_unreachable(); break; case CACHE_FPL_STATUS_DESTROYED: case CACHE_FPL_STATUS_ABORTED: case CACHE_FPL_STATUS_PARTIAL: case CACHE_FPL_STATUS_HANDLED: break; } } #else #define cache_fpl_smr_assert_entered(fpl) do { } while (0) #define cache_fpl_smr_assert_not_entered(fpl) do { } while (0) #define cache_fpl_assert_status(fpl) do { } while (0) #endif #define cache_fpl_smr_enter_initial(fpl) ({ \ struct cache_fpl *_fpl = (fpl); \ vfs_smr_enter(); \ _fpl->in_smr = true; \ }) #define cache_fpl_smr_enter(fpl) ({ \ struct cache_fpl *_fpl = (fpl); \ MPASS(_fpl->in_smr == false); \ vfs_smr_enter(); \ _fpl->in_smr = true; \ }) #define cache_fpl_smr_exit(fpl) ({ \ struct cache_fpl *_fpl = (fpl); \ MPASS(_fpl->in_smr == true); \ vfs_smr_exit(); \ _fpl->in_smr = false; \ }) static int cache_fpl_aborted_early_impl(struct cache_fpl *fpl, int line) { if (fpl->status != CACHE_FPL_STATUS_UNSET) { KASSERT(fpl->status == CACHE_FPL_STATUS_PARTIAL, ("%s: converting to abort from %d at %d, set at %d\n", __func__, fpl->status, line, fpl->line)); } cache_fpl_smr_assert_not_entered(fpl); fpl->status = CACHE_FPL_STATUS_ABORTED; fpl->line = line; return (CACHE_FPL_FAILED); } #define cache_fpl_aborted_early(x) cache_fpl_aborted_early_impl((x), __LINE__) static int __noinline cache_fpl_aborted_impl(struct cache_fpl *fpl, int line) { struct nameidata *ndp; struct componentname *cnp; ndp = fpl->ndp; cnp = fpl->cnp; if (fpl->status != CACHE_FPL_STATUS_UNSET) { KASSERT(fpl->status == CACHE_FPL_STATUS_PARTIAL, ("%s: converting to abort from %d at %d, set at %d\n", __func__, fpl->status, line, fpl->line)); } fpl->status = CACHE_FPL_STATUS_ABORTED; fpl->line = line; if (fpl->in_smr) cache_fpl_smr_exit(fpl); cache_fpl_restore_abort(fpl); /* * Resolving symlinks overwrites data passed by the caller. * Let namei know. 
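 * (Once at least one symlink has been traversed the original pathname
 * buffer is gone, so instead of a plain abort the lookup is marked
 * DESTROYED below and the buffer is freed, telling namei() that it
 * cannot simply restart the lookup with the regular path.)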
*/ if (ndp->ni_loopcnt > 0) { fpl->status = CACHE_FPL_STATUS_DESTROYED; cache_fpl_cleanup_cnp(cnp); } return (CACHE_FPL_FAILED); } #define cache_fpl_aborted(x) cache_fpl_aborted_impl((x), __LINE__) static int __noinline cache_fpl_partial_impl(struct cache_fpl *fpl, int line) { KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET, ("%s: setting to partial at %d, but already set to %d at %d\n", __func__, line, fpl->status, fpl->line)); cache_fpl_smr_assert_entered(fpl); fpl->status = CACHE_FPL_STATUS_PARTIAL; fpl->line = line; return (cache_fplookup_partial_setup(fpl)); } #define cache_fpl_partial(x) cache_fpl_partial_impl((x), __LINE__) static int cache_fpl_handled_impl(struct cache_fpl *fpl, int line) { KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET, ("%s: setting to handled at %d, but already set to %d at %d\n", __func__, line, fpl->status, fpl->line)); cache_fpl_smr_assert_not_entered(fpl); fpl->status = CACHE_FPL_STATUS_HANDLED; fpl->line = line; return (0); } #define cache_fpl_handled(x) cache_fpl_handled_impl((x), __LINE__) static int cache_fpl_handled_error_impl(struct cache_fpl *fpl, int error, int line) { KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET, ("%s: setting to handled at %d, but already set to %d at %d\n", __func__, line, fpl->status, fpl->line)); MPASS(error != 0); MPASS(error != CACHE_FPL_FAILED); cache_fpl_smr_assert_not_entered(fpl); fpl->status = CACHE_FPL_STATUS_HANDLED; fpl->line = line; fpl->dvp = NULL; fpl->tvp = NULL; return (error); } #define cache_fpl_handled_error(x, e) cache_fpl_handled_error_impl((x), (e), __LINE__) static bool cache_fpl_terminated(struct cache_fpl *fpl) { return (fpl->status != CACHE_FPL_STATUS_UNSET); } #define CACHE_FPL_SUPPORTED_CN_FLAGS \ (NC_NOMAKEENTRY | NC_KEEPPOSENTRY | LOCKLEAF | LOCKPARENT | WANTPARENT | \ FAILIFEXISTS | FOLLOW | EMPTYPATH | LOCKSHARED | ISRESTARTED | WILLBEDIR | \ ISOPEN | NOMACCHECK | AUDITVNODE1 | AUDITVNODE2 | NOCAPCHECK | OPENREAD | \ OPENWRITE | WANTIOCTLCAPS | OPENNAMED) #define CACHE_FPL_INTERNAL_CN_FLAGS \ (ISDOTDOT | MAKEENTRY | ISLASTCN) _Static_assert((CACHE_FPL_SUPPORTED_CN_FLAGS & CACHE_FPL_INTERNAL_CN_FLAGS) == 0, "supported and internal flags overlap"); static bool cache_fpl_islastcn(struct nameidata *ndp) { return (*ndp->ni_next == 0); } static bool cache_fpl_istrailingslash(struct cache_fpl *fpl) { MPASS(fpl->nulchar > fpl->cnp->cn_pnbuf); return (*(fpl->nulchar - 1) == '/'); } static bool cache_fpl_isdotdot(struct componentname *cnp) { if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' 
&& cnp->cn_nameptr[0] == '.') return (true); return (false); } static bool cache_can_fplookup(struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; struct thread *td; ndp = fpl->ndp; cnp = fpl->cnp; td = curthread; if (!atomic_load_char(&cache_fast_lookup_enabled)) { cache_fpl_aborted_early(fpl); return (false); } if ((cnp->cn_flags & ~CACHE_FPL_SUPPORTED_CN_FLAGS) != 0) { cache_fpl_aborted_early(fpl); return (false); } if (IN_CAPABILITY_MODE(td) || CAP_TRACING(td)) { cache_fpl_aborted_early(fpl); return (false); } if (AUDITING_TD(td)) { cache_fpl_aborted_early(fpl); return (false); } if (ndp->ni_startdir != NULL) { cache_fpl_aborted_early(fpl); return (false); } if ((cnp->cn_flags & OPENNAMED) != 0) { cache_fpl_aborted_early(fpl); return (false); } return (true); } static int __noinline cache_fplookup_dirfd(struct cache_fpl *fpl, struct vnode **vpp) { struct nameidata *ndp; struct componentname *cnp; int error; bool fsearch; ndp = fpl->ndp; cnp = fpl->cnp; error = fgetvp_lookup_smr(ndp, vpp, &fsearch); if (__predict_false(error != 0)) { return (cache_fpl_aborted(fpl)); } fpl->fsearch = fsearch; if ((*vpp)->v_type != VDIR) { if (!((cnp->cn_flags & EMPTYPATH) != 0 && cnp->cn_pnbuf[0] == '\0')) { cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, ENOTDIR)); } } return (0); } static int __noinline cache_fplookup_negative_promote(struct cache_fpl *fpl, struct namecache *oncp, uint32_t hash) { struct componentname *cnp; struct vnode *dvp; cnp = fpl->cnp; dvp = fpl->dvp; cache_fpl_smr_exit(fpl); if (cache_neg_promote_cond(dvp, cnp, oncp, hash)) return (cache_fpl_handled_error(fpl, ENOENT)); else return (cache_fpl_aborted(fpl)); } /* * The target vnode is not supported, prepare for the slow path to take over. */ static int __noinline cache_fplookup_partial_setup(struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; enum vgetstate dvs; struct vnode *dvp; struct pwd *pwd; seqc_t dvp_seqc; ndp = fpl->ndp; cnp = fpl->cnp; pwd = *(fpl->pwd); dvp = fpl->dvp; dvp_seqc = fpl->dvp_seqc; if (!pwd_hold_smr(pwd)) { return (cache_fpl_aborted(fpl)); } /* * Note that seqc is checked before the vnode is locked, so by * the time regular lookup gets to it it may have moved. * * Ultimately this does not affect correctness, any lookup errors * are userspace racing with itself. It is guaranteed that any * path which ultimately gets found could also have been found * by regular lookup going all the way in absence of concurrent * modifications. */ dvs = vget_prep_smr(dvp); cache_fpl_smr_exit(fpl); if (__predict_false(dvs == VGET_NONE)) { pwd_drop(pwd); return (cache_fpl_aborted(fpl)); } vget_finish_ref(dvp, dvs); if (!vn_seqc_consistent(dvp, dvp_seqc)) { vrele(dvp); pwd_drop(pwd); return (cache_fpl_aborted(fpl)); } cache_fpl_restore_partial(fpl); #ifdef INVARIANTS if (cnp->cn_nameptr != fpl->snd.cn_nameptr) { panic("%s: cn_nameptr mismatch (%p != %p) full [%s]\n", __func__, cnp->cn_nameptr, fpl->snd.cn_nameptr, cnp->cn_pnbuf); } #endif ndp->ni_startdir = dvp; cnp->cn_flags |= MAKEENTRY; if (cache_fpl_islastcn(ndp)) cnp->cn_flags |= ISLASTCN; if (cache_fpl_isdotdot(cnp)) cnp->cn_flags |= ISDOTDOT; /* * Skip potential extra slashes parsing did not take care of. * cache_fplookup_skip_slashes explains the mechanism. 
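 * For example, with a hypothetical input such as "a//b" the component
 * parser can leave cn_nameptr at the second '/', which the loop below
 * steps over before the regular lookup takes over.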
*/ if (__predict_false(*(cnp->cn_nameptr) == '/')) { do { cnp->cn_nameptr++; cache_fpl_pathlen_dec(fpl); } while (*(cnp->cn_nameptr) == '/'); } ndp->ni_pathlen = fpl->nulchar - cnp->cn_nameptr + 1; #ifdef INVARIANTS if (ndp->ni_pathlen != fpl->debug.ni_pathlen) { panic("%s: mismatch (%zu != %zu) nulchar %p nameptr %p [%s] ; full string [%s]\n", __func__, ndp->ni_pathlen, fpl->debug.ni_pathlen, fpl->nulchar, cnp->cn_nameptr, cnp->cn_nameptr, cnp->cn_pnbuf); } #endif return (0); } static int cache_fplookup_final_child(struct cache_fpl *fpl, enum vgetstate tvs) { struct componentname *cnp; struct vnode *tvp; seqc_t tvp_seqc; int error, lkflags; cnp = fpl->cnp; tvp = fpl->tvp; tvp_seqc = fpl->tvp_seqc; if ((cnp->cn_flags & LOCKLEAF) != 0) { lkflags = LK_SHARED; if ((cnp->cn_flags & LOCKSHARED) == 0) lkflags = LK_EXCLUSIVE; error = vget_finish(tvp, lkflags, tvs); if (__predict_false(error != 0)) { return (cache_fpl_aborted(fpl)); } } else { vget_finish_ref(tvp, tvs); } if (!vn_seqc_consistent(tvp, tvp_seqc)) { if ((cnp->cn_flags & LOCKLEAF) != 0) vput(tvp); else vrele(tvp); return (cache_fpl_aborted(fpl)); } return (cache_fpl_handled(fpl)); } /* * They want to possibly modify the state of the namecache. */ static int __noinline cache_fplookup_final_modifying(struct cache_fpl *fpl) { struct nameidata *ndp __diagused; struct componentname *cnp; enum vgetstate dvs; struct vnode *dvp, *tvp; struct mount *mp; seqc_t dvp_seqc; int error; bool docache; ndp = fpl->ndp; cnp = fpl->cnp; dvp = fpl->dvp; dvp_seqc = fpl->dvp_seqc; MPASS(*(cnp->cn_nameptr) != '/'); MPASS(cache_fpl_islastcn(ndp)); if ((cnp->cn_flags & LOCKPARENT) == 0) MPASS((cnp->cn_flags & WANTPARENT) != 0); MPASS((cnp->cn_flags & TRAILINGSLASH) == 0); MPASS(cnp->cn_nameiop == CREATE || cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME); MPASS((cnp->cn_flags & MAKEENTRY) == 0); MPASS((cnp->cn_flags & ISDOTDOT) == 0); docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE; if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME) docache = false; /* * Regular lookup nulifies the slash, which we don't do here. * Don't take chances with filesystem routines seeing it for * the last entry. */ if (cache_fpl_istrailingslash(fpl)) { return (cache_fpl_partial(fpl)); } mp = atomic_load_ptr(&dvp->v_mount); if (__predict_false(mp == NULL)) { return (cache_fpl_aborted(fpl)); } if (__predict_false(mp->mnt_flag & MNT_RDONLY)) { cache_fpl_smr_exit(fpl); /* * Original code keeps not checking for CREATE which * might be a bug. For now let the old lookup decide. */ if (cnp->cn_nameiop == CREATE) { return (cache_fpl_aborted(fpl)); } return (cache_fpl_handled_error(fpl, EROFS)); } if (fpl->tvp != NULL && (cnp->cn_flags & FAILIFEXISTS) != 0) { cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, EEXIST)); } /* * Secure access to dvp; check cache_fplookup_partial_setup for * reasoning. * * XXX At least UFS requires its lookup routine to be called for * the last path component, which leads to some level of complication * and inefficiency: * - the target routine always locks the target vnode, but our caller * may not need it locked * - some of the VOP machinery asserts that the parent is locked, which * once more may be not required * * TODO: add a flag for filesystems which don't need this. 
*/ dvs = vget_prep_smr(dvp); cache_fpl_smr_exit(fpl); if (__predict_false(dvs == VGET_NONE)) { return (cache_fpl_aborted(fpl)); } vget_finish_ref(dvp, dvs); if (!vn_seqc_consistent(dvp, dvp_seqc)) { vrele(dvp); return (cache_fpl_aborted(fpl)); } error = vn_lock(dvp, LK_EXCLUSIVE); if (__predict_false(error != 0)) { vrele(dvp); return (cache_fpl_aborted(fpl)); } tvp = NULL; cnp->cn_flags |= ISLASTCN; if (docache) cnp->cn_flags |= MAKEENTRY; if (cache_fpl_isdotdot(cnp)) cnp->cn_flags |= ISDOTDOT; cnp->cn_lkflags = LK_EXCLUSIVE; error = VOP_LOOKUP(dvp, &tvp, cnp); switch (error) { case EJUSTRETURN: case 0: break; case ENOTDIR: case ENOENT: vput(dvp); return (cache_fpl_handled_error(fpl, error)); default: vput(dvp); return (cache_fpl_aborted(fpl)); } fpl->tvp = tvp; if (tvp == NULL) { MPASS(error == EJUSTRETURN); if ((cnp->cn_flags & LOCKPARENT) == 0) { VOP_UNLOCK(dvp); } return (cache_fpl_handled(fpl)); } /* * There are very hairy corner cases concerning various flag combinations * and locking state. In particular here we only hold one lock instead of * two. * * Skip the complexity as it is of no significance for normal workloads. */ if (__predict_false(tvp == dvp)) { vput(dvp); vrele(tvp); return (cache_fpl_aborted(fpl)); } /* * If they want the symlink itself we are fine, but if they want to * follow it regular lookup has to be engaged. */ if (tvp->v_type == VLNK) { if ((cnp->cn_flags & FOLLOW) != 0) { vput(dvp); vput(tvp); return (cache_fpl_aborted(fpl)); } } /* * Since we expect this to be the terminal vnode it should almost never * be a mount point. */ if (__predict_false(cache_fplookup_is_mp(fpl))) { vput(dvp); vput(tvp); return (cache_fpl_aborted(fpl)); } if ((cnp->cn_flags & FAILIFEXISTS) != 0) { vput(dvp); vput(tvp); return (cache_fpl_handled_error(fpl, EEXIST)); } if ((cnp->cn_flags & LOCKLEAF) == 0) { VOP_UNLOCK(tvp); } if ((cnp->cn_flags & LOCKPARENT) == 0) { VOP_UNLOCK(dvp); } return (cache_fpl_handled(fpl)); } static int __noinline cache_fplookup_modifying(struct cache_fpl *fpl) { struct nameidata *ndp; ndp = fpl->ndp; if (!cache_fpl_islastcn(ndp)) { return (cache_fpl_partial(fpl)); } return (cache_fplookup_final_modifying(fpl)); } static int __noinline cache_fplookup_final_withparent(struct cache_fpl *fpl) { struct componentname *cnp; enum vgetstate dvs, tvs; struct vnode *dvp, *tvp; seqc_t dvp_seqc; int error; cnp = fpl->cnp; dvp = fpl->dvp; dvp_seqc = fpl->dvp_seqc; tvp = fpl->tvp; MPASS((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0); /* * This is less efficient than it can be for simplicity. 
*/ dvs = vget_prep_smr(dvp); if (__predict_false(dvs == VGET_NONE)) { return (cache_fpl_aborted(fpl)); } tvs = vget_prep_smr(tvp); if (__predict_false(tvs == VGET_NONE)) { cache_fpl_smr_exit(fpl); vget_abort(dvp, dvs); return (cache_fpl_aborted(fpl)); } cache_fpl_smr_exit(fpl); if ((cnp->cn_flags & LOCKPARENT) != 0) { error = vget_finish(dvp, LK_EXCLUSIVE, dvs); if (__predict_false(error != 0)) { vget_abort(tvp, tvs); return (cache_fpl_aborted(fpl)); } } else { vget_finish_ref(dvp, dvs); } if (!vn_seqc_consistent(dvp, dvp_seqc)) { vget_abort(tvp, tvs); if ((cnp->cn_flags & LOCKPARENT) != 0) vput(dvp); else vrele(dvp); return (cache_fpl_aborted(fpl)); } error = cache_fplookup_final_child(fpl, tvs); if (__predict_false(error != 0)) { MPASS(fpl->status == CACHE_FPL_STATUS_ABORTED || fpl->status == CACHE_FPL_STATUS_DESTROYED); if ((cnp->cn_flags & LOCKPARENT) != 0) vput(dvp); else vrele(dvp); return (error); } MPASS(fpl->status == CACHE_FPL_STATUS_HANDLED); return (0); } static int cache_fplookup_final(struct cache_fpl *fpl) { struct componentname *cnp; enum vgetstate tvs; struct vnode *dvp, *tvp; seqc_t dvp_seqc; cnp = fpl->cnp; dvp = fpl->dvp; dvp_seqc = fpl->dvp_seqc; tvp = fpl->tvp; MPASS(*(cnp->cn_nameptr) != '/'); if (cnp->cn_nameiop != LOOKUP) { return (cache_fplookup_final_modifying(fpl)); } if ((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0) return (cache_fplookup_final_withparent(fpl)); tvs = vget_prep_smr(tvp); if (__predict_false(tvs == VGET_NONE)) { return (cache_fpl_partial(fpl)); } if (!vn_seqc_consistent(dvp, dvp_seqc)) { cache_fpl_smr_exit(fpl); vget_abort(tvp, tvs); return (cache_fpl_aborted(fpl)); } cache_fpl_smr_exit(fpl); return (cache_fplookup_final_child(fpl, tvs)); } /* * Comment from locked lookup: * Check for degenerate name (e.g. / or "") which is a way of talking about a * directory, e.g. like "/." or ".". 
*/ static int __noinline cache_fplookup_degenerate(struct cache_fpl *fpl) { struct componentname *cnp; struct vnode *dvp; enum vgetstate dvs; int error, lkflags; #ifdef INVARIANTS char *cp; #endif fpl->tvp = fpl->dvp; fpl->tvp_seqc = fpl->dvp_seqc; cnp = fpl->cnp; dvp = fpl->dvp; #ifdef INVARIANTS for (cp = cnp->cn_pnbuf; *cp != '\0'; cp++) { KASSERT(*cp == '/', ("%s: encountered non-slash; string [%s]\n", __func__, cnp->cn_pnbuf)); } #endif if (__predict_false(cnp->cn_nameiop != LOOKUP)) { cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, EISDIR)); } if ((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0) { return (cache_fplookup_final_withparent(fpl)); } dvs = vget_prep_smr(dvp); cache_fpl_smr_exit(fpl); if (__predict_false(dvs == VGET_NONE)) { return (cache_fpl_aborted(fpl)); } if ((cnp->cn_flags & LOCKLEAF) != 0) { lkflags = LK_SHARED; if ((cnp->cn_flags & LOCKSHARED) == 0) lkflags = LK_EXCLUSIVE; error = vget_finish(dvp, lkflags, dvs); if (__predict_false(error != 0)) { return (cache_fpl_aborted(fpl)); } } else { vget_finish_ref(dvp, dvs); } return (cache_fpl_handled(fpl)); } static int __noinline cache_fplookup_emptypath(struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; enum vgetstate tvs; struct vnode *tvp; int error, lkflags; fpl->tvp = fpl->dvp; fpl->tvp_seqc = fpl->dvp_seqc; ndp = fpl->ndp; cnp = fpl->cnp; tvp = fpl->tvp; MPASS(*cnp->cn_pnbuf == '\0'); if (__predict_false((cnp->cn_flags & EMPTYPATH) == 0)) { cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, ENOENT)); } MPASS((cnp->cn_flags & (LOCKPARENT | WANTPARENT)) == 0); tvs = vget_prep_smr(tvp); cache_fpl_smr_exit(fpl); if (__predict_false(tvs == VGET_NONE)) { return (cache_fpl_aborted(fpl)); } if ((cnp->cn_flags & LOCKLEAF) != 0) { lkflags = LK_SHARED; if ((cnp->cn_flags & LOCKSHARED) == 0) lkflags = LK_EXCLUSIVE; error = vget_finish(tvp, lkflags, tvs); if (__predict_false(error != 0)) { return (cache_fpl_aborted(fpl)); } } else { vget_finish_ref(tvp, tvs); } ndp->ni_resflags |= NIRES_EMPTYPATH; return (cache_fpl_handled(fpl)); } static int __noinline cache_fplookup_noentry(struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; enum vgetstate dvs; struct vnode *dvp, *tvp; seqc_t dvp_seqc; int error; ndp = fpl->ndp; cnp = fpl->cnp; dvp = fpl->dvp; dvp_seqc = fpl->dvp_seqc; MPASS((cnp->cn_flags & MAKEENTRY) == 0); MPASS((cnp->cn_flags & ISDOTDOT) == 0); if (cnp->cn_nameiop == LOOKUP) MPASS((cnp->cn_flags & NOCACHE) == 0); MPASS(!cache_fpl_isdotdot(cnp)); /* * Hack: delayed name len checking. */ if (__predict_false(cnp->cn_namelen > NAME_MAX)) { cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, ENAMETOOLONG)); } if (cnp->cn_nameptr[0] == '/') { return (cache_fplookup_skip_slashes(fpl)); } if (cnp->cn_pnbuf[0] == '\0') { return (cache_fplookup_emptypath(fpl)); } if (cnp->cn_nameptr[0] == '\0') { if (fpl->tvp == NULL) { return (cache_fplookup_degenerate(fpl)); } return (cache_fplookup_trailingslash(fpl)); } if (cnp->cn_nameiop != LOOKUP) { fpl->tvp = NULL; return (cache_fplookup_modifying(fpl)); } /* * Only try to fill in the component if it is the last one, * otherwise not only there may be several to handle but the * walk may be complicated. */ if (!cache_fpl_islastcn(ndp)) { return (cache_fpl_partial(fpl)); } /* * Regular lookup nulifies the slash, which we don't do here. * Don't take chances with filesystem routines seeing it for * the last entry. 
*/ if (cache_fpl_istrailingslash(fpl)) { return (cache_fpl_partial(fpl)); } /* * Secure access to dvp; check cache_fplookup_partial_setup for * reasoning. */ dvs = vget_prep_smr(dvp); cache_fpl_smr_exit(fpl); if (__predict_false(dvs == VGET_NONE)) { return (cache_fpl_aborted(fpl)); } vget_finish_ref(dvp, dvs); if (!vn_seqc_consistent(dvp, dvp_seqc)) { vrele(dvp); return (cache_fpl_aborted(fpl)); } error = vn_lock(dvp, LK_SHARED); if (__predict_false(error != 0)) { vrele(dvp); return (cache_fpl_aborted(fpl)); } tvp = NULL; /* * TODO: provide variants which don't require locking either vnode. */ cnp->cn_flags |= ISLASTCN | MAKEENTRY; cnp->cn_lkflags = LK_SHARED; if ((cnp->cn_flags & LOCKSHARED) == 0) { cnp->cn_lkflags = LK_EXCLUSIVE; } error = VOP_LOOKUP(dvp, &tvp, cnp); switch (error) { case EJUSTRETURN: case 0: break; case ENOTDIR: case ENOENT: vput(dvp); return (cache_fpl_handled_error(fpl, error)); default: vput(dvp); return (cache_fpl_aborted(fpl)); } fpl->tvp = tvp; if (tvp == NULL) { MPASS(error == EJUSTRETURN); if ((cnp->cn_flags & (WANTPARENT | LOCKPARENT)) == 0) { vput(dvp); } else if ((cnp->cn_flags & LOCKPARENT) == 0) { VOP_UNLOCK(dvp); } return (cache_fpl_handled(fpl)); } if (tvp->v_type == VLNK) { if ((cnp->cn_flags & FOLLOW) != 0) { vput(dvp); vput(tvp); return (cache_fpl_aborted(fpl)); } } if (__predict_false(cache_fplookup_is_mp(fpl))) { vput(dvp); vput(tvp); return (cache_fpl_aborted(fpl)); } if ((cnp->cn_flags & LOCKLEAF) == 0) { VOP_UNLOCK(tvp); } if ((cnp->cn_flags & (WANTPARENT | LOCKPARENT)) == 0) { vput(dvp); } else if ((cnp->cn_flags & LOCKPARENT) == 0) { VOP_UNLOCK(dvp); } return (cache_fpl_handled(fpl)); } static int __noinline cache_fplookup_dot(struct cache_fpl *fpl) { int error; MPASS(!seqc_in_modify(fpl->dvp_seqc)); if (__predict_false(fpl->dvp->v_type != VDIR)) { cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, ENOTDIR)); } /* * Just re-assign the value. seqc will be checked later for the first * non-dot path component in line and/or before deciding to return the * vnode. */ fpl->tvp = fpl->dvp; fpl->tvp_seqc = fpl->dvp_seqc; SDT_PROBE3(vfs, namecache, lookup, hit, fpl->dvp, ".", fpl->dvp); error = 0; if (cache_fplookup_is_mp(fpl)) { error = cache_fplookup_cross_mount(fpl); } return (error); } static int __noinline cache_fplookup_dotdot(struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; struct namecache *ncp; struct vnode *dvp; - struct prison *pr; u_char nc_flag; ndp = fpl->ndp; cnp = fpl->cnp; dvp = fpl->dvp; MPASS(cache_fpl_isdotdot(cnp)); /* * XXX this is racy the same way regular lookup is */ - for (pr = cnp->cn_cred->cr_prison; pr != NULL; - pr = pr->pr_parent) - if (dvp == pr->pr_root) - break; - - if (dvp == ndp->ni_rootdir || - dvp == ndp->ni_topdir || - dvp == rootvnode || - pr != NULL) { + if (vfs_lookup_isroot(ndp, dvp)) { fpl->tvp = dvp; fpl->tvp_seqc = vn_seqc_read_any(dvp); if (seqc_in_modify(fpl->tvp_seqc)) { return (cache_fpl_aborted(fpl)); } return (0); } if ((dvp->v_vflag & VV_ROOT) != 0) { /* * TODO * The opposite of climb mount is needed here. 
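 *
 * For reference, locked lookup handles ".." at the root of a mounted
 * filesystem by stepping down to the vnode the filesystem is mounted
 * on, roughly (an illustrative sketch of the locked path, not code
 * used here):
 *
 *	tdp = dp;
 *	dp = dp->v_mount->mnt_vnodecovered;
 *	VREF(dp);
 *	vput(tdp);
 *
 * A lockless equivalent would additionally have to validate both
 * vnodes with their sequence counters, hence the fallback for now.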
*/ return (cache_fpl_partial(fpl)); } if (__predict_false(dvp->v_type != VDIR)) { cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, ENOTDIR)); } ncp = atomic_load_consume_ptr(&dvp->v_cache_dd); if (ncp == NULL) { return (cache_fpl_aborted(fpl)); } nc_flag = atomic_load_char(&ncp->nc_flag); if ((nc_flag & NCF_ISDOTDOT) != 0) { if ((nc_flag & NCF_NEGATIVE) != 0) return (cache_fpl_aborted(fpl)); fpl->tvp = ncp->nc_vp; } else { fpl->tvp = ncp->nc_dvp; } fpl->tvp_seqc = vn_seqc_read_any(fpl->tvp); if (seqc_in_modify(fpl->tvp_seqc)) { return (cache_fpl_partial(fpl)); } /* * Acquire fence provided by vn_seqc_read_any above. */ if (__predict_false(atomic_load_ptr(&dvp->v_cache_dd) != ncp)) { return (cache_fpl_aborted(fpl)); } if (!cache_ncp_canuse(ncp)) { return (cache_fpl_aborted(fpl)); } return (0); } static int __noinline cache_fplookup_neg(struct cache_fpl *fpl, struct namecache *ncp, uint32_t hash) { u_char nc_flag __diagused; bool neg_promote; #ifdef INVARIANTS nc_flag = atomic_load_char(&ncp->nc_flag); MPASS((nc_flag & NCF_NEGATIVE) != 0); #endif /* * If they want to create an entry we need to replace this one. */ if (__predict_false(fpl->cnp->cn_nameiop != LOOKUP)) { fpl->tvp = NULL; return (cache_fplookup_modifying(fpl)); } neg_promote = cache_neg_hit_prep(ncp); if (!cache_fpl_neg_ncp_canuse(ncp)) { cache_neg_hit_abort(ncp); return (cache_fpl_partial(fpl)); } if (neg_promote) { return (cache_fplookup_negative_promote(fpl, ncp, hash)); } cache_neg_hit_finish(ncp); cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, ENOENT)); } /* * Resolve a symlink. Called by filesystem-specific routines. * * Code flow is: * ... -> cache_fplookup_symlink -> VOP_FPLOOKUP_SYMLINK -> cache_symlink_resolve */ int cache_symlink_resolve(struct cache_fpl *fpl, const char *string, size_t len) { struct nameidata *ndp; struct componentname *cnp; size_t adjust; ndp = fpl->ndp; cnp = fpl->cnp; if (__predict_false(len == 0)) { return (ENOENT); } if (__predict_false(len > MAXPATHLEN - 2)) { if (cache_fpl_istrailingslash(fpl)) { return (EAGAIN); } } ndp->ni_pathlen = fpl->nulchar - cnp->cn_nameptr - cnp->cn_namelen + 1; #ifdef INVARIANTS if (ndp->ni_pathlen != fpl->debug.ni_pathlen) { panic("%s: mismatch (%zu != %zu) nulchar %p nameptr %p [%s] ; full string [%s]\n", __func__, ndp->ni_pathlen, fpl->debug.ni_pathlen, fpl->nulchar, cnp->cn_nameptr, cnp->cn_nameptr, cnp->cn_pnbuf); } #endif if (__predict_false(len + ndp->ni_pathlen > MAXPATHLEN)) { return (ENAMETOOLONG); } if (__predict_false(ndp->ni_loopcnt++ >= MAXSYMLINKS)) { return (ELOOP); } adjust = len; if (ndp->ni_pathlen > 1) { bcopy(ndp->ni_next, cnp->cn_pnbuf + len, ndp->ni_pathlen); } else { if (cache_fpl_istrailingslash(fpl)) { adjust = len + 1; cnp->cn_pnbuf[len] = '/'; cnp->cn_pnbuf[len + 1] = '\0'; } else { cnp->cn_pnbuf[len] = '\0'; } } bcopy(string, cnp->cn_pnbuf, len); ndp->ni_pathlen += adjust; cache_fpl_pathlen_add(fpl, adjust); cnp->cn_nameptr = cnp->cn_pnbuf; fpl->nulchar = &cnp->cn_nameptr[ndp->ni_pathlen - 1]; fpl->tvp = NULL; return (0); } static int __noinline cache_fplookup_symlink(struct cache_fpl *fpl) { struct mount *mp; struct nameidata *ndp; struct componentname *cnp; struct vnode *dvp, *tvp; struct pwd *pwd; int error; ndp = fpl->ndp; cnp = fpl->cnp; dvp = fpl->dvp; tvp = fpl->tvp; pwd = *(fpl->pwd); if (cache_fpl_islastcn(ndp)) { if ((cnp->cn_flags & FOLLOW) == 0) { return (cache_fplookup_final(fpl)); } } mp = atomic_load_ptr(&dvp->v_mount); if (__predict_false(mp == NULL)) { return (cache_fpl_aborted(fpl)); } /* * 
Note this check races against setting the flag just like regular * lookup. */ if (__predict_false((mp->mnt_flag & MNT_NOSYMFOLLOW) != 0)) { cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, EACCES)); } error = VOP_FPLOOKUP_SYMLINK(tvp, fpl); if (__predict_false(error != 0)) { switch (error) { case EAGAIN: return (cache_fpl_partial(fpl)); case ENOENT: case ENAMETOOLONG: case ELOOP: cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, error)); default: return (cache_fpl_aborted(fpl)); } } if (*(cnp->cn_nameptr) == '/') { fpl->dvp = cache_fpl_handle_root(fpl); fpl->dvp_seqc = vn_seqc_read_any(fpl->dvp); if (seqc_in_modify(fpl->dvp_seqc)) { return (cache_fpl_aborted(fpl)); } /* * The main loop assumes that ->dvp points to a vnode belonging * to a filesystem which can do lockless lookup, but the absolute * symlink can be wandering off to one which does not. */ mp = atomic_load_ptr(&fpl->dvp->v_mount); if (__predict_false(mp == NULL)) { return (cache_fpl_aborted(fpl)); } if (!cache_fplookup_mp_supported(mp)) { cache_fpl_checkpoint(fpl); return (cache_fpl_partial(fpl)); } if (__predict_false(pwd->pwd_adir != pwd->pwd_rdir)) { return (cache_fpl_aborted(fpl)); } } return (0); } static int cache_fplookup_next(struct cache_fpl *fpl) { struct componentname *cnp; struct namecache *ncp; struct vnode *dvp, *tvp; u_char nc_flag; uint32_t hash; int error; cnp = fpl->cnp; dvp = fpl->dvp; hash = fpl->hash; if (__predict_false(cnp->cn_nameptr[0] == '.')) { if (cnp->cn_namelen == 1) { return (cache_fplookup_dot(fpl)); } if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { return (cache_fplookup_dotdot(fpl)); } } MPASS(!cache_fpl_isdotdot(cnp)); ncp = cache_ncp_find(dvp, cnp, hash); if (__predict_false(ncp == NULL)) { return (cache_fplookup_noentry(fpl)); } tvp = atomic_load_ptr(&ncp->nc_vp); nc_flag = atomic_load_char(&ncp->nc_flag); if ((nc_flag & NCF_NEGATIVE) != 0) { return (cache_fplookup_neg(fpl, ncp, hash)); } if (!cache_ncp_canuse(ncp)) { return (cache_fpl_partial(fpl)); } fpl->tvp = tvp; fpl->tvp_seqc = vn_seqc_read_any(tvp); if (seqc_in_modify(fpl->tvp_seqc)) { return (cache_fpl_partial(fpl)); } counter_u64_add(numposhits, 1); SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, tvp); error = 0; if (cache_fplookup_is_mp(fpl)) { error = cache_fplookup_cross_mount(fpl); } return (error); } static bool cache_fplookup_mp_supported(struct mount *mp) { MPASS(mp != NULL); if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) == 0) return (false); return (true); } /* * Walk up the mount stack (if any). * * Correctness is provided in the following ways: * - all vnodes are protected from freeing with SMR * - struct mount objects are type stable making them always safe to access * - stability of the particular mount is provided by busying it * - relationship between the vnode which is mounted on and the mount is * verified with the vnode sequence counter after busying * - association between root vnode of the mount and the mount is protected * by busy * * From that point on we can read the sequence counter of the root vnode * and get the next mount on the stack (if any) using the same protection. * * By the end of successful walk we are guaranteed the reached state was * indeed present at least at some point which matches the regular lookup. 
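 *
 * Condensed, each hop in the loop below amounts to the following
 * (error handling and bailing to the slow path omitted for brevity):
 *
 *	vfs_op_thread_enter_crit(mp, mpcpu);	// busy the mount
 *	vn_seqc_consistent(vp, vp_seqc);	// mounted-on vnode unchanged?
 *	vp = mp->mnt_rootvnode;			// move to the root of the mount
 *	vp_seqc = vn_seqc_read_any(vp);		// counter for the next hop
 *	mp = vp->v_mountedhere;			// another mount stacked on top?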
*/ static int __noinline cache_fplookup_climb_mount(struct cache_fpl *fpl) { struct mount *mp, *prev_mp; struct mount_pcpu *mpcpu, *prev_mpcpu; struct vnode *vp; seqc_t vp_seqc; vp = fpl->tvp; vp_seqc = fpl->tvp_seqc; VNPASS(vp->v_type == VDIR || vp->v_type == VREG || vp->v_type == VBAD, vp); mp = atomic_load_ptr(&vp->v_mountedhere); if (__predict_false(mp == NULL)) { return (0); } prev_mp = NULL; for (;;) { if (!vfs_op_thread_enter_crit(mp, mpcpu)) { if (prev_mp != NULL) vfs_op_thread_exit_crit(prev_mp, prev_mpcpu); return (cache_fpl_partial(fpl)); } if (prev_mp != NULL) vfs_op_thread_exit_crit(prev_mp, prev_mpcpu); if (!vn_seqc_consistent(vp, vp_seqc)) { vfs_op_thread_exit_crit(mp, mpcpu); return (cache_fpl_partial(fpl)); } if (!cache_fplookup_mp_supported(mp)) { vfs_op_thread_exit_crit(mp, mpcpu); return (cache_fpl_partial(fpl)); } vp = atomic_load_ptr(&mp->mnt_rootvnode); if (vp == NULL) { vfs_op_thread_exit_crit(mp, mpcpu); return (cache_fpl_partial(fpl)); } vp_seqc = vn_seqc_read_any(vp); if (seqc_in_modify(vp_seqc)) { vfs_op_thread_exit_crit(mp, mpcpu); return (cache_fpl_partial(fpl)); } prev_mp = mp; prev_mpcpu = mpcpu; mp = atomic_load_ptr(&vp->v_mountedhere); if (mp == NULL) break; } vfs_op_thread_exit_crit(prev_mp, prev_mpcpu); fpl->tvp = vp; fpl->tvp_seqc = vp_seqc; return (0); } static int __noinline cache_fplookup_cross_mount(struct cache_fpl *fpl) { struct mount *mp; struct mount_pcpu *mpcpu; struct vnode *vp; seqc_t vp_seqc; vp = fpl->tvp; vp_seqc = fpl->tvp_seqc; VNPASS(vp->v_type == VDIR || vp->v_type == VREG || vp->v_type == VBAD, vp); mp = atomic_load_ptr(&vp->v_mountedhere); if (__predict_false(mp == NULL)) { return (0); } if (!vfs_op_thread_enter_crit(mp, mpcpu)) { return (cache_fpl_partial(fpl)); } if (!vn_seqc_consistent(vp, vp_seqc)) { vfs_op_thread_exit_crit(mp, mpcpu); return (cache_fpl_partial(fpl)); } if (!cache_fplookup_mp_supported(mp)) { vfs_op_thread_exit_crit(mp, mpcpu); return (cache_fpl_partial(fpl)); } vp = atomic_load_ptr(&mp->mnt_rootvnode); if (__predict_false(vp == NULL)) { vfs_op_thread_exit_crit(mp, mpcpu); return (cache_fpl_partial(fpl)); } vp_seqc = vn_seqc_read_any(vp); vfs_op_thread_exit_crit(mp, mpcpu); if (seqc_in_modify(vp_seqc)) { return (cache_fpl_partial(fpl)); } mp = atomic_load_ptr(&vp->v_mountedhere); if (__predict_false(mp != NULL)) { /* * There are possibly more mount points on top. * Normally this does not happen so for simplicity just start * over. */ return (cache_fplookup_climb_mount(fpl)); } fpl->tvp = vp; fpl->tvp_seqc = vp_seqc; return (0); } /* * Check if a vnode is mounted on. */ static bool cache_fplookup_is_mp(struct cache_fpl *fpl) { struct vnode *vp; vp = fpl->tvp; return ((vn_irflag_read(vp) & VIRF_MOUNTPOINT) != 0); } /* * Parse the path. * * The code was originally copy-pasted from regular lookup and despite * clean ups leaves performance on the table. Any modifications here * must take into account that in case off fallback the resulting * nameidata state has to be compatible with the original. */ /* * Debug ni_pathlen tracking. 
*/ #ifdef INVARIANTS static void cache_fpl_pathlen_add(struct cache_fpl *fpl, size_t n) { fpl->debug.ni_pathlen += n; KASSERT(fpl->debug.ni_pathlen <= PATH_MAX, ("%s: pathlen overflow to %zd\n", __func__, fpl->debug.ni_pathlen)); } static void cache_fpl_pathlen_sub(struct cache_fpl *fpl, size_t n) { fpl->debug.ni_pathlen -= n; KASSERT(fpl->debug.ni_pathlen <= PATH_MAX, ("%s: pathlen underflow to %zd\n", __func__, fpl->debug.ni_pathlen)); } static void cache_fpl_pathlen_inc(struct cache_fpl *fpl) { cache_fpl_pathlen_add(fpl, 1); } static void cache_fpl_pathlen_dec(struct cache_fpl *fpl) { cache_fpl_pathlen_sub(fpl, 1); } #else static void cache_fpl_pathlen_add(struct cache_fpl *fpl, size_t n) { } static void cache_fpl_pathlen_sub(struct cache_fpl *fpl, size_t n) { } static void cache_fpl_pathlen_inc(struct cache_fpl *fpl) { } static void cache_fpl_pathlen_dec(struct cache_fpl *fpl) { } #endif static void cache_fplookup_parse(struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; struct vnode *dvp; char *cp; uint32_t hash; ndp = fpl->ndp; cnp = fpl->cnp; dvp = fpl->dvp; /* * Find the end of this path component, it is either / or nul. * * Store / as a temporary sentinel so that we only have one character * to test for. Pathnames tend to be short so this should not be * resulting in cache misses. * * TODO: fix this to be word-sized. */ MPASS(&cnp->cn_nameptr[fpl->debug.ni_pathlen - 1] >= cnp->cn_pnbuf); KASSERT(&cnp->cn_nameptr[fpl->debug.ni_pathlen - 1] == fpl->nulchar, ("%s: mismatch between pathlen (%zu) and nulchar (%p != %p), string [%s]\n", __func__, fpl->debug.ni_pathlen, &cnp->cn_nameptr[fpl->debug.ni_pathlen - 1], fpl->nulchar, cnp->cn_pnbuf)); KASSERT(*fpl->nulchar == '\0', ("%s: expected nul at %p; string [%s]\n", __func__, fpl->nulchar, cnp->cn_pnbuf)); hash = cache_get_hash_iter_start(dvp); *fpl->nulchar = '/'; for (cp = cnp->cn_nameptr; *cp != '/'; cp++) { KASSERT(*cp != '\0', ("%s: encountered unexpected nul; string [%s]\n", __func__, cnp->cn_nameptr)); hash = cache_get_hash_iter(*cp, hash); continue; } *fpl->nulchar = '\0'; fpl->hash = cache_get_hash_iter_finish(hash); cnp->cn_namelen = cp - cnp->cn_nameptr; cache_fpl_pathlen_sub(fpl, cnp->cn_namelen); #ifdef INVARIANTS /* * cache_get_hash only accepts lengths up to NAME_MAX. This is fine since * we are going to fail this lookup with ENAMETOOLONG (see below). */ if (cnp->cn_namelen <= NAME_MAX) { if (fpl->hash != cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp)) { panic("%s: mismatched hash for [%s] len %ld", __func__, cnp->cn_nameptr, cnp->cn_namelen); } } #endif /* * Hack: we have to check if the found path component's length exceeds * NAME_MAX. However, the condition is very rarely true and check can * be elided in the common case -- if an entry was found in the cache, * then it could not have been too long to begin with. */ ndp->ni_next = cp; } static void cache_fplookup_parse_advance(struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; ndp = fpl->ndp; cnp = fpl->cnp; cnp->cn_nameptr = ndp->ni_next; KASSERT(*(cnp->cn_nameptr) == '/', ("%s: should have seen slash at %p ; buf %p [%s]\n", __func__, cnp->cn_nameptr, cnp->cn_pnbuf, cnp->cn_pnbuf)); cnp->cn_nameptr++; cache_fpl_pathlen_dec(fpl); } /* * Skip spurious slashes in a pathname (e.g., "foo///bar") and retry. * * Lockless lookup tries to elide checking for spurious slashes and should they * be present is guaranteed to fail to find an entry. 
In this case the caller * must check if the name starts with a slash and call this routine. It is * going to fast forward across the spurious slashes and set the state up for * retry. */ static int __noinline cache_fplookup_skip_slashes(struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; ndp = fpl->ndp; cnp = fpl->cnp; MPASS(*(cnp->cn_nameptr) == '/'); do { cnp->cn_nameptr++; cache_fpl_pathlen_dec(fpl); } while (*(cnp->cn_nameptr) == '/'); /* * Go back to one slash so that cache_fplookup_parse_advance has * something to skip. */ cnp->cn_nameptr--; cache_fpl_pathlen_inc(fpl); /* * cache_fplookup_parse_advance starts from ndp->ni_next */ ndp->ni_next = cnp->cn_nameptr; /* * See cache_fplookup_dot. */ fpl->tvp = fpl->dvp; fpl->tvp_seqc = fpl->dvp_seqc; return (0); } /* * Handle trailing slashes (e.g., "foo/"). * * If a trailing slash is found the terminal vnode must be a directory. * Regular lookup shortens the path by nulifying the first trailing slash and * sets the TRAILINGSLASH flag to denote this took place. There are several * checks on it performed later. * * Similarly to spurious slashes, lockless lookup handles this in a speculative * manner relying on an invariant that a non-directory vnode will get a miss. * In this case cn_nameptr[0] == '\0' and cn_namelen == 0. * * Thus for a path like "foo/bar/" the code unwinds the state back to "bar/" * and denotes this is the last path component, which avoids looping back. * * Only plain lookups are supported for now to restrict corner cases to handle. */ static int __noinline cache_fplookup_trailingslash(struct cache_fpl *fpl) { #ifdef INVARIANTS size_t ni_pathlen; #endif struct nameidata *ndp; struct componentname *cnp; struct namecache *ncp; struct vnode *tvp; char *cn_nameptr_orig, *cn_nameptr_slash; seqc_t tvp_seqc; u_char nc_flag; ndp = fpl->ndp; cnp = fpl->cnp; tvp = fpl->tvp; tvp_seqc = fpl->tvp_seqc; MPASS(fpl->dvp == fpl->tvp); KASSERT(cache_fpl_istrailingslash(fpl), ("%s: expected trailing slash at %p; string [%s]\n", __func__, fpl->nulchar - 1, cnp->cn_pnbuf)); KASSERT(cnp->cn_nameptr[0] == '\0', ("%s: expected nul char at %p; string [%s]\n", __func__, &cnp->cn_nameptr[0], cnp->cn_pnbuf)); KASSERT(cnp->cn_namelen == 0, ("%s: namelen 0 but got %ld; string [%s]\n", __func__, cnp->cn_namelen, cnp->cn_pnbuf)); MPASS(cnp->cn_nameptr > cnp->cn_pnbuf); if (cnp->cn_nameiop != LOOKUP) { return (cache_fpl_aborted(fpl)); } if (__predict_false(tvp->v_type != VDIR)) { if (!vn_seqc_consistent(tvp, tvp_seqc)) { return (cache_fpl_aborted(fpl)); } cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, ENOTDIR)); } /* * Denote the last component. */ ndp->ni_next = &cnp->cn_nameptr[0]; MPASS(cache_fpl_islastcn(ndp)); /* * Unwind trailing slashes. */ cn_nameptr_orig = cnp->cn_nameptr; while (cnp->cn_nameptr >= cnp->cn_pnbuf) { cnp->cn_nameptr--; if (cnp->cn_nameptr[0] != '/') { break; } } /* * Unwind to the beginning of the path component. * * Note the path may or may not have started with a slash. 
*/ cn_nameptr_slash = cnp->cn_nameptr; while (cnp->cn_nameptr > cnp->cn_pnbuf) { cnp->cn_nameptr--; if (cnp->cn_nameptr[0] == '/') { break; } } if (cnp->cn_nameptr[0] == '/') { cnp->cn_nameptr++; } cnp->cn_namelen = cn_nameptr_slash - cnp->cn_nameptr + 1; cache_fpl_pathlen_add(fpl, cn_nameptr_orig - cnp->cn_nameptr); cache_fpl_checkpoint(fpl); #ifdef INVARIANTS ni_pathlen = fpl->nulchar - cnp->cn_nameptr + 1; if (ni_pathlen != fpl->debug.ni_pathlen) { panic("%s: mismatch (%zu != %zu) nulchar %p nameptr %p [%s] ; full string [%s]\n", __func__, ni_pathlen, fpl->debug.ni_pathlen, fpl->nulchar, cnp->cn_nameptr, cnp->cn_nameptr, cnp->cn_pnbuf); } #endif /* * If this was a "./" lookup the parent directory is already correct. */ if (cnp->cn_nameptr[0] == '.' && cnp->cn_namelen == 1) { return (0); } /* * Otherwise we need to look it up. */ tvp = fpl->tvp; ncp = atomic_load_consume_ptr(&tvp->v_cache_dd); if (__predict_false(ncp == NULL)) { return (cache_fpl_aborted(fpl)); } nc_flag = atomic_load_char(&ncp->nc_flag); if ((nc_flag & NCF_ISDOTDOT) != 0) { return (cache_fpl_aborted(fpl)); } fpl->dvp = ncp->nc_dvp; fpl->dvp_seqc = vn_seqc_read_any(fpl->dvp); if (seqc_in_modify(fpl->dvp_seqc)) { return (cache_fpl_aborted(fpl)); } return (0); } /* * See the API contract for VOP_FPLOOKUP_VEXEC. */ static int __noinline cache_fplookup_failed_vexec(struct cache_fpl *fpl, int error) { struct componentname *cnp; struct vnode *dvp; seqc_t dvp_seqc; cnp = fpl->cnp; dvp = fpl->dvp; dvp_seqc = fpl->dvp_seqc; /* * Hack: delayed empty path checking. */ if (cnp->cn_pnbuf[0] == '\0') { return (cache_fplookup_emptypath(fpl)); } /* * TODO: Due to ignoring trailing slashes lookup will perform a * permission check on the last dir when it should not be doing it. It * may fail, but said failure should be ignored. It is possible to fix * it up fully without resorting to regular lookup, but for now just * abort. */ if (cache_fpl_istrailingslash(fpl)) { return (cache_fpl_aborted(fpl)); } /* * Hack: delayed degenerate path checking. */ if (cnp->cn_nameptr[0] == '\0' && fpl->tvp == NULL) { return (cache_fplookup_degenerate(fpl)); } /* * Hack: delayed name len checking. */ if (__predict_false(cnp->cn_namelen > NAME_MAX)) { cache_fpl_smr_exit(fpl); return (cache_fpl_handled_error(fpl, ENAMETOOLONG)); } /* * Hack: they may be looking up foo/bar, where foo is not a directory. * In such a case we need to return ENOTDIR, but we may happen to get * here with a different error. */ if (dvp->v_type != VDIR) { error = ENOTDIR; } /* * Hack: handle O_SEARCH. * * Open Group Base Specifications Issue 7, 2018 edition states: * * If the access mode of the open file description associated with the * file descriptor is not O_SEARCH, the function shall check whether * directory searches are permitted using the current permissions of * the directory underlying the file descriptor. If the access mode is * O_SEARCH, the function shall not perform the check. * * * Regular lookup tests for the NOEXECCHECK flag for every path * component to decide whether to do the permission check. However, * since most lookups never have the flag (and when they do it is only * present for the first path component), lockless lookup only acts on * it if there is a permission problem. Here the flag is represented * with a boolean so that we don't have to clear it on the way out. * * For simplicity this always aborts. * TODO: check if this is the first lookup and ignore the permission * problem. Note the flag has to survive fallback (if it happens to be * performed). 
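 *
 * For illustration, the userspace pattern this caters to is roughly
 * (placeholder paths; assumes the POSIX O_SEARCH open flag):
 *
 *	dfd = open("/some/dir", O_SEARCH);
 *	...
 *	fd = openat(dfd, "file", O_RDONLY);
 *
 * where the openat() call must not re-check execute (search)
 * permission on the directory backing dfd.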
*/ if (fpl->fsearch) { return (cache_fpl_aborted(fpl)); } switch (error) { case EAGAIN: if (!vn_seqc_consistent(dvp, dvp_seqc)) { error = cache_fpl_aborted(fpl); } else { cache_fpl_partial(fpl); } break; default: if (!vn_seqc_consistent(dvp, dvp_seqc)) { error = cache_fpl_aborted(fpl); } else { cache_fpl_smr_exit(fpl); cache_fpl_handled_error(fpl, error); } break; } return (error); } static int cache_fplookup_impl(struct vnode *dvp, struct cache_fpl *fpl) { struct nameidata *ndp; struct componentname *cnp; struct mount *mp; int error; ndp = fpl->ndp; cnp = fpl->cnp; cache_fpl_checkpoint(fpl); /* * The vnode at hand is almost always stable, skip checking for it. * Worst case this postpones the check towards the end of the iteration * of the main loop. */ fpl->dvp = dvp; fpl->dvp_seqc = vn_seqc_read_notmodify(fpl->dvp); mp = atomic_load_ptr(&dvp->v_mount); if (__predict_false(mp == NULL || !cache_fplookup_mp_supported(mp))) { return (cache_fpl_aborted(fpl)); } MPASS(fpl->tvp == NULL); for (;;) { cache_fplookup_parse(fpl); error = VOP_FPLOOKUP_VEXEC(fpl->dvp, cnp->cn_cred); if (__predict_false(error != 0)) { error = cache_fplookup_failed_vexec(fpl, error); break; } error = cache_fplookup_next(fpl); if (__predict_false(cache_fpl_terminated(fpl))) { break; } VNPASS(!seqc_in_modify(fpl->tvp_seqc), fpl->tvp); if (fpl->tvp->v_type == VLNK) { error = cache_fplookup_symlink(fpl); if (cache_fpl_terminated(fpl)) { break; } } else { if (cache_fpl_islastcn(ndp)) { error = cache_fplookup_final(fpl); break; } if (!vn_seqc_consistent(fpl->dvp, fpl->dvp_seqc)) { error = cache_fpl_aborted(fpl); break; } fpl->dvp = fpl->tvp; fpl->dvp_seqc = fpl->tvp_seqc; cache_fplookup_parse_advance(fpl); } cache_fpl_checkpoint(fpl); } return (error); } /* * Fast path lookup protected with SMR and sequence counters. * * Note: all VOP_FPLOOKUP_VEXEC routines have a comment referencing this one. * * Filesystems can opt in by setting the MNTK_FPLOOKUP flag and meeting criteria * outlined below. * * Traditional vnode lookup conceptually looks like this: * * vn_lock(current); * for (;;) { * next = find(); * vn_lock(next); * vn_unlock(current); * current = next; * if (last) * break; * } * return (current); * * Each jump to the next vnode is safe memory-wise and atomic with respect to * any modifications thanks to holding respective locks. * * The same guarantee can be provided with a combination of safe memory * reclamation and sequence counters instead. If all operations which affect * the relationship between the current vnode and the one we are looking for * also modify the counter, we can verify whether all the conditions held as * we made the jump. This includes things like permissions, mount points etc. * Counter modification is provided by enclosing relevant places in * vn_seqc_write_begin()/end() calls. 
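 *
 * For example, a writer about to change something lookup-relevant
 * about a vnode does, schematically (a generic sketch, not any
 * particular filesystem):
 *
 *	vn_seqc_write_begin(vp);
 *	// modify permissions, directory entries, mount state, ...
 *	vn_seqc_write_end(vp);
 *
 * so that concurrent lockless consumers either observe the counter in
 * modify or fail the consistency check later and fall back.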
* * Thus this translates to: * * vfs_smr_enter(); * dvp_seqc = seqc_read_any(dvp); * if (seqc_in_modify(dvp_seqc)) // someone is altering the vnode * abort(); * for (;;) { * tvp = find(); * tvp_seqc = seqc_read_any(tvp); * if (seqc_in_modify(tvp_seqc)) // someone is altering the target vnode * abort(); * if (!seqc_consistent(dvp, dvp_seqc) // someone is altering the vnode * abort(); * dvp = tvp; // we know nothing of importance has changed * dvp_seqc = tvp_seqc; // store the counter for the tvp iteration * if (last) * break; * } * vget(); // secure the vnode * if (!seqc_consistent(tvp, tvp_seqc) // final check * abort(); * // at this point we know nothing has changed for any parent<->child pair * // as they were crossed during the lookup, meaning we matched the guarantee * // of the locked variant * return (tvp); * * The API contract for VOP_FPLOOKUP_VEXEC routines is as follows: * - they are called while within vfs_smr protection which they must never exit * - EAGAIN can be returned to denote checking could not be performed, it is * always valid to return it * - if the sequence counter has not changed the result must be valid * - if the sequence counter has changed both false positives and false negatives * are permitted (since the result will be rejected later) * - for simple cases of unix permission checks vaccess_vexec_smr can be used * * Caveats to watch out for: * - vnodes are passed unlocked and unreferenced with nothing stopping * VOP_RECLAIM, in turn meaning that ->v_data can become NULL. It is advised * to use atomic_load_ptr to fetch it. * - the aforementioned object can also get freed, meaning absent other means it * should be protected with vfs_smr * - either safely checking permissions as they are modified or guaranteeing * their stability is left to the routine */ int cache_fplookup(struct nameidata *ndp, enum cache_fpl_status *status, struct pwd **pwdp) { struct cache_fpl fpl; struct pwd *pwd; struct vnode *dvp; struct componentname *cnp; int error; fpl.status = CACHE_FPL_STATUS_UNSET; fpl.in_smr = false; fpl.ndp = ndp; fpl.cnp = cnp = &ndp->ni_cnd; MPASS(ndp->ni_lcf == 0); KASSERT ((cnp->cn_flags & CACHE_FPL_INTERNAL_CN_FLAGS) == 0, ("%s: internal flags found in cn_flags %" PRIx64, __func__, cnp->cn_flags)); MPASS(cnp->cn_nameptr == cnp->cn_pnbuf); MPASS(ndp->ni_resflags == 0); if (__predict_false(!cache_can_fplookup(&fpl))) { *status = fpl.status; SDT_PROBE3(vfs, fplookup, lookup, done, ndp, fpl.line, fpl.status); return (EOPNOTSUPP); } cache_fpl_checkpoint_outer(&fpl); cache_fpl_smr_enter_initial(&fpl); #ifdef INVARIANTS fpl.debug.ni_pathlen = ndp->ni_pathlen; #endif fpl.nulchar = &cnp->cn_nameptr[ndp->ni_pathlen - 1]; fpl.fsearch = false; fpl.tvp = NULL; /* for degenerate path handling */ fpl.pwd = pwdp; pwd = pwd_get_smr(); *(fpl.pwd) = pwd; namei_setup_rootdir(ndp, cnp, pwd); ndp->ni_topdir = pwd->pwd_jdir; if (cnp->cn_pnbuf[0] == '/') { dvp = cache_fpl_handle_root(&fpl); ndp->ni_resflags = NIRES_ABS; } else { if (ndp->ni_dirfd == AT_FDCWD) { dvp = pwd->pwd_cdir; } else { error = cache_fplookup_dirfd(&fpl, &dvp); if (__predict_false(error != 0)) { goto out; } } } SDT_PROBE4(vfs, namei, lookup, entry, dvp, cnp->cn_pnbuf, cnp->cn_flags, true); error = cache_fplookup_impl(dvp, &fpl); out: cache_fpl_smr_assert_not_entered(&fpl); cache_fpl_assert_status(&fpl); *status = fpl.status; if (SDT_PROBES_ENABLED()) { SDT_PROBE3(vfs, fplookup, lookup, done, ndp, fpl.line, fpl.status); if (fpl.status == CACHE_FPL_STATUS_HANDLED) SDT_PROBE4(vfs, namei, lookup, return, error, 
ndp->ni_vp, true, ndp); } if (__predict_true(fpl.status == CACHE_FPL_STATUS_HANDLED)) { MPASS(error != CACHE_FPL_FAILED); if (error != 0) { cache_fpl_cleanup_cnp(fpl.cnp); MPASS(fpl.dvp == NULL); MPASS(fpl.tvp == NULL); } ndp->ni_dvp = fpl.dvp; ndp->ni_vp = fpl.tvp; } return (error); } diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c index 127d068f1fff..86c7bdaa02c0 100644 --- a/sys/kern/vfs_lookup.c +++ b/sys/kern/vfs_lookup.c @@ -1,1623 +1,1642 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_capsicum.h" #include "opt_ktrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #ifdef INVARIANTS #include #endif #include #include #include #ifdef INVARIANTS static void NDVALIDATE_impl(struct nameidata *, int); #define NDVALIDATE(ndp) NDVALIDATE_impl(ndp, __LINE__) #else #define NDVALIDATE(ndp) #endif /* * Prepare namei() to restart. Reset components to its original state and set * ISRESTARTED flag which signals the underlying lookup code to change the root * from ABI root to actual root and prevents a further restarts. 
*/ #define NDRESTART(ndp) do { \ NDREINIT_DBG(ndp); \ ndp->ni_resflags = 0; \ ndp->ni_cnd.cn_flags &= ~NAMEI_INTERNAL_FLAGS; \ ndp->ni_cnd.cn_flags |= ISRESTARTED; \ } while (0) #ifdef KTRACE #define NIKTRCAPFAIL(path) ktrcapfail(CAPFAIL_NAMEI, (path)) #else #define NIKTRCAPFAIL(path) #endif #define NI_CAP_VIOLATION(ndp, path) do { \ NIKTRCAPFAIL(path); \ (ndp)->ni_lcf &= ~NI_LCF_KTR_FLAGS; \ } while (0) SDT_PROVIDER_DEFINE(vfs); SDT_PROBE_DEFINE4(vfs, namei, lookup, entry, "struct vnode *", "char *", "unsigned long", "bool"); SDT_PROBE_DEFINE4(vfs, namei, lookup, return, "int", "struct vnode *", "bool", "struct nameidata"); /* Allocation zone for namei. */ uma_zone_t namei_zone; /* Placeholder vnode for mp traversal. */ static struct vnode *vp_crossmp; static int crossmp_vop_islocked(struct vop_islocked_args *ap) { return (LK_SHARED); } static int crossmp_vop_lock1(struct vop_lock1_args *ap) { struct vnode *vp; struct lock *lk __diagused; int flags; vp = ap->a_vp; lk = vp->v_vnlock; flags = ap->a_flags; KASSERT((flags & (LK_SHARED | LK_NOWAIT)) == (LK_SHARED | LK_NOWAIT), ("%s: invalid lock request 0x%x for crossmp", __func__, flags)); if ((flags & LK_INTERLOCK) != 0) VI_UNLOCK(vp); LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, ap->a_file, ap->a_line); return (0); } static int crossmp_vop_unlock(struct vop_unlock_args *ap) { struct vnode *vp; struct lock *lk __diagused; vp = ap->a_vp; lk = vp->v_vnlock; LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, LOCK_FILE, LOCK_LINE); return (0); } static struct vop_vector crossmp_vnodeops = { .vop_default = &default_vnodeops, .vop_islocked = crossmp_vop_islocked, .vop_lock1 = crossmp_vop_lock1, .vop_unlock = crossmp_vop_unlock, }; /* * VFS_VOP_VECTOR_REGISTER(crossmp_vnodeops) is not used here since the vnode * gets allocated early. See nameiinit for the direct call below. */ struct nameicap_tracker { struct vnode *dp; TAILQ_ENTRY(nameicap_tracker) nm_link; }; /* Zone for cap mode tracker elements used for dotdot capability checks. 
*/ MALLOC_DEFINE(M_NAMEITRACKER, "namei_tracker", "namei tracking for dotdot"); static void nameiinit(void *dummy __unused) { namei_zone = uma_zcreate("NAMEI", MAXPATHLEN, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); vfs_vector_op_register(&crossmp_vnodeops); getnewvnode("crossmp", NULL, &crossmp_vnodeops, &vp_crossmp); vp_crossmp->v_state = VSTATE_CONSTRUCTED; vp_crossmp->v_irflag |= VIRF_CROSSMP; } SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nameiinit, NULL); static int lookup_cap_dotdot = 1; SYSCTL_INT(_vfs, OID_AUTO, lookup_cap_dotdot, CTLFLAG_RWTUN, &lookup_cap_dotdot, 0, "enables \"..\" components in path lookup in capability mode"); static int lookup_cap_dotdot_nonlocal = 1; SYSCTL_INT(_vfs, OID_AUTO, lookup_cap_dotdot_nonlocal, CTLFLAG_RWTUN, &lookup_cap_dotdot_nonlocal, 0, "enables \"..\" components in path lookup in capability mode " "on non-local mount"); static void nameicap_tracker_add(struct nameidata *ndp, struct vnode *dp) { struct nameicap_tracker *nt; if ((ndp->ni_lcf & NI_LCF_CAP_DOTDOT) == 0 || dp->v_type != VDIR) return; nt = TAILQ_LAST(&ndp->ni_cap_tracker, nameicap_tracker_head); if (nt != NULL && nt->dp == dp) return; nt = malloc(sizeof(*nt), M_NAMEITRACKER, M_WAITOK); vhold(dp); nt->dp = dp; TAILQ_INSERT_TAIL(&ndp->ni_cap_tracker, nt, nm_link); } static void nameicap_cleanup_from(struct nameidata *ndp, struct nameicap_tracker *first) { struct nameicap_tracker *nt, *nt1; nt = first; TAILQ_FOREACH_FROM_SAFE(nt, &ndp->ni_cap_tracker, nm_link, nt1) { TAILQ_REMOVE(&ndp->ni_cap_tracker, nt, nm_link); vdrop(nt->dp); free(nt, M_NAMEITRACKER); } } static void nameicap_cleanup(struct nameidata *ndp) { KASSERT(TAILQ_EMPTY(&ndp->ni_cap_tracker) || (ndp->ni_lcf & NI_LCF_CAP_DOTDOT) != 0, ("not strictrelative")); nameicap_cleanup_from(ndp, NULL); } /* * For dotdot lookups in capability mode, only allow the component * lookup to succeed if the resulting directory was already traversed * during the operation. This catches situations where already * traversed directory is moved to different parent, and then we walk * over it with dotdots. * * Also allow to force failure of dotdot lookups for non-local * filesystems, where external agents might assist local lookups to * escape the compartment. 
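 *
 * From userspace the effect is roughly the following (illustrative
 * only; error checking omitted, paths are placeholders):
 *
 *	dfd = open("/some/dir", O_RDONLY | O_DIRECTORY);
 *	cap_enter();
 *	openat(dfd, "sub/../file", O_RDONLY);	// ok, ".." lands on a
 *						// directory walked earlier
 *	openat(dfd, "../file", O_RDONLY);	// ENOTCAPABLE, ".." would
 *						// leave the subtree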
*/ static int nameicap_check_dotdot(struct nameidata *ndp, struct vnode *dp) { struct nameicap_tracker *nt; struct mount *mp; if (dp == NULL || dp->v_type != VDIR || (ndp->ni_lcf & NI_LCF_STRICTREL) == 0) return (0); if (__predict_false((ndp->ni_lcf & (NI_LCF_STRICTREL_KTR | NI_LCF_CAP_DOTDOT_KTR)) == NI_LCF_STRICTREL_KTR)) NI_CAP_VIOLATION(ndp, ndp->ni_cnd.cn_pnbuf); if ((ndp->ni_lcf & NI_LCF_CAP_DOTDOT) == 0) return (ENOTCAPABLE); mp = dp->v_mount; if (lookup_cap_dotdot_nonlocal == 0 && mp != NULL && (mp->mnt_flag & MNT_LOCAL) == 0) goto capfail; TAILQ_FOREACH_REVERSE(nt, &ndp->ni_cap_tracker, nameicap_tracker_head, nm_link) { if (dp == nt->dp) { nt = TAILQ_NEXT(nt, nm_link); if (nt != NULL) nameicap_cleanup_from(ndp, nt); return (0); } } capfail: if (__predict_false((ndp->ni_lcf & NI_LCF_STRICTREL_KTR) != 0)) NI_CAP_VIOLATION(ndp, ndp->ni_cnd.cn_pnbuf); return (ENOTCAPABLE); } static void namei_cleanup_cnp(struct componentname *cnp) { uma_zfree(namei_zone, cnp->cn_pnbuf); cnp->cn_pnbuf = NULL; cnp->cn_nameptr = NULL; } static int namei_handle_root(struct nameidata *ndp, struct vnode **dpp) { struct componentname *cnp; cnp = &ndp->ni_cnd; if (__predict_false((ndp->ni_lcf & (NI_LCF_STRICTREL | NI_LCF_STRICTREL_KTR)) != 0)) { if ((ndp->ni_lcf & NI_LCF_STRICTREL_KTR) != 0) NI_CAP_VIOLATION(ndp, cnp->cn_pnbuf); if ((ndp->ni_lcf & NI_LCF_STRICTREL) != 0) return (ENOTCAPABLE); } while (*(cnp->cn_nameptr) == '/') { cnp->cn_nameptr++; ndp->ni_pathlen--; } *dpp = ndp->ni_rootdir; vrefact(*dpp); return (0); } static int namei_setup(struct nameidata *ndp, struct vnode **dpp, struct pwd **pwdp) { struct componentname *cnp; struct thread *td; struct pwd *pwd; int error; bool startdir_used; cnp = &ndp->ni_cnd; td = curthread; startdir_used = false; *pwdp = NULL; *dpp = NULL; #ifdef CAPABILITY_MODE /* * In capability mode, lookups must be restricted to happen in * the subtree with the root specified by the file descriptor: * - The root must be real file descriptor, not the pseudo-descriptor * AT_FDCWD. * - The passed path must be relative and not absolute. * - If lookup_cap_dotdot is disabled, path must not contain the * '..' components. * - If lookup_cap_dotdot is enabled, we verify that all '..' * components lookups result in the directories which were * previously walked by us, which prevents an escape from * the relative root. */ if ((cnp->cn_flags & NOCAPCHECK) == 0) { if (CAP_TRACING(td)) { ndp->ni_lcf |= NI_LCF_STRICTREL_KTR; if (ndp->ni_dirfd == AT_FDCWD) NI_CAP_VIOLATION(ndp, "AT_FDCWD"); } if (IN_CAPABILITY_MODE(td)) { ndp->ni_lcf |= NI_LCF_STRICTREL; ndp->ni_resflags |= NIRES_STRICTREL; if (ndp->ni_dirfd == AT_FDCWD) return (ECAPMODE); } } #endif error = 0; /* * Get starting point for the translation. */ pwd = pwd_hold(td); /* * The reference on ni_rootdir is acquired in the block below to avoid * back-to-back atomics for absolute lookups. 
*/ namei_setup_rootdir(ndp, cnp, pwd); ndp->ni_topdir = pwd->pwd_jdir; if (cnp->cn_pnbuf[0] == '/') { ndp->ni_resflags |= NIRES_ABS; error = namei_handle_root(ndp, dpp); } else { if (ndp->ni_startdir != NULL) { *dpp = ndp->ni_startdir; startdir_used = true; } else if (ndp->ni_dirfd == AT_FDCWD) { *dpp = pwd->pwd_cdir; vrefact(*dpp); } else { if (cnp->cn_flags & AUDITVNODE1) AUDIT_ARG_ATFD1(ndp->ni_dirfd); if (cnp->cn_flags & AUDITVNODE2) AUDIT_ARG_ATFD2(ndp->ni_dirfd); error = fgetvp_lookup(ndp, dpp); } if (error == 0 && (*dpp)->v_type != VDIR && (cnp->cn_flags & OPENNAMED) == 0 && (cnp->cn_pnbuf[0] != '\0' || (cnp->cn_flags & EMPTYPATH) == 0)) error = ENOTDIR; } if (error == 0 && (cnp->cn_flags & RBENEATH) != 0) { if (cnp->cn_pnbuf[0] == '/') { error = ENOTCAPABLE; } else if ((ndp->ni_lcf & NI_LCF_STRICTREL) == 0) { ndp->ni_lcf |= NI_LCF_STRICTREL | NI_LCF_CAP_DOTDOT; } } /* * If we are auditing the kernel pathname, save the user pathname. */ if (AUDITING_TD(td)) { if (cnp->cn_flags & AUDITVNODE1) AUDIT_ARG_UPATH1_VP(td, ndp->ni_rootdir, *dpp, cnp->cn_pnbuf); if (cnp->cn_flags & AUDITVNODE2) AUDIT_ARG_UPATH2_VP(td, ndp->ni_rootdir, *dpp, cnp->cn_pnbuf); } if (ndp->ni_startdir != NULL && !startdir_used) vrele(ndp->ni_startdir); if (error != 0) { if (*dpp != NULL) vrele(*dpp); pwd_drop(pwd); return (error); } if (lookup_cap_dotdot != 0) { if ((ndp->ni_lcf & NI_LCF_STRICTREL_KTR) != 0) ndp->ni_lcf |= NI_LCF_CAP_DOTDOT_KTR; if ((ndp->ni_lcf & NI_LCF_STRICTREL) != 0) ndp->ni_lcf |= NI_LCF_CAP_DOTDOT; } SDT_PROBE4(vfs, namei, lookup, entry, *dpp, cnp->cn_pnbuf, cnp->cn_flags, false); *pwdp = pwd; return (0); } static int namei_getpath(struct nameidata *ndp) { struct componentname *cnp; int error; cnp = &ndp->ni_cnd; /* * Get a buffer for the name to be translated, and copy the * name into the buffer. */ cnp->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK); if (ndp->ni_segflg == UIO_SYSSPACE) { error = copystr(ndp->ni_dirp, cnp->cn_pnbuf, MAXPATHLEN, &ndp->ni_pathlen); } else { error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf, MAXPATHLEN, &ndp->ni_pathlen); } return (error); } static int namei_emptypath(struct nameidata *ndp) { struct componentname *cnp; struct pwd *pwd; struct vnode *dp; int error; cnp = &ndp->ni_cnd; MPASS(*cnp->cn_pnbuf == '\0'); MPASS((cnp->cn_flags & EMPTYPATH) != 0); MPASS((cnp->cn_flags & (LOCKPARENT | WANTPARENT)) == 0); ndp->ni_resflags |= NIRES_EMPTYPATH; error = namei_setup(ndp, &dp, &pwd); if (error != 0) { goto errout; } /* * Usecount on dp already provided by namei_setup. */ ndp->ni_vp = dp; pwd_drop(pwd); NDVALIDATE(ndp); if ((cnp->cn_flags & LOCKLEAF) != 0) { VOP_LOCK(dp, (cnp->cn_flags & LOCKSHARED) != 0 ? 
LK_SHARED : LK_EXCLUSIVE); if (VN_IS_DOOMED(dp)) { vput(dp); error = ENOENT; goto errout; } } SDT_PROBE4(vfs, namei, lookup, return, 0, ndp->ni_vp, false, ndp); return (0); errout: SDT_PROBE4(vfs, namei, lookup, return, error, NULL, false, ndp); namei_cleanup_cnp(cnp); return (error); } static int __noinline namei_follow_link(struct nameidata *ndp) { char *cp; struct iovec aiov; struct uio auio; struct componentname *cnp; struct thread *td; int error, linklen; error = 0; cnp = &ndp->ni_cnd; td = curthread; if (ndp->ni_loopcnt++ >= MAXSYMLINKS) { error = ELOOP; goto out; } #ifdef MAC if ((cnp->cn_flags & NOMACCHECK) == 0) { error = mac_vnode_check_readlink(td->td_ucred, ndp->ni_vp); if (error != 0) goto out; } #endif if (ndp->ni_pathlen > 1) cp = uma_zalloc(namei_zone, M_WAITOK); else cp = cnp->cn_pnbuf; aiov.iov_base = cp; aiov.iov_len = MAXPATHLEN; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_offset = 0; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = MAXPATHLEN; error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred); if (error != 0) { if (ndp->ni_pathlen > 1) uma_zfree(namei_zone, cp); goto out; } linklen = MAXPATHLEN - auio.uio_resid; if (linklen == 0) { if (ndp->ni_pathlen > 1) uma_zfree(namei_zone, cp); error = ENOENT; goto out; } if (linklen + ndp->ni_pathlen > MAXPATHLEN) { if (ndp->ni_pathlen > 1) uma_zfree(namei_zone, cp); error = ENAMETOOLONG; goto out; } if (ndp->ni_pathlen > 1) { bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen); uma_zfree(namei_zone, cnp->cn_pnbuf); cnp->cn_pnbuf = cp; } else cnp->cn_pnbuf[linklen] = '\0'; ndp->ni_pathlen += linklen; out: return (error); } /* * Convert a pathname into a pointer to a locked vnode. * * The FOLLOW flag is set when symbolic links are to be followed * when they occur at the end of the name translation process. * Symbolic links are always followed for all other pathname * components other than the last. * * The segflg defines whether the name is to be copied from user * space or kernel space. * * Overall outline of namei: * * copy in name * get starting directory * while (!done && !error) { * call lookup to search path. * if symbolic link, massage name in buffer and continue * } */ int namei(struct nameidata *ndp) { struct vnode *dp; /* the directory we are searching */ struct componentname *cnp; struct thread *td; struct pwd *pwd; int error; enum cache_fpl_status status; cnp = &ndp->ni_cnd; td = curthread; #ifdef INVARIANTS KASSERT((ndp->ni_debugflags & NAMEI_DBG_CALLED) == 0, ("%s: repeated call to namei without NDREINIT", __func__)); KASSERT(ndp->ni_debugflags == NAMEI_DBG_INITED, ("%s: bad debugflags %d", __func__, ndp->ni_debugflags)); ndp->ni_debugflags |= NAMEI_DBG_CALLED; if (ndp->ni_startdir != NULL) ndp->ni_debugflags |= NAMEI_DBG_HADSTARTDIR; if (cnp->cn_flags & FAILIFEXISTS) { KASSERT(cnp->cn_nameiop == CREATE, ("%s: FAILIFEXISTS passed for op %d", __func__, cnp->cn_nameiop)); /* * The limitation below is to restrict hairy corner cases. 
*/ KASSERT((cnp->cn_flags & (LOCKPARENT | LOCKLEAF)) == LOCKPARENT, ("%s: FAILIFEXISTS must be passed with LOCKPARENT and without LOCKLEAF", __func__)); } #endif ndp->ni_cnd.cn_cred = td->td_ucred; - KASSERT(ndp->ni_resflags == 0, ("%s: garbage in ni_resflags: %x\n", + KASSERT(ndp->ni_resflags == 0, ("%s: garbage in ni_resflags: %x", __func__, ndp->ni_resflags)); KASSERT(cnp->cn_cred && td->td_proc, ("namei: bad cred/proc")); KASSERT((cnp->cn_flags & NAMEI_INTERNAL_FLAGS) == 0, - ("namei: unexpected flags: %" PRIx64 "\n", - cnp->cn_flags & NAMEI_INTERNAL_FLAGS)); + ("namei: unexpected flags: %#jx", + (uintmax_t)(cnp->cn_flags & NAMEI_INTERNAL_FLAGS))); if (cnp->cn_flags & NOCACHE) KASSERT(cnp->cn_nameiop != LOOKUP, ("%s: NOCACHE passed with LOOKUP", __func__)); MPASS(ndp->ni_startdir == NULL || ndp->ni_startdir->v_type == VDIR || ndp->ni_startdir->v_type == VBAD); restart: ndp->ni_lcf = 0; ndp->ni_loopcnt = 0; ndp->ni_vp = NULL; error = namei_getpath(ndp); if (__predict_false(error != 0)) { namei_cleanup_cnp(cnp); SDT_PROBE4(vfs, namei, lookup, return, error, NULL, false, ndp); return (error); } cnp->cn_nameptr = cnp->cn_pnbuf; #ifdef KTRACE if (KTRPOINT(td, KTR_NAMEI)) { ktrnamei(cnp->cn_pnbuf); } #endif TSNAMEI(curthread->td_proc->p_pid, cnp->cn_pnbuf); /* * First try looking up the target without locking any vnodes. * * We may need to start from scratch or pick up where it left off. */ error = cache_fplookup(ndp, &status, &pwd); switch (status) { case CACHE_FPL_STATUS_UNSET: __assert_unreachable(); break; case CACHE_FPL_STATUS_HANDLED: if (error == 0) NDVALIDATE(ndp); else if (__predict_false(pwd->pwd_adir != pwd->pwd_rdir && (cnp->cn_flags & ISRESTARTED) == 0)) { namei_cleanup_cnp(cnp); NDRESTART(ndp); goto restart; } return (error); case CACHE_FPL_STATUS_PARTIAL: TAILQ_INIT(&ndp->ni_cap_tracker); dp = ndp->ni_startdir; break; case CACHE_FPL_STATUS_DESTROYED: ndp->ni_loopcnt = 0; error = namei_getpath(ndp); if (__predict_false(error != 0)) { namei_cleanup_cnp(cnp); return (error); } cnp->cn_nameptr = cnp->cn_pnbuf; /* FALLTHROUGH */ case CACHE_FPL_STATUS_ABORTED: TAILQ_INIT(&ndp->ni_cap_tracker); MPASS(ndp->ni_lcf == 0); if (*cnp->cn_pnbuf == '\0') { if ((cnp->cn_flags & EMPTYPATH) != 0) { return (namei_emptypath(ndp)); } namei_cleanup_cnp(cnp); SDT_PROBE4(vfs, namei, lookup, return, ENOENT, NULL, false, ndp); return (ENOENT); } error = namei_setup(ndp, &dp, &pwd); if (error != 0) { namei_cleanup_cnp(cnp); return (error); } break; } /* * Locked lookup. */ for (;;) { ndp->ni_startdir = dp; error = vfs_lookup(ndp); if (error != 0) { if (__predict_false(pwd->pwd_adir != pwd->pwd_rdir && error == ENOENT && (cnp->cn_flags & ISRESTARTED) == 0)) { nameicap_cleanup(ndp); pwd_drop(pwd); namei_cleanup_cnp(cnp); NDRESTART(ndp); goto restart; } else goto out; } /* * If not a symbolic link, we're done. */ if ((cnp->cn_flags & ISSYMLINK) == 0) { SDT_PROBE4(vfs, namei, lookup, return, error, ndp->ni_vp, false, ndp); nameicap_cleanup(ndp); pwd_drop(pwd); NDVALIDATE(ndp); return (0); } error = namei_follow_link(ndp); if (error != 0) break; vput(ndp->ni_vp); dp = ndp->ni_dvp; /* * Check if root directory should replace current directory. */ cnp->cn_nameptr = cnp->cn_pnbuf; if (*(cnp->cn_nameptr) == '/') { /* * Reset the lookup to start from the real root without * origin path name reloading. 
*/ if (__predict_false(ndp->ni_rootdir != pwd->pwd_rdir)) { cnp->cn_flags |= ISRESTARTED; ndp->ni_rootdir = pwd->pwd_rdir; } vrele(dp); error = namei_handle_root(ndp, &dp); if (error != 0) goto out; } } vput(ndp->ni_vp); ndp->ni_vp = NULL; vrele(ndp->ni_dvp); out: MPASS(error != 0); SDT_PROBE4(vfs, namei, lookup, return, error, NULL, false, ndp); namei_cleanup_cnp(cnp); nameicap_cleanup(ndp); pwd_drop(pwd); return (error); } static int enforce_lkflags(struct mount *mp, int lkflags) { if (mp == NULL || ((lkflags & LK_SHARED) && !(mp->mnt_kern_flag & MNTK_LOOKUP_SHARED))) { lkflags &= ~LK_SHARED; lkflags |= LK_EXCLUSIVE; } lkflags |= LK_NODDLKTREAT; return (lkflags); } static __inline int needs_exclusive_leaf(struct mount *mp, int flags) { /* * Intermediate nodes can use shared locks, we only need to * force an exclusive lock for leaf nodes. */ if ((flags & (ISLASTCN | LOCKLEAF)) != (ISLASTCN | LOCKLEAF)) return (0); /* Always use exclusive locks if LOCKSHARED isn't set. */ if (!(flags & LOCKSHARED)) return (1); /* * For lookups during open(), if the mount point supports * extended shared operations, then use a shared lock for the * leaf node, otherwise use an exclusive lock. */ if ((flags & ISOPEN) != 0) return (!MNT_EXTENDED_SHARED(mp)); /* * Lookup requests outside of open() that specify LOCKSHARED * only need a shared lock on the leaf vnode. */ return (0); } /* * Various filesystems expect to be able to copy a name component with length * bounded by NAME_MAX into a directory entry buffer of size MAXNAMLEN. Make * sure that these are the same size. */ _Static_assert(MAXNAMLEN == NAME_MAX, "MAXNAMLEN and NAME_MAX have different values"); static int __noinline vfs_lookup_degenerate(struct nameidata *ndp, struct vnode *dp, int wantparent) { struct componentname *cnp; struct mount *mp; int error; cnp = &ndp->ni_cnd; cnp->cn_flags |= ISLASTCN; mp = atomic_load_ptr(&dp->v_mount); if (needs_exclusive_leaf(mp, cnp->cn_flags)) { cnp->cn_lkflags &= ~LK_SHARED; cnp->cn_lkflags |= LK_EXCLUSIVE; } vn_lock(dp, enforce_lkflags(mp, cnp->cn_lkflags | LK_RETRY)); if (dp->v_type != VDIR) { error = ENOTDIR; goto bad; } if (cnp->cn_nameiop != LOOKUP) { error = EISDIR; goto bad; } if (wantparent) { ndp->ni_dvp = dp; VREF(dp); } ndp->ni_vp = dp; cnp->cn_namelen = 0; if (cnp->cn_flags & AUDITVNODE1) AUDIT_ARG_VNODE1(dp); else if (cnp->cn_flags & AUDITVNODE2) AUDIT_ARG_VNODE2(dp); if (!(cnp->cn_flags & (LOCKPARENT | LOCKLEAF))) VOP_UNLOCK(dp); return (0); bad: VOP_UNLOCK(dp); return (error); } +struct nameidata * +vfs_lookup_nameidata(struct componentname *cnp) +{ + if ((cnp->cn_flags & NAMEILOOKUP) == 0) + return (NULL); + return (__containerof(cnp, struct nameidata, ni_cnd)); +} + +/* + * Would a dotdot lookup relative to dvp cause this lookup to cross a jail or + * chroot boundary? + */ +bool +vfs_lookup_isroot(struct nameidata *ndp, struct vnode *dvp) +{ + for (struct prison *pr = ndp->ni_cnd.cn_cred->cr_prison; pr != NULL; + pr = pr->pr_parent) { + if (dvp == pr->pr_root) + return (true); + } + return (dvp == ndp->ni_rootdir || dvp == ndp->ni_topdir || + dvp == rootvnode); +} + /* * FAILIFEXISTS handling. * * XXX namei called with LOCKPARENT but not LOCKLEAF has the strange * behaviour of leaving the vnode unlocked if the target is the same * vnode as the parent. 
*/ static int __noinline vfs_lookup_failifexists(struct nameidata *ndp) { struct componentname *cnp __diagused; cnp = &ndp->ni_cnd; MPASS((cnp->cn_flags & ISSYMLINK) == 0); if (ndp->ni_vp == ndp->ni_dvp) vrele(ndp->ni_dvp); else vput(ndp->ni_dvp); vrele(ndp->ni_vp); ndp->ni_dvp = NULL; ndp->ni_vp = NULL; NDFREE_PNBUF(ndp); return (EEXIST); } static int __noinline vfs_lookup_cross_mount(struct nameidata *ndp) { struct componentname *cnp; struct mount *mp; struct vnode *dp, *tdp; int error, crosslkflags; bool crosslock; cnp = &ndp->ni_cnd; dp = ndp->ni_vp; /* * The vnode has been mounted on, find the root of the mounted * filesystem. */ do { mp = dp->v_mountedhere; ASSERT_VOP_LOCKED(dp, __func__); VNPASS((vn_irflag_read(dp) & VIRF_MOUNTPOINT) != 0 && mp != NULL, dp); crosslock = (dp->v_vflag & VV_CROSSLOCK) != 0; crosslkflags = enforce_lkflags(mp, cnp->cn_lkflags); if (__predict_false(crosslock)) { /* * We are going to be holding the vnode lock, which * in this case is shared by the root vnode of the * filesystem mounted at mp, across the call to * VFS_ROOT(). Make the situation clear to the * filesystem by passing LK_CANRECURSE if the * lock is held exclusive, or by clearinng * LK_NODDLKTREAT to allow recursion on the shared * lock in the presence of an exclusive waiter. */ if (VOP_ISLOCKED(dp) == LK_EXCLUSIVE) { crosslkflags &= ~LK_SHARED; crosslkflags |= LK_EXCLUSIVE | LK_CANRECURSE; } else if ((crosslkflags & LK_EXCLUSIVE) != 0) { error = vn_lock(dp, LK_UPGRADE); if (error != 0) { MPASS(error == ENOENT); vrele(dp); if (dp != ndp->ni_dvp) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); break; } if (dp->v_mountedhere != mp) { /* * Note that we rely on the * VIRF_MOUNTPOINT loop condition to * ensure we stop iterating if dp is * no longer a mountpoint at all. */ continue; } } else crosslkflags &= ~LK_NODDLKTREAT; } if (vfs_busy(mp, 0) != 0) continue; if (__predict_true(!crosslock)) vput(dp); if (dp != ndp->ni_dvp) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); vrefact(vp_crossmp); ndp->ni_dvp = vp_crossmp; error = VFS_ROOT(mp, crosslkflags, &tdp); vfs_unbusy(mp); if (__predict_false(crosslock)) vput(dp); if (vn_lock(vp_crossmp, LK_SHARED | LK_NOWAIT)) panic("vp_crossmp exclusively locked or reclaimed"); if (error != 0) break; ndp->ni_vp = dp = tdp; } while ((vn_irflag_read(dp) & VIRF_MOUNTPOINT) != 0); return (error); } /* * Search a pathname. * This is a very central and rather complicated routine. * * The pathname is pointed to by cn_nameptr and is of length ni_pathlen. * The starting directory is taken from ni_startdir. The pathname is * descended until done, or a symbolic link is encountered. The cn_flags * has ISLASTCN or'ed if the path is completed or ISSYMLINK or'ed if a * symbolic link needing interpretation is encountered. * * The cn_nameiop is LOOKUP, CREATE, RENAME, or DELETE depending on * whether the name is to be looked up, created, renamed, or deleted. * When CREATE, RENAME, or DELETE is specified, information usable in * creating, renaming, or deleting a directory entry may be calculated. * If cn_flags has LOCKPARENT or'ed into it, the parent directory is returned * locked. If it has WANTPARENT or'ed into it, the parent directory is * returned unlocked. Otherwise the parent directory is not returned. If * the target of the pathname exists and LOCKLEAF is or'ed into the cn_flags * the target is returned locked, otherwise it is returned unlocked. 
* * Overall outline of lookup: * * handle degenerate case where name is null string * * dirloop: * identify next component of name at ndp->ni_cnd.cn_nameptr * handle .. special cases related to capabilities, chroot, jail * if .. and crossing mount points and on mounted filesys, find parent * call VOP_LOOKUP routine for next component name * directory vnode returned in ni_dvp, unlocked unless LOCKPARENT set * component vnode returned in ni_vp (if it exists), locked. * if result vnode is mounted on and crossing mount points, * find mounted on vnode * if more components of name, do next level at dirloop * if VOP_LOOKUP returns ERELOOKUP, repeat the same level at dirloop * return the answer in ni_vp, locked if LOCKLEAF set * if LOCKPARENT set, return locked parent in ni_dvp * if WANTPARENT set, return unlocked parent in ni_dvp */ int vfs_lookup(struct nameidata *ndp) { char *cp; /* pointer into pathname argument */ char *prev_ni_next; /* saved ndp->ni_next */ char *nulchar; /* location of '\0' in cn_pnbuf */ char *lastchar; /* location of the last character */ struct vnode *dp = NULL; /* the directory we are searching */ struct vnode *tdp; /* saved dp */ - struct prison *pr; size_t prev_ni_pathlen; /* saved ndp->ni_pathlen */ int docache; /* == 0 do not cache last component */ int wantparent; /* 1 => wantparent or lockparent flag */ int rdonly; /* lookup read-only flag bit */ int error = 0; int relookup = 0; /* do not consume the path component */ struct componentname *cnp = &ndp->ni_cnd; int lkflags_save; int ni_dvp_unlocked; /* * Setup: break out flag bits into variables. */ ni_dvp_unlocked = 0; wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT); KASSERT(cnp->cn_nameiop == LOOKUP || wantparent, ("CREATE, DELETE, RENAME require LOCKPARENT or WANTPARENT.")); /* * When set to zero, docache causes the last component of the * pathname to be deleted from the cache and the full lookup * of the name to be done (via VOP_CACHEDLOOKUP()). Often * filesystems need some pre-computed values that are made * during the full lookup, for instance UFS sets dp->i_offset. * * The docache variable is set to zero when requested by the * NOCACHE flag and for all modifying operations except CREATE. */ docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE; if (cnp->cn_nameiop == DELETE || (wantparent && cnp->cn_nameiop != CREATE && cnp->cn_nameiop != LOOKUP)) docache = 0; rdonly = cnp->cn_flags & RDONLY; cnp->cn_flags &= ~ISSYMLINK; ndp->ni_dvp = NULL; cnp->cn_lkflags = LK_SHARED; dp = ndp->ni_startdir; ndp->ni_startdir = NULLVP; /* * Leading slashes, if any, are supposed to be skipped by the caller. */ MPASS(cnp->cn_nameptr[0] != '/'); /* * Check for degenerate name (e.g. / or "") which is a way of talking * about a directory, e.g. like "/." or ".". */ if (__predict_false(cnp->cn_nameptr[0] == '\0')) { error = vfs_lookup_degenerate(ndp, dp, wantparent); if (error == 0) goto success_right_lock; goto bad_unlocked; } /* * Nul-out trailing slashes (e.g., "foo///" -> "foo"). * * This must be done before VOP_LOOKUP() because some fs's don't know * about trailing slashes. Remember if there were trailing slashes to * handle symlinks, existing non-directories and non-existing files * that won't be directories specially later. 
*/ MPASS(ndp->ni_pathlen >= 2); lastchar = &cnp->cn_nameptr[ndp->ni_pathlen - 2]; if (*lastchar == '/') { while (lastchar >= cnp->cn_pnbuf) { *lastchar = '\0'; lastchar--; ndp->ni_pathlen--; if (*lastchar != '/') { break; } } cnp->cn_flags |= TRAILINGSLASH; } /* * We use shared locks until we hit the parent of the last cn then * we adjust based on the requesting flags. */ vn_lock(dp, enforce_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY)); dirloop: /* * Search a new directory. * * The last component of the filename is left accessible via * cnp->cn_nameptr. It has to be freed with a call to NDFREE*. * * Store / as a temporary sentinel so that we only have one character * to test for. Pathnames tend to be short so this should not be * resulting in cache misses. */ nulchar = &cnp->cn_nameptr[ndp->ni_pathlen - 1]; KASSERT(*nulchar == '\0', ("%s: expected nul at %p; string [%s]\n", __func__, nulchar, cnp->cn_pnbuf)); *nulchar = '/'; for (cp = cnp->cn_nameptr; *cp != '/'; cp++) { KASSERT(*cp != '\0', ("%s: encountered unexpected nul; string [%s]\n", __func__, cnp->cn_nameptr)); continue; } *nulchar = '\0'; cnp->cn_namelen = cp - cnp->cn_nameptr; if (__predict_false(cnp->cn_namelen > NAME_MAX)) { error = ENAMETOOLONG; goto bad; } prev_ni_pathlen = ndp->ni_pathlen; ndp->ni_pathlen -= cnp->cn_namelen; KASSERT(ndp->ni_pathlen <= PATH_MAX, ("%s: ni_pathlen underflow to %zd\n", __func__, ndp->ni_pathlen)); prev_ni_next = ndp->ni_next; ndp->ni_next = cp; /* * Something else should be clearing this. */ cnp->cn_flags &= ~(ISDOTDOT|ISLASTCN); cnp->cn_flags |= MAKEENTRY; if (*cp == '\0' && docache == 0) cnp->cn_flags &= ~MAKEENTRY; if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') cnp->cn_flags |= ISDOTDOT; if (*ndp->ni_next == 0) { cnp->cn_flags |= ISLASTCN; if (__predict_false(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.' && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))) { error = EINVAL; goto bad; } } nameicap_tracker_add(ndp, dp); /* * Make sure degenerate names don't get here, their handling was * previously found in this spot. */ MPASS(cnp->cn_nameptr[0] != '\0'); /* * Handle "..": five special cases. * 0. If doing a capability lookup and lookup_cap_dotdot is * disabled, return ENOTCAPABLE. * 1. Return an error if this is the last component of * the name and the operation is DELETE or RENAME. * 2. If at root directory (e.g. after chroot) * or at absolute root directory * then ignore it so can't get out. * 3. If this vnode is the root of a mounted * filesystem, then replace it with the * vnode which was mounted on so we take the * .. in the other filesystem. * 4. If the vnode is the top directory of * the jail or chroot, don't let them out. * 5. If doing a capability lookup and lookup_cap_dotdot is * enabled, return ENOTCAPABLE if the lookup would escape * from the initial file descriptor directory. Checks are * done by ensuring that namei() already traversed the * result of dotdot lookup. 
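	 *    For example, with lookup_cap_dotdot enabled, a strict-relative
	 *    lookup of "subdir/../file" is permitted because the ".."
	 *    resolves to a directory this lookup already traversed, while
	 *    "../file" would escape the starting directory and fails with
	 *    ENOTCAPABLE.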
*/ if (cnp->cn_flags & ISDOTDOT) { if (__predict_false((ndp->ni_lcf & (NI_LCF_STRICTREL_KTR | NI_LCF_CAP_DOTDOT_KTR)) == NI_LCF_STRICTREL_KTR)) NI_CAP_VIOLATION(ndp, cnp->cn_pnbuf); if (__predict_false((ndp->ni_lcf & (NI_LCF_STRICTREL | NI_LCF_CAP_DOTDOT)) == NI_LCF_STRICTREL)) { error = ENOTCAPABLE; goto bad; } if ((cnp->cn_flags & ISLASTCN) != 0 && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) { error = EINVAL; goto bad; } for (;;) { - for (pr = cnp->cn_cred->cr_prison; pr != NULL; - pr = pr->pr_parent) - if (dp == pr->pr_root) - break; - bool isroot = dp == ndp->ni_rootdir || - dp == ndp->ni_topdir || dp == rootvnode || - pr != NULL; + bool isroot; + + isroot = vfs_lookup_isroot(ndp, dp); if (__predict_false(isroot && (ndp->ni_lcf & (NI_LCF_STRICTREL | NI_LCF_STRICTREL_KTR)) != 0)) { if ((ndp->ni_lcf & NI_LCF_STRICTREL_KTR) != 0) NI_CAP_VIOLATION(ndp, cnp->cn_pnbuf); if ((ndp->ni_lcf & NI_LCF_STRICTREL) != 0) { error = ENOTCAPABLE; goto capdotdot; } } if (isroot || ((dp->v_vflag & VV_ROOT) != 0 && (cnp->cn_flags & NOCROSSMOUNT) != 0)) { ndp->ni_dvp = dp; ndp->ni_vp = dp; VREF(dp); goto nextname; } if ((dp->v_vflag & VV_ROOT) == 0) break; if (VN_IS_DOOMED(dp)) { /* forced unmount */ error = ENOENT; goto bad; } tdp = dp; dp = dp->v_mount->mnt_vnodecovered; VREF(dp); vput(tdp); vn_lock(dp, enforce_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY)); error = nameicap_check_dotdot(ndp, dp); if (error != 0) { capdotdot: goto bad; } } } /* * We now have a segment name to search for, and a directory to search. */ unionlookup: #ifdef MAC error = mac_vnode_check_lookup(cnp->cn_cred, dp, cnp); if (__predict_false(error)) goto bad; #endif ndp->ni_dvp = dp; ndp->ni_vp = NULL; ASSERT_VOP_LOCKED(dp, "lookup"); /* * If we have a shared lock we may need to upgrade the lock for the * last operation. */ if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN) && dp != vp_crossmp && VOP_ISLOCKED(dp) == LK_SHARED) vn_lock(dp, LK_UPGRADE|LK_RETRY); if (VN_IS_DOOMED(dp)) { error = ENOENT; goto bad; } /* * If we're looking up the last component and we need an exclusive * lock, adjust our lkflags. */ if (needs_exclusive_leaf(dp->v_mount, cnp->cn_flags)) cnp->cn_lkflags = LK_EXCLUSIVE; lkflags_save = cnp->cn_lkflags; cnp->cn_lkflags = enforce_lkflags(dp->v_mount, cnp->cn_lkflags); error = VOP_LOOKUP(dp, &ndp->ni_vp, cnp); cnp->cn_lkflags = lkflags_save; if (error != 0) { KASSERT(ndp->ni_vp == NULL, ("leaf should be empty")); if ((error == ENOENT) && (dp->v_vflag & VV_ROOT) && (dp->v_mount != NULL) && (dp->v_mount->mnt_flag & MNT_UNION)) { tdp = dp; dp = dp->v_mount->mnt_vnodecovered; VREF(dp); vput(tdp); vn_lock(dp, enforce_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY)); nameicap_tracker_add(ndp, dp); goto unionlookup; } if (error == ERELOOKUP) { vref(dp); ndp->ni_vp = dp; error = 0; relookup = 1; goto good; } if (error != EJUSTRETURN) goto bad; /* * At this point, we know we're at the end of the * pathname. If creating / renaming, we can consider * allowing the file or directory to be created / renamed, * provided we're not on a read-only filesystem. */ if (rdonly) { error = EROFS; goto bad; } /* trailing slash only allowed for directories */ if ((cnp->cn_flags & TRAILINGSLASH) && !(cnp->cn_flags & WILLBEDIR)) { error = ENOENT; goto bad; } if ((cnp->cn_flags & LOCKPARENT) == 0) VOP_UNLOCK(dp); /* * We return with ni_vp NULL to indicate that the entry * doesn't currently exist, leaving a pointer to the * (possibly locked) directory vnode in ndp->ni_dvp. 
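		 * Callers detect this case by checking that ni_vp is NULL
		 * after a successful return and then, for instance, create
		 * the entry with VOP_CREATE() using ni_dvp and ni_cnd.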
*/ goto success; } good: dp = ndp->ni_vp; /* * Check for symbolic link */ if ((dp->v_type == VLNK) && ((cnp->cn_flags & FOLLOW) || (cnp->cn_flags & TRAILINGSLASH) || *ndp->ni_next == '/')) { cnp->cn_flags |= ISSYMLINK; if (VN_IS_DOOMED(dp)) { /* * We can't know whether the directory was mounted with * NOSYMFOLLOW, so we can't follow safely. */ error = ENOENT; goto bad2; } if (dp->v_mount->mnt_flag & MNT_NOSYMFOLLOW) { error = EACCES; goto bad2; } /* * Symlink code always expects an unlocked dvp. */ if (ndp->ni_dvp != ndp->ni_vp) { VOP_UNLOCK(ndp->ni_dvp); ni_dvp_unlocked = 1; } goto success; } if ((vn_irflag_read(dp) & VIRF_MOUNTPOINT) != 0 && (cnp->cn_flags & NOCROSSMOUNT) == 0) { error = vfs_lookup_cross_mount(ndp); if (error != 0) goto bad_unlocked; /* * FALLTHROUGH to nextname */ dp = ndp->ni_vp; } nextname: /* * Not a symbolic link that we will follow. Continue with the * next component if there is any; otherwise, we're done. */ KASSERT((cnp->cn_flags & ISLASTCN) || *ndp->ni_next == '/', ("lookup: invalid path state.")); if (relookup) { relookup = 0; ndp->ni_pathlen = prev_ni_pathlen; ndp->ni_next = prev_ni_next; if (ndp->ni_dvp != dp) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); goto dirloop; } if (cnp->cn_flags & ISDOTDOT) { error = nameicap_check_dotdot(ndp, ndp->ni_vp); if (error != 0) goto bad2; } if (*ndp->ni_next == '/') { cnp->cn_nameptr = ndp->ni_next; while (*cnp->cn_nameptr == '/') { cnp->cn_nameptr++; ndp->ni_pathlen--; } if (ndp->ni_dvp != dp) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); goto dirloop; } /* * If we're processing a path with a trailing slash, * check that the end result is a directory. */ if ((cnp->cn_flags & TRAILINGSLASH) && dp->v_type != VDIR) { error = ENOTDIR; goto bad2; } /* * Disallow directory write attempts on read-only filesystems. */ if (rdonly && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) { error = EROFS; goto bad2; } if (!wantparent) { ni_dvp_unlocked = 2; if (ndp->ni_dvp != dp) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); } else if ((cnp->cn_flags & LOCKPARENT) == 0 && ndp->ni_dvp != dp) { VOP_UNLOCK(ndp->ni_dvp); ni_dvp_unlocked = 1; } if (cnp->cn_flags & AUDITVNODE1) AUDIT_ARG_VNODE1(dp); else if (cnp->cn_flags & AUDITVNODE2) AUDIT_ARG_VNODE2(dp); if ((cnp->cn_flags & LOCKLEAF) == 0) VOP_UNLOCK(dp); success: /* * FIXME: for lookups which only cross a mount point to fetch the * root vnode, ni_dvp will be set to vp_crossmp. This can be a problem * if either WANTPARENT or LOCKPARENT is set. */ /* * Because of shared lookup we may have the vnode shared locked, but * the caller may want it to be exclusively locked. */ if (needs_exclusive_leaf(dp->v_mount, cnp->cn_flags) && VOP_ISLOCKED(dp) != LK_EXCLUSIVE) { vn_lock(dp, LK_UPGRADE | LK_RETRY); if (VN_IS_DOOMED(dp)) { error = ENOENT; goto bad2; } } success_right_lock: if (ndp->ni_vp != NULL) { if ((cnp->cn_flags & ISDOTDOT) == 0) nameicap_tracker_add(ndp, ndp->ni_vp); if ((cnp->cn_flags & (FAILIFEXISTS | ISSYMLINK)) == FAILIFEXISTS) return (vfs_lookup_failifexists(ndp)); } return (0); bad2: if (ni_dvp_unlocked != 2) { if (dp != ndp->ni_dvp && !ni_dvp_unlocked) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); } bad: vput(dp); bad_unlocked: ndp->ni_vp = NULL; return (error); } /* * relookup - lookup a path name component * Used by lookup to re-acquire things. 
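 * Only the last component is looked up again, using the componentname
 * state from an earlier vfs_lookup() call; dvp is relocked exclusively
 * here.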
*/ int vfs_relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, bool refstart) { struct vnode *dp = NULL; /* the directory we are searching */ int rdonly; /* lookup read-only flag bit */ int error = 0; KASSERT(cnp->cn_flags & ISLASTCN, ("relookup: Not given last component.")); /* * Setup: break out flag bits into variables. */ KASSERT((cnp->cn_flags & (LOCKPARENT | WANTPARENT)) != 0, ("relookup: parent not wanted")); rdonly = cnp->cn_flags & RDONLY; cnp->cn_flags &= ~ISSYMLINK; dp = dvp; cnp->cn_lkflags = LK_EXCLUSIVE; vn_lock(dp, LK_EXCLUSIVE | LK_RETRY); /* * Search a new directory. * * See a comment in vfs_lookup for cnp->cn_nameptr. * * Check for "" which represents the root directory after slash * removal. */ if (cnp->cn_nameptr[0] == '\0') { /* * Support only LOOKUP for "/" because lookup() * can't succeed for CREATE, DELETE and RENAME. */ KASSERT(cnp->cn_nameiop == LOOKUP, ("nameiop must be LOOKUP")); KASSERT(dp->v_type == VDIR, ("dp is not a directory")); if (!(cnp->cn_flags & LOCKLEAF)) VOP_UNLOCK(dp); *vpp = dp; /* XXX This should probably move to the top of function. */ if (refstart) panic("lookup: SAVESTART"); return (0); } if (cnp->cn_flags & ISDOTDOT) panic ("relookup: lookup on dot-dot"); /* * We now have a segment name to search for, and a directory to search. */ if ((error = VOP_LOOKUP(dp, vpp, cnp)) != 0) { KASSERT(*vpp == NULL, ("leaf should be empty")); if (error != EJUSTRETURN) goto bad; /* * If creating and at end of pathname, then can consider * allowing file to be created. */ if (rdonly) { error = EROFS; goto bad; } /* ASSERT(dvp == ndp->ni_startdir) */ if (refstart) VREF(dvp); if ((cnp->cn_flags & LOCKPARENT) == 0) VOP_UNLOCK(dp); /* * We return with ni_vp NULL to indicate that the entry * doesn't currently exist, leaving a pointer to the * (possibly locked) directory vnode in ndp->ni_dvp. */ return (0); } dp = *vpp; /* * Disallow directory write attempts on read-only filesystems. */ if (rdonly && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) { if (dvp == dp) vrele(dvp); else vput(dvp); error = EROFS; goto bad; } /* * Set the parent lock/ref state to the requested state. */ if ((cnp->cn_flags & LOCKPARENT) == 0 && dvp != dp) VOP_UNLOCK(dvp); /* * Check for symbolic link */ KASSERT(dp->v_type != VLNK || !(cnp->cn_flags & FOLLOW), ("relookup: symlink found.\n")); /* ASSERT(dvp == ndp->ni_startdir) */ if (refstart) VREF(dvp); if ((cnp->cn_flags & LOCKLEAF) == 0) VOP_UNLOCK(dp); return (0); bad: vput(dp); *vpp = NULL; return (error); } #ifdef INVARIANTS /* * Validate the final state of ndp after the lookup. */ static void NDVALIDATE_impl(struct nameidata *ndp, int line) { struct componentname *cnp; cnp = &ndp->ni_cnd; if (cnp->cn_pnbuf == NULL) panic("%s: got no buf! called from %d", __func__, line); } #endif diff --git a/sys/sys/namei.h b/sys/sys/namei.h index bbaa71f629e6..4a16ec923bd8 100644 --- a/sys/sys/namei.h +++ b/sys/sys/namei.h @@ -1,316 +1,319 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1985, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_NAMEI_H_ #define _SYS_NAMEI_H_ #include #include #include #include #include #include enum nameiop { LOOKUP, CREATE, DELETE, RENAME }; struct componentname { /* * Arguments to lookup. */ u_int64_t cn_flags; /* flags to namei */ struct ucred *cn_cred; /* credentials */ enum nameiop cn_nameiop; /* namei operation */ int cn_lkflags; /* Lock flags LK_EXCLUSIVE or LK_SHARED */ /* * Shared between lookup and commit routines. */ char *cn_pnbuf; /* pathname buffer */ char *cn_nameptr; /* pointer to looked up name */ long cn_namelen; /* length of looked up component */ }; struct nameicap_tracker; TAILQ_HEAD(nameicap_tracker_head, nameicap_tracker); /* * Encapsulation of namei parameters. */ struct nameidata { /* * Arguments to namei/lookup. */ const char *ni_dirp; /* pathname pointer */ enum uio_seg ni_segflg; /* location of pathname */ const cap_rights_t *ni_rightsneeded; /* rights needed to look up vnode */ /* * Arguments to lookup. */ struct vnode *ni_startdir; /* starting directory */ struct vnode *ni_rootdir; /* logical root directory */ struct vnode *ni_topdir; /* logical top directory */ int ni_dirfd; /* starting directory for *at functions */ int ni_lcf; /* local call flags */ /* * Results: returned from namei */ struct filecaps ni_filecaps; /* rights the *at base has */ /* * Results: returned from/manipulated by lookup */ struct vnode *ni_vp; /* vnode of result */ struct vnode *ni_dvp; /* vnode of intermediate directory */ /* * Results: flags returned from namei */ u_int ni_resflags; /* * Debug for validating API use by the callers. */ u_short ni_debugflags; /* * Shared between namei and lookup/commit routines. */ u_short ni_loopcnt; /* count of symlinks encountered */ size_t ni_pathlen; /* remaining chars in path */ char *ni_next; /* next location in pathname */ /* * Lookup parameters: this structure describes the subset of * information from the nameidata structure that is passed * through the VOP interface. */ struct componentname ni_cnd; struct nameicap_tracker_head ni_cap_tracker; /* * Private helper data for UFS, must be at the end. See * NDINIT_PREFILL(). 
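	 * Keeping them last lets NDINIT_PREFILL() fill everything up to
	 * (but not including) ni_dvp_seqc with the 0xff debug pattern.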
*/ seqc_t ni_dvp_seqc; seqc_t ni_vp_seqc; }; #ifdef _KERNEL enum cache_fpl_status { CACHE_FPL_STATUS_DESTROYED, CACHE_FPL_STATUS_ABORTED, CACHE_FPL_STATUS_PARTIAL, CACHE_FPL_STATUS_HANDLED, CACHE_FPL_STATUS_UNSET }; int cache_fplookup(struct nameidata *ndp, enum cache_fpl_status *status, struct pwd **pwdp); /* * Flags for namei. * * If modifying the list make sure to check whether NDVALIDATE needs updating. */ /* * Debug. */ #define NAMEI_DBG_INITED 0x0001 #define NAMEI_DBG_CALLED 0x0002 #define NAMEI_DBG_HADSTARTDIR 0x0004 /* * namei operational modifier flags, stored in ni_cnd.flags */ #define NC_NOMAKEENTRY 0x0001 /* name must not be added to cache */ #define NC_KEEPPOSENTRY 0x0002 /* don't evict a positive entry */ #define NOCACHE NC_NOMAKEENTRY /* for compatibility with older code */ #define LOCKLEAF 0x0004 /* lock vnode on return */ #define LOCKPARENT 0x0008 /* want parent vnode returned locked */ #define WANTPARENT 0x0010 /* want parent vnode returned unlocked */ #define FAILIFEXISTS 0x0020 /* return EEXIST if found */ #define FOLLOW 0x0040 /* follow symbolic links */ #define EMPTYPATH 0x0080 /* Allow empty path for *at */ #define LOCKSHARED 0x0100 /* Shared lock leaf */ #define NOFOLLOW 0x0000 /* do not follow symbolic links (pseudo) */ #define RBENEATH 0x100000000ULL /* No escape, even tmp, from start dir */ +#define NAMEILOOKUP 0x200000000ULL /* cnp is embedded in nameidata */ #define MODMASK 0xf000001ffULL /* mask of operational modifiers */ /* * Namei parameter descriptors. */ #define RDONLY 0x00000200 /* lookup with read-only semantics */ #define ISRESTARTED 0x00000400 /* restarted namei */ #define IGNOREWHITEOUT 0x00000800 /* ignore whiteouts, e.g. when checking if a dir is empty */ #define ISWHITEOUT 0x00001000 /* found whiteout */ #define DOWHITEOUT 0x00002000 /* do whiteouts */ #define WILLBEDIR 0x00004000 /* new files will be dirs; allow trailing / */ #define ISOPEN 0x00008000 /* caller is opening; return a real vnode. */ #define NOCROSSMOUNT 0x00010000 /* do not cross mount points */ #define NOMACCHECK 0x00020000 /* do not perform MAC checks */ #define AUDITVNODE1 0x00040000 /* audit the looked up vnode information */ #define AUDITVNODE2 0x00080000 /* audit the looked up vnode information */ #define NOCAPCHECK 0x00100000 /* do not perform capability checks */ #define OPENREAD 0x00200000 /* open for reading */ #define OPENWRITE 0x00400000 /* open for writing */ #define WANTIOCTLCAPS 0x00800000 /* leave ioctl caps for the caller */ #define OPENNAMED 0x01000000 /* opening a named attribute (dir) */ #define NOEXECCHECK 0x02000000 /* do not perform exec check on dir */ #define MAKEENTRY 0x04000000 /* entry is to be added to name cache */ #define ISSYMLINK 0x08000000 /* symlink needs interpretation */ #define ISLASTCN 0x10000000 /* this is last component of pathname */ #define ISDOTDOT 0x20000000 /* current component name is .. */ #define TRAILINGSLASH 0x40000000 /* path ended in a slash */ #define CREATENAMED 0x80000000 /* create a named attribute dir */ #define PARAMASK 0xfffffe00 /* mask of parameter descriptors */ /* * Flags which must not be passed in by callers. */ #define NAMEI_INTERNAL_FLAGS \ (NOEXECCHECK | MAKEENTRY | ISSYMLINK | ISLASTCN | ISDOTDOT | \ TRAILINGSLASH | ISRESTARTED) /* * Namei results flags */ #define NIRES_ABS 0x00000001 /* Path was absolute */ #define NIRES_STRICTREL 0x00000002 /* Restricted lookup result */ #define NIRES_EMPTYPATH 0x00000004 /* EMPTYPATH used */ /* * Flags in ni_lcf, valid for the duration of the namei call. 
*/ #define NI_LCF_STRICTREL 0x0001 /* relative lookup only */ #define NI_LCF_CAP_DOTDOT 0x0002 /* ".." in strictrelative case */ /* Track capability restrictions seperately for violation ktracing. */ #define NI_LCF_STRICTREL_KTR 0x0004 /* trace relative lookups */ #define NI_LCF_CAP_DOTDOT_KTR 0x0008 /* ".." in strictrelative case */ #define NI_LCF_KTR_FLAGS (NI_LCF_STRICTREL_KTR | NI_LCF_CAP_DOTDOT_KTR) /* * Initialization of a nameidata structure. */ #define NDINIT(ndp, op, flags, segflg, namep) \ NDINIT_ALL(ndp, op, flags, segflg, namep, AT_FDCWD, NULL, &cap_no_rights) #define NDINIT_AT(ndp, op, flags, segflg, namep, dirfd) \ NDINIT_ALL(ndp, op, flags, segflg, namep, dirfd, NULL, &cap_no_rights) #define NDINIT_ATRIGHTS(ndp, op, flags, segflg, namep, dirfd, rightsp) \ NDINIT_ALL(ndp, op, flags, segflg, namep, dirfd, NULL, rightsp) #define NDINIT_ATVP(ndp, op, flags, segflg, namep, vp) \ NDINIT_ALL(ndp, op, flags, segflg, namep, AT_FDCWD, vp, &cap_no_rights) /* * Note the constant pattern may *hide* bugs. * Note also that we enable debug checks for non-TIED KLDs * so that they can run on an INVARIANTS kernel without tripping over * assertions on ni_debugflags state. */ #if defined(INVARIANTS) || (defined(KLD_MODULE) && !defined(KLD_TIED)) #define NDINIT_PREFILL(arg) memset(arg, 0xff, offsetof(struct nameidata, \ ni_dvp_seqc)) #define NDINIT_DBG(arg) { (arg)->ni_debugflags = NAMEI_DBG_INITED; } #define NDREINIT_DBG(arg) { \ if (((arg)->ni_debugflags & NAMEI_DBG_INITED) == 0) \ panic("namei data not inited"); \ if (((arg)->ni_debugflags & NAMEI_DBG_HADSTARTDIR) != 0) \ panic("NDREINIT on namei data with NAMEI_DBG_HADSTARTDIR"); \ (arg)->ni_debugflags = NAMEI_DBG_INITED; \ } #else #define NDINIT_PREFILL(arg) do { } while (0) #define NDINIT_DBG(arg) do { } while (0) #define NDREINIT_DBG(arg) do { } while (0) #endif #define NDINIT_ALL(ndp, op, flags, segflg, namep, dirfd, startdir, rightsp) \ do { \ struct nameidata *_ndp = (ndp); \ const cap_rights_t *_rightsp = (rightsp); \ MPASS(_rightsp != NULL); \ NDINIT_PREFILL(_ndp); \ NDINIT_DBG(_ndp); \ _ndp->ni_cnd.cn_nameiop = op; \ - _ndp->ni_cnd.cn_flags = flags; \ + _ndp->ni_cnd.cn_flags = (flags) | NAMEILOOKUP; \ _ndp->ni_segflg = segflg; \ _ndp->ni_dirp = namep; \ _ndp->ni_dirfd = dirfd; \ _ndp->ni_startdir = startdir; \ _ndp->ni_resflags = 0; \ filecaps_init(&_ndp->ni_filecaps); \ _ndp->ni_rightsneeded = _rightsp; \ } while (0) #define NDREINIT(ndp) do { \ struct nameidata *_ndp = (ndp); \ NDREINIT_DBG(_ndp); \ filecaps_free(&_ndp->ni_filecaps); \ _ndp->ni_resflags = 0; \ _ndp->ni_startdir = NULL; \ } while (0) #define NDPREINIT(ndp) do { \ (ndp)->ni_dvp_seqc = SEQC_MOD; \ (ndp)->ni_vp_seqc = SEQC_MOD; \ } while (0) #define NDFREE_IOCTLCAPS(ndp) do { \ struct nameidata *_ndp = (ndp); \ filecaps_free(&_ndp->ni_filecaps); \ } while (0) #define NDFREE_PNBUF(ndp) do { \ struct nameidata *_ndp = (ndp); \ MPASS(_ndp->ni_cnd.cn_pnbuf != NULL); \ uma_zfree(namei_zone, _ndp->ni_cnd.cn_pnbuf); \ _ndp->ni_cnd.cn_pnbuf = NULL; \ } while (0) int namei(struct nameidata *ndp); int vfs_lookup(struct nameidata *ndp); +bool vfs_lookup_isroot(struct nameidata *ndp, struct vnode *dvp); +struct nameidata *vfs_lookup_nameidata(struct componentname *cnp); int vfs_relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, bool refstart); #define namei_setup_rootdir(ndp, cnp, pwd) do { \ if (__predict_true((cnp->cn_flags & ISRESTARTED) == 0)) \ ndp->ni_rootdir = pwd->pwd_adir; \ else \ ndp->ni_rootdir = pwd->pwd_rdir; \ } while (0) #endif /* * Stats on 
usefulness of namei caches. */ struct nchstats { long ncs_goodhits; /* hits that we can really use */ long ncs_neghits; /* negative hits that we can use */ long ncs_badhits; /* hits we must drop */ long ncs_falsehits; /* hits with id mismatch */ long ncs_miss; /* misses */ long ncs_long; /* long names that ignore cache */ long ncs_pass2; /* names found with passes == 2 */ long ncs_2passes; /* number of times we attempt it */ }; extern struct nchstats nchstats; #endif /* !_SYS_NAMEI_H_ */
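
The sketch below is not part of the change above; it only illustrates how a filesystem might consume the two helpers exported by this patch, vfs_lookup_nameidata() and vfs_lookup_isroot(). The name examplefs_dotdot_may_ascend() and its surrounding structure are hypothetical; the boundary handling simply mirrors what vfs_lookup() itself does when ".." reaches the root of the namei() call.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>

/*
 * Hypothetical helper for a filesystem's dotdot handling: decide
 * whether a ".." lookup starting at dvp may walk up to the parent, or
 * must stay put because dvp is the root of the enclosing namei() call.
 */
static bool
examplefs_dotdot_may_ascend(struct vnode *dvp, struct componentname *cnp)
{
	struct nameidata *ndp;

	/*
	 * vfs_lookup_nameidata() returns NULL when the componentname is
	 * not embedded in a nameidata (NAMEILOOKUP not set), e.g. when a
	 * caller built the componentname by hand.
	 */
	ndp = vfs_lookup_nameidata(cnp);
	if (ndp == NULL)
		return (true);

	/*
	 * vfs_lookup_isroot() is true when dvp is a jail root, the
	 * lookup's ni_rootdir or ni_topdir, or the global rootvnode;
	 * in that case ".." must not ascend.
	 */
	return (!vfs_lookup_isroot(ndp, dvp));
}

A lookup routine would call this before substituting the parent vnode for dvp; when it returns false, the natural reaction is the one vfs_lookup() takes itself, namely treating ".." as referring to dvp.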