diff --git a/sys/fs/tmpfs/tmpfs.h b/sys/fs/tmpfs/tmpfs.h
index 3872cd1c0b13..ec920f8b0537 100644
--- a/sys/fs/tmpfs/tmpfs.h
+++ b/sys/fs/tmpfs/tmpfs.h
@@ -1,538 +1,537 @@
/*	$NetBSD: tmpfs.h,v 1.26 2007/02/22 06:37:00 thorpej Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _FS_TMPFS_TMPFS_H_
#define _FS_TMPFS_TMPFS_H_

#include
#include
#include

#ifdef _SYS_MALLOC_H_
-MALLOC_DECLARE(M_TMPFSMNT);
MALLOC_DECLARE(M_TMPFSNAME);
#endif

/*
 * Internal representation of a tmpfs directory entry.
 */

LIST_HEAD(tmpfs_dir_duphead, tmpfs_dirent);

struct tmpfs_dirent {
	/*
	 * Depending on td_cookie flag entry can be of 3 types:
	 * - regular -- no hash collisions, stored in RB-Tree
	 * - duphead -- synthetic linked list head for dup entries
	 * - dup -- stored in linked list instead of RB-Tree
	 */
	union {
		/* regular and duphead entry types */
		RB_ENTRY(tmpfs_dirent)	td_entries;

		/* dup entry type */
		struct {
			LIST_ENTRY(tmpfs_dirent) entries;
			LIST_ENTRY(tmpfs_dirent) index_entries;
		} td_dup;
	} uh;

	uint32_t		td_cookie;
	uint32_t		td_hash;
	u_int			td_namelen;

	/*
	 * Pointer to the node this entry refers to.  In case this field
	 * is NULL, the node is a whiteout.
	 */
	struct tmpfs_node *	td_node;

	union {
		/*
		 * The name of the entry, allocated from a string pool.  This
		 * string is not required to be zero-terminated.
		 */
		char *			td_name;	/* regular, dup */
		struct tmpfs_dir_duphead td_duphead;	/* duphead */
	} ud;
};

/*
 * A directory in tmpfs holds a collection of directory entries, which
 * in turn point to other files (which can be directories themselves).
 *
 * In tmpfs, this collection is managed by a RB-Tree, whose head is
 * defined by the struct tmpfs_dir type.
 *
 * It is important to notice that directories do not have entries for . and
 * .. as other file systems do.  These can be generated when requested
 * based on information available by other means, such as the pointer to
 * the node itself in the former case or the pointer to the parent directory
 * in the latter case.
 * This is done to simplify tmpfs's code and, more importantly, to remove
 * redundancy.
 */
RB_HEAD(tmpfs_dir, tmpfs_dirent);

/*
 * Each entry in a directory has a cookie that identifies it.  Cookies
 * supersede offsets within directories because, given how tmpfs stores
 * directories in memory, there is no such thing as an offset.
 *
 * The '.', '..' and the end of directory markers have fixed cookies which
 * cannot collide with the cookies generated by other entries.  The cookies
 * for the other entries are generated based on the file name hash value or
 * unique number in case of name hash collision.
 *
 * To preserve compatibility, cookies are limited to 31 bits.
 */
#define	TMPFS_DIRCOOKIE_DOT		0
#define	TMPFS_DIRCOOKIE_DOTDOT		1
#define	TMPFS_DIRCOOKIE_EOF		2
#define	TMPFS_DIRCOOKIE_MASK		((off_t)0x3fffffffU)
#define	TMPFS_DIRCOOKIE_MIN		((off_t)0x00000004U)
#define	TMPFS_DIRCOOKIE_DUP		((off_t)0x40000000U)
#define	TMPFS_DIRCOOKIE_DUPHEAD		((off_t)0x80000000U)
#define	TMPFS_DIRCOOKIE_DUP_MIN		TMPFS_DIRCOOKIE_DUP
#define	TMPFS_DIRCOOKIE_DUP_MAX		\
	(TMPFS_DIRCOOKIE_DUP | TMPFS_DIRCOOKIE_MASK)

/*
 * Internal representation of a tmpfs file system node.
 *
 * This structure is split in two parts: one holds attributes common
 * to all file types and the other holds data that is only applicable to
 * a particular type.  The code must be careful to only access those
 * attributes that are actually allowed by the node's type.
 *
 * Below is the key of the locks used to protect the fields in the following
 * structures.
 * (v)  vnode lock in exclusive mode
 * (vi) vnode lock in exclusive mode, or vnode lock in shared mode and
 *	tn_interlock
 * (i)  tn_interlock
 * (m)  tmpfs_mount tm_allnode_lock
 * (c)  stable after creation
 */
struct tmpfs_node {
	/*
	 * Doubly-linked list entry which links all existing nodes for
	 * a single file system.  This is provided to ease the removal
	 * of all nodes during the unmount operation, and to support
	 * the implementation of VOP_VNTOCNP().  tn_attached is false
	 * when the node is removed from the list and unlocked.
	 */
	LIST_ENTRY(tmpfs_node)	tn_entries;	/* (m) */
	bool			tn_attached;	/* (m) */

	/*
	 * The node's type.  Any of 'VBLK', 'VCHR', 'VDIR', 'VFIFO',
	 * 'VLNK', 'VREG' and 'VSOCK' is allowed.  The usage of vnode
	 * types instead of a custom enumeration is to make things simpler
	 * and faster, as we do not need to convert between two types.
	 */
	enum vtype		tn_type;	/* (c) */

	/* Node identifier. */
	ino_t			tn_id;		/* (c) */

	/*
	 * Node's internal status.  This is used by several file system
	 * operations to do modifications to the node in a delayed
	 * fashion.
	 */
	int			tn_status;	/* (vi) */
#define	TMPFS_NODE_ACCESSED	(1 << 1)
#define	TMPFS_NODE_MODIFIED	(1 << 2)
#define	TMPFS_NODE_CHANGED	(1 << 3)

	/*
	 * The node size.  It does not necessarily match the real amount
	 * of memory consumed by it.
	 */
	off_t			tn_size;	/* (v) */

	/* Generic node attributes. */
	uid_t			tn_uid;		/* (v) */
	gid_t			tn_gid;		/* (v) */
	mode_t			tn_mode;	/* (v) */
	int			tn_links;	/* (v) */
	u_long			tn_flags;	/* (v) */
	struct timespec		tn_atime;	/* (vi) */
	struct timespec		tn_mtime;	/* (vi) */
	struct timespec		tn_ctime;	/* (vi) */
	struct timespec		tn_birthtime;	/* (v) */
	unsigned long		tn_gen;		/* (c) */

	/*
	 * As there is a single vnode for each active file within the
	 * system, care has to be taken to avoid allocating more than one
	 * vnode per file.  In order to do this, a bidirectional association
	 * is kept between vnodes and nodes.
	 *
	 * Whenever a vnode is allocated, its v_data field is updated to
	 * point to the node it references.  At the same time, the node's
	 * tn_vnode field is modified to point to the new vnode representing
	 * it.  Further attempts to allocate a vnode for this same node will
	 * result in returning a new reference to the value stored in
	 * tn_vnode.
	 *
	 * May be NULL when the node is unused (that is, no vnode has been
	 * allocated for it or it has been reclaimed).
	 */
	struct vnode *		tn_vnode;	/* (i) */

	/*
	 * Interlock to protect tn_vpstate, and tn_status under shared
	 * vnode lock.
	 */
	struct mtx		tn_interlock;

	/*
	 * Identifies whether the node has a vnode associated with it,
	 * or is in the process of allocating one.
	 */
	int			tn_vpstate;	/* (i) */

	/* Transient refcounter on this node. */
	u_int			tn_refcount;	/* (m) + (i) */

	/* Misc data fields for the different tn_type nodes. */
	union {
		/* Valid when tn_type == VBLK || tn_type == VCHR. */
		dev_t			tn_rdev;	/* (c) */

		/* Valid when tn_type == VDIR. */
		struct tn_dir {
			/*
			 * Pointer to the parent directory.  The root
			 * directory has a pointer to itself in this field;
			 * this property identifies the root node.
			 */
			struct tmpfs_node *	tn_parent;

			/*
			 * Head of a tree that links the contents of
			 * the directory together.
			 */
			struct tmpfs_dir	tn_dirhead;

			/*
			 * Head of a list that contains fake directory entry
			 * heads, i.e. entries with the
			 * TMPFS_DIRCOOKIE_DUPHEAD flag.
			 */
			struct tmpfs_dir_duphead tn_dupindex;

			/*
			 * Number and pointer of the first directory entry
			 * returned by the readdir operation if it were
			 * called again to continue reading data from the
			 * same directory as before.  This is used to speed
			 * up reads of long directories, assuming that no
			 * more than one read is in progress at a given time.
			 * Otherwise, these values are discarded.
			 */
			off_t			tn_readdir_lastn;
			struct tmpfs_dirent *	tn_readdir_lastp;
		} tn_dir;

		/* Valid when tn_type == VLNK. */
		/* The link's target, allocated from a string pool. */
		char *			tn_link;	/* (c) */

		/* Valid when tn_type == VREG. */
		struct tn_reg {
			/*
			 * The contents of regular files stored in a
			 * tmpfs file system are represented by a
			 * single anonymous memory object (aobj, for
			 * short).  The aobj provides direct access to
			 * any position within the file.  It is a task
			 * of the memory management subsystem to issue
			 * the required page ins or page outs whenever
			 * a position within the file is accessed.
			 */
			vm_object_t		tn_aobj;	/* (c) */
		} tn_reg;
	} tn_spec;	/* (v) */
};
LIST_HEAD(tmpfs_node_list, tmpfs_node);

#define	tn_rdev	tn_spec.tn_rdev
#define	tn_dir	tn_spec.tn_dir
#define	tn_link	tn_spec.tn_link
#define	tn_reg	tn_spec.tn_reg
#define	tn_fifo	tn_spec.tn_fifo

#define	TMPFS_LINK_MAX	INT_MAX

#define	TMPFS_NODE_LOCK(node)	mtx_lock(&(node)->tn_interlock)
#define	TMPFS_NODE_UNLOCK(node)	mtx_unlock(&(node)->tn_interlock)
#define	TMPFS_NODE_MTX(node)	(&(node)->tn_interlock)
#define	TMPFS_NODE_ASSERT_LOCKED(node)	mtx_assert(TMPFS_NODE_MTX(node), \
    MA_OWNED)

#ifdef INVARIANTS
#define	TMPFS_ASSERT_LOCKED(node) do {					\
		MPASS((node) != NULL);					\
		MPASS((node)->tn_vnode != NULL);			\
		ASSERT_VOP_LOCKED((node)->tn_vnode, "tmpfs assert");	\
	} while (0)
#else
#define	TMPFS_ASSERT_LOCKED(node) (void)0
#endif

#define	TMPFS_VNODE_ALLOCATING	1
#define	TMPFS_VNODE_WANT	2
#define	TMPFS_VNODE_DOOMED	4
#define	TMPFS_VNODE_WRECLAIM	8

/*
 * Internal representation of a tmpfs mount point.
 */
struct tmpfs_mount {
	/*
	 * Original value of the "size" parameter, for reference purposes,
	 * mostly.
	 */
	off_t			tm_size_max;

	/*
	 * Maximum number of memory pages available for use by the file
	 * system, set during mount time.  This variable must never be
	 * used directly as it may be bigger than the current amount of
	 * free memory; in the extreme case, it will hold the ULONG_MAX
	 * value.
	 */
	u_long			tm_pages_max;

	/* Number of pages in use by the file system. */
	u_long			tm_pages_used;

	/*
	 * Pointer to the node representing the root directory of this
	 * file system.
	 */
	struct tmpfs_node *	tm_root;

	/*
	 * Maximum number of possible nodes for this file system; set
	 * during mount time.  We need a hard limit on the maximum number
	 * of nodes to avoid allocating too many of them; their objects
	 * cannot be released until the file system is unmounted.
	 * Otherwise, we could easily run out of memory by creating lots
	 * of empty files and then simply removing them.
	 */
	ino_t			tm_nodes_max;

	/* unrhdr used to allocate inode numbers */
	struct unrhdr64		tm_ino_unr;

	/* Number of nodes currently in use. */
	ino_t			tm_nodes_inuse;

	/* Refcounter on this struct tmpfs_mount. */
	uint64_t		tm_refcount;

	/* maximum representable file size */
	u_int64_t		tm_maxfilesize;

	/*
	 * The used list contains all nodes that are currently used by
	 * the file system; i.e., they refer to existing files.
	 */
	struct tmpfs_node_list	tm_nodes_used;

	/* All-node lock to protect the node list and tm_pages_used. */
	struct mtx		tm_allnode_lock;

	/* Read-only status. */
	bool			tm_ronly;
	/* Do not use namecache. */
	bool			tm_nonc;
};
#define	TMPFS_LOCK(tm)		mtx_lock(&(tm)->tm_allnode_lock)
#define	TMPFS_UNLOCK(tm)	mtx_unlock(&(tm)->tm_allnode_lock)
#define	TMPFS_MP_ASSERT_LOCKED(tm) mtx_assert(&(tm)->tm_allnode_lock, MA_OWNED)

/*
 * This structure maps a file identifier to a tmpfs node.  Used by the
 * NFS code.
 */
struct tmpfs_fid_data {
	ino_t			tfd_id;
	unsigned long		tfd_gen;
};
_Static_assert(sizeof(struct tmpfs_fid_data) <= MAXFIDSZ,
    "(struct tmpfs_fid_data) is larger than (struct fid).fid_data");

struct tmpfs_dir_cursor {
	struct tmpfs_dirent	*tdc_current;
	struct tmpfs_dirent	*tdc_tree;
};

#ifdef _KERNEL
/*
 * Prototypes for tmpfs_subr.c.
*/ void tmpfs_ref_node(struct tmpfs_node *node); void tmpfs_ref_node_locked(struct tmpfs_node *node); int tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *, enum vtype, uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *, char *, dev_t, struct tmpfs_node **); void tmpfs_free_node(struct tmpfs_mount *, struct tmpfs_node *); bool tmpfs_free_node_locked(struct tmpfs_mount *, struct tmpfs_node *, bool); void tmpfs_free_tmp(struct tmpfs_mount *); int tmpfs_alloc_dirent(struct tmpfs_mount *, struct tmpfs_node *, const char *, u_int, struct tmpfs_dirent **); void tmpfs_free_dirent(struct tmpfs_mount *, struct tmpfs_dirent *); void tmpfs_dirent_init(struct tmpfs_dirent *, const char *, u_int); void tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj); int tmpfs_alloc_vp(struct mount *, struct tmpfs_node *, int, struct vnode **); void tmpfs_free_vp(struct vnode *); int tmpfs_alloc_file(struct vnode *, struct vnode **, struct vattr *, struct componentname *, char *); void tmpfs_check_mtime(struct vnode *); void tmpfs_dir_attach(struct vnode *, struct tmpfs_dirent *); void tmpfs_dir_detach(struct vnode *, struct tmpfs_dirent *); void tmpfs_dir_destroy(struct tmpfs_mount *, struct tmpfs_node *); struct tmpfs_dirent * tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f, struct componentname *cnp); int tmpfs_dir_getdents(struct tmpfs_mount *, struct tmpfs_node *, struct uio *, int, u_long *, int *); int tmpfs_dir_whiteout_add(struct vnode *, struct componentname *); void tmpfs_dir_whiteout_remove(struct vnode *, struct componentname *); int tmpfs_reg_resize(struct vnode *, off_t, boolean_t); int tmpfs_chflags(struct vnode *, u_long, struct ucred *, struct thread *); int tmpfs_chmod(struct vnode *, mode_t, struct ucred *, struct thread *); int tmpfs_chown(struct vnode *, uid_t, gid_t, struct ucred *, struct thread *); int tmpfs_chsize(struct vnode *, u_quad_t, struct ucred *, struct thread *); int tmpfs_chtimes(struct vnode *, struct vattr *, struct ucred *cred, struct thread *); void tmpfs_itimes(struct vnode *, const struct timespec *, const struct timespec *); void tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node, int status); void tmpfs_update(struct vnode *); int tmpfs_truncate(struct vnode *, off_t); struct tmpfs_dirent *tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc); struct tmpfs_dirent *tmpfs_dir_next(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc); /* * Convenience macros to simplify some logical expressions. */ #define IMPLIES(a, b) (!(a) || (b)) #define IFF(a, b) (IMPLIES(a, b) && IMPLIES(b, a)) /* * Checks that the directory entry pointed by 'de' matches the name 'name' * with a length of 'len'. */ #define TMPFS_DIRENT_MATCHES(de, name, len) \ (de->td_namelen == len && \ bcmp((de)->ud.td_name, (name), (de)->td_namelen) == 0) /* * Ensures that the node pointed by 'node' is a directory and that its * contents are consistent with respect to directories. */ #define TMPFS_VALIDATE_DIR(node) do { \ MPASS((node)->tn_type == VDIR); \ MPASS((node)->tn_size % sizeof(struct tmpfs_dirent) == 0); \ } while (0) /* * Amount of memory pages to reserve for the system (e.g., to not use by * tmpfs). */ #define TMPFS_PAGES_MINRESERVED (4 * 1024 * 1024 / PAGE_SIZE) size_t tmpfs_mem_avail(void); size_t tmpfs_pages_used(struct tmpfs_mount *tmp); void tmpfs_subr_init(void); void tmpfs_subr_uninit(void); /* * Macros/functions to convert from generic data structures to tmpfs * specific ones. 
*/ static inline struct tmpfs_mount * VFS_TO_TMPFS(struct mount *mp) { struct tmpfs_mount *tmp; MPASS(mp != NULL && mp->mnt_data != NULL); tmp = (struct tmpfs_mount *)mp->mnt_data; return (tmp); } static inline struct tmpfs_node * VP_TO_TMPFS_NODE(struct vnode *vp) { struct tmpfs_node *node; MPASS(vp != NULL && vp->v_data != NULL); node = (struct tmpfs_node *)vp->v_data; return (node); } static inline struct tmpfs_node * VP_TO_TMPFS_DIR(struct vnode *vp) { struct tmpfs_node *node; node = VP_TO_TMPFS_NODE(vp); TMPFS_VALIDATE_DIR(node); return (node); } static inline bool tmpfs_use_nc(struct vnode *vp) { return (!(VFS_TO_TMPFS(vp->v_mount)->tm_nonc)); } #endif /* _KERNEL */ #endif /* _FS_TMPFS_TMPFS_H_ */ diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c index c9c211dca002..219ec9ee69f2 100644 --- a/sys/fs/tmpfs/tmpfs_vfsops.c +++ b/sys/fs/tmpfs/tmpfs_vfsops.c @@ -1,684 +1,684 @@ /* $NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause-NetBSD * * Copyright (c) 2005 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Julio M. Merino Vidal, developed as part of Google's Summer of Code * 2005 program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Efficient memory file system. * * tmpfs is a file system that uses FreeBSD's virtual memory * sub-system to store file data and metadata in an efficient way. * This means that it does not follow the structure of an on-disk file * system because it simply does not need to. Instead, it uses * memory-specific data structures and algorithms to automatically * allocate and release resources. 
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Default permission for root node
 */
#define	TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

-MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
+static MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int, struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);
static void	tmpfs_susp_clean(struct mount *);

static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode",
	"export", "union", "nonc", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", "size", NULL
};

/*
 * Handle updates of time from writes to mmapped regions.  Use
 * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
 * unmap of the tmpfs-backed vnode does not call vinactive(), because
 * the vm object type is OBJT_SWAP.
 * If lazy, only handle delayed update of mtime due to the writes to
 * mapped files.
 */
static void
tmpfs_update_mtime(struct mount *mp, bool lazy)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		obj = vp->v_object;
		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

		/*
		 * In the lazy case, do an unlocked read and avoid
		 * taking the vnode lock if not needed.  A lost update
		 * will be handled on the next call.
		 * In the non-lazy case, we must flush all pending
		 * metadata changes now.
		 */
		if (!lazy || (obj->flags & OBJ_TMPFS_DIRTY) != 0) {
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
			    curthread) != 0)
				continue;
			tmpfs_check_mtime(vp);
			if (!lazy)
				tmpfs_update(vp);
			vput(vp);
		} else {
			VI_UNLOCK(vp);
			continue;
		}
	}
}

struct tmpfs_check_rw_maps_arg {
	bool found;
};

static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Revoke write permissions from all mappings of regular files
 * belonging to the specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: might it be better to invalidate the mapping
	 * instead?  The process is not going to be happy in
	 * any case.
*/ entry->max_protection &= ~VM_PROT_WRITE; if ((entry->protection & VM_PROT_WRITE) != 0) { entry->protection &= ~VM_PROT_WRITE; pmap_protect(map->pmap, entry->start, entry->end, entry->protection); } return (false); } static void tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t, vm_map_entry_t, void *), void *cb_arg) { struct proc *p; struct vmspace *vm; vm_map_t map; vm_map_entry_t entry; vm_object_t object; struct vnode *vp; int gen; bool terminate; terminate = false; sx_slock(&allproc_lock); again: gen = allproc_gen; FOREACH_PROC_IN_SYSTEM(p) { PROC_LOCK(p); if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) != 0) { PROC_UNLOCK(p); continue; } vm = vmspace_acquire_ref(p); _PHOLD_LITE(p); PROC_UNLOCK(p); if (vm == NULL) { PRELE(p); continue; } sx_sunlock(&allproc_lock); map = &vm->vm_map; vm_map_lock(map); if (map->busy) vm_map_wait_busy(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { if ((entry->eflags & (MAP_ENTRY_GUARD | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 || (entry->max_protection & VM_PROT_WRITE) == 0) continue; object = entry->object.vm_object; if (object == NULL || object->type != OBJT_SWAP || (object->flags & OBJ_TMPFS_NODE) == 0) continue; /* * No need to dig into shadow chain, mapping * of the object not at top is readonly. */ VM_OBJECT_RLOCK(object); if (object->type == OBJT_DEAD) { VM_OBJECT_RUNLOCK(object); continue; } MPASS(object->ref_count > 1); if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) != (OBJ_TMPFS_NODE | OBJ_TMPFS)) { VM_OBJECT_RUNLOCK(object); continue; } vp = object->un_pager.swp.swp_tmpfs; if (vp->v_mount != mp) { VM_OBJECT_RUNLOCK(object); continue; } terminate = cb(mp, map, entry, cb_arg); VM_OBJECT_RUNLOCK(object); if (terminate) break; } vm_map_unlock(map); vmspace_free(vm); sx_slock(&allproc_lock); PRELE(p); if (terminate) break; } if (!terminate && gen != allproc_gen) goto again; sx_sunlock(&allproc_lock); } static bool tmpfs_check_rw_maps(struct mount *mp) { struct tmpfs_check_rw_maps_arg ca; ca.found = false; tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca); return (ca.found); } static int tmpfs_rw_to_ro(struct mount *mp) { int error, flags; bool forced; forced = (mp->mnt_flag & MNT_FORCE) != 0; flags = WRITECLOSE | (forced ? FORCECLOSE : 0); if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) return (error); error = vfs_write_suspend_umnt(mp); if (error != 0) return (error); if (!forced && tmpfs_check_rw_maps(mp)) { error = EBUSY; goto out; } VFS_TO_TMPFS(mp)->tm_ronly = 1; MNT_ILOCK(mp); mp->mnt_flag |= MNT_RDONLY; MNT_IUNLOCK(mp); for (;;) { tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL); tmpfs_update_mtime(mp, false); error = vflush(mp, 0, flags, curthread); if (error != 0) { VFS_TO_TMPFS(mp)->tm_ronly = 0; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_RDONLY; MNT_IUNLOCK(mp); goto out; } if (!tmpfs_check_rw_maps(mp)) break; } out: vfs_write_resume(mp, 0); return (error); } static int tmpfs_mount(struct mount *mp) { const size_t nodes_per_page = howmany(PAGE_SIZE, sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node)); struct tmpfs_mount *tmp; struct tmpfs_node *root; int error; bool nonc; /* Size counters. */ u_quad_t pages; off_t nodes_max, size_max, maxfilesize; /* Root node attributes. */ uid_t root_uid; gid_t root_gid; mode_t root_mode; struct vattr va; if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts)) return (EINVAL); if (mp->mnt_flag & MNT_UPDATE) { /* Only support update mounts for certain options. 
*/ if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0) return (EOPNOTSUPP); tmp = VFS_TO_TMPFS(mp); if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) { /* * On-the-fly resizing is not supported (yet). We still * need to have "size" listed as "supported", otherwise * trying to update fs that is listed in fstab with size * parameter, say trying to change rw to ro or vice * versa, would cause vfs_filteropt() to bail. */ if (size_max != tmp->tm_size_max) return (EOPNOTSUPP); } if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) && !tmp->tm_ronly) { /* RW -> RO */ return (tmpfs_rw_to_ro(mp)); } else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) && tmp->tm_ronly) { /* RO -> RW */ tmp->tm_ronly = 0; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_RDONLY; MNT_IUNLOCK(mp); } return (0); } vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY); error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred); VOP_UNLOCK(mp->mnt_vnodecovered, 0); if (error) return (error); if (mp->mnt_cred->cr_ruid != 0 || vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1) root_gid = va.va_gid; if (mp->mnt_cred->cr_ruid != 0 || vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1) root_uid = va.va_uid; if (mp->mnt_cred->cr_ruid != 0 || vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1) root_mode = va.va_mode; if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0) nodes_max = 0; if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0) size_max = 0; if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0) maxfilesize = 0; nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0; /* Do not allow mounts if we do not have enough memory to preserve * the minimum reserved pages. */ if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED) return (ENOSPC); /* Get the maximum number of memory pages this file system is * allowed to use, based on the maximum size the user passed in * the mount structure. A value of zero is treated as if the * maximum available space was requested. */ if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE || (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX)) pages = SIZE_MAX; else { size_max = roundup(size_max, PAGE_SIZE); pages = howmany(size_max, PAGE_SIZE); } MPASS(pages > 0); if (nodes_max <= 3) { if (pages < INT_MAX / nodes_per_page) nodes_max = pages * nodes_per_page; else nodes_max = INT_MAX; } if (nodes_max > INT_MAX) nodes_max = INT_MAX; MPASS(nodes_max >= 3); /* Allocate the tmpfs mount structure and fill it. */ tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount), M_TMPFSMNT, M_WAITOK | M_ZERO); mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF); tmp->tm_nodes_max = nodes_max; tmp->tm_nodes_inuse = 0; tmp->tm_refcount = 1; tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX; LIST_INIT(&tmp->tm_nodes_used); tmp->tm_size_max = size_max; tmp->tm_pages_max = pages; tmp->tm_pages_used = 0; new_unrhdr64(&tmp->tm_ino_unr, 2); tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0; tmp->tm_nonc = nonc; /* Allocate the root node. 
*/ error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid, root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root); if (error != 0 || root == NULL) { free(tmp, M_TMPFSMNT); return (error); } KASSERT(root->tn_id == 2, ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id)); tmp->tm_root = root; MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | MNTK_TEXT_REFS; MNT_IUNLOCK(mp); mp->mnt_data = tmp; mp->mnt_stat.f_namemax = MAXNAMLEN; vfs_getnewfsid(mp); vfs_mountedfrom(mp, "tmpfs"); return 0; } /* ARGSUSED2 */ static int tmpfs_unmount(struct mount *mp, int mntflags) { struct tmpfs_mount *tmp; struct tmpfs_node *node; int error, flags; flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0; tmp = VFS_TO_TMPFS(mp); /* Stop writers */ error = vfs_write_suspend_umnt(mp); if (error != 0) return (error); /* * At this point, nodes cannot be destroyed by any other * thread because write suspension is started. */ for (;;) { error = vflush(mp, 0, flags, curthread); if (error != 0) { vfs_write_resume(mp, VR_START_WRITE); return (error); } MNT_ILOCK(mp); if (mp->mnt_nvnodelistsize == 0) { MNT_IUNLOCK(mp); break; } MNT_IUNLOCK(mp); if ((mntflags & MNT_FORCE) == 0) { vfs_write_resume(mp, VR_START_WRITE); return (EBUSY); } } TMPFS_LOCK(tmp); while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) { TMPFS_NODE_LOCK(node); if (node->tn_type == VDIR) tmpfs_dir_destroy(tmp, node); if (tmpfs_free_node_locked(tmp, node, true)) TMPFS_LOCK(tmp); else TMPFS_NODE_UNLOCK(node); } mp->mnt_data = NULL; tmpfs_free_tmp(tmp); vfs_write_resume(mp, VR_START_WRITE); MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_LOCAL; MNT_IUNLOCK(mp); return (0); } void tmpfs_free_tmp(struct tmpfs_mount *tmp) { TMPFS_MP_ASSERT_LOCKED(tmp); MPASS(tmp->tm_refcount > 0); tmp->tm_refcount--; if (tmp->tm_refcount > 0) { TMPFS_UNLOCK(tmp); return; } TMPFS_UNLOCK(tmp); mtx_destroy(&tmp->tm_allnode_lock); MPASS(tmp->tm_pages_used == 0); MPASS(tmp->tm_nodes_inuse == 0); free(tmp, M_TMPFSMNT); } static int tmpfs_root(struct mount *mp, int flags, struct vnode **vpp) { int error; error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp); if (error == 0) (*vpp)->v_vflag |= VV_ROOT; return (error); } static int tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp) { struct tmpfs_fid_data tfd; struct tmpfs_mount *tmp; struct tmpfs_node *node; int error; if (fhp->fid_len != sizeof(tfd)) return (EINVAL); /* * Copy from fid_data onto the stack to avoid unaligned pointer use. * See the comment in sys/mount.h on struct fid for details. 
*/ memcpy(&tfd, fhp->fid_data, fhp->fid_len); tmp = VFS_TO_TMPFS(mp); if (tfd.tfd_id >= tmp->tm_nodes_max) return (EINVAL); TMPFS_LOCK(tmp); LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) { if (node->tn_id == tfd.tfd_id && node->tn_gen == tfd.tfd_gen) { tmpfs_ref_node(node); break; } } TMPFS_UNLOCK(tmp); if (node != NULL) { error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp); tmpfs_free_node(tmp, node); } else error = EINVAL; return (error); } /* ARGSUSED2 */ static int tmpfs_statfs(struct mount *mp, struct statfs *sbp) { struct tmpfs_mount *tmp; size_t used; tmp = VFS_TO_TMPFS(mp); sbp->f_iosize = PAGE_SIZE; sbp->f_bsize = PAGE_SIZE; used = tmpfs_pages_used(tmp); if (tmp->tm_pages_max != ULONG_MAX) sbp->f_blocks = tmp->tm_pages_max; else sbp->f_blocks = used + tmpfs_mem_avail(); if (sbp->f_blocks <= used) sbp->f_bavail = 0; else sbp->f_bavail = sbp->f_blocks - used; sbp->f_bfree = sbp->f_bavail; used = tmp->tm_nodes_inuse; sbp->f_files = tmp->tm_nodes_max; if (sbp->f_files <= used) sbp->f_ffree = 0; else sbp->f_ffree = sbp->f_files - used; /* sbp->f_owner = tmp->tn_uid; */ return 0; } static int tmpfs_sync(struct mount *mp, int waitfor) { if (waitfor == MNT_SUSPEND) { MNT_ILOCK(mp); mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; MNT_IUNLOCK(mp); } else if (waitfor == MNT_LAZY) { tmpfs_update_mtime(mp, true); } return (0); } /* * The presence of a susp_clean method tells the VFS to track writes. */ static void tmpfs_susp_clean(struct mount *mp __unused) { } static int tmpfs_init(struct vfsconf *conf) { tmpfs_subr_init(); return (0); } static int tmpfs_uninit(struct vfsconf *conf) { tmpfs_subr_uninit(); return (0); } /* * tmpfs vfs operations. */ struct vfsops tmpfs_vfsops = { .vfs_mount = tmpfs_mount, .vfs_unmount = tmpfs_unmount, .vfs_root = tmpfs_root, .vfs_statfs = tmpfs_statfs, .vfs_fhtovp = tmpfs_fhtovp, .vfs_sync = tmpfs_sync, .vfs_susp_clean = tmpfs_susp_clean, .vfs_init = tmpfs_init, .vfs_uninit = tmpfs_uninit, }; VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);
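
The whole change above is to give M_TMPFSMNT internal linkage: the malloc type is evidently only used inside tmpfs_vfsops.c, so the MALLOC_DECLARE() is dropped from tmpfs.h and the MALLOC_DEFINE() becomes static. Below is a minimal sketch of that pattern outside tmpfs; the names M_FOOPRIV, M_FOOSHARED, and foo_alloc() are illustrative only and not part of this patch.

/* foo.h -- only malloc types used by more than one .c file need a declaration. */
#ifdef _SYS_MALLOC_H_
MALLOC_DECLARE(M_FOOSHARED);
#endif

/* foo.c */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

/* Used only in this file: no header declaration, internal linkage. */
static MALLOC_DEFINE(M_FOOPRIV, "foo priv", "foo private structures");

/* Used from several files: keep the MALLOC_DECLARE() in foo.h. */
MALLOC_DEFINE(M_FOOSHARED, "foo shared", "foo shared structures");

void *
foo_alloc(size_t len)
{

	/* Allocation and accounting work the same for both kinds of type. */
	return (malloc(len, M_FOOPRIV, M_WAITOK | M_ZERO));
}

Making the definition static only affects the C symbol's visibility; MALLOC_DEFINE() still registers the type with the kernel's malloc statistics at load time, so "tmpfs mount" should continue to appear in vmstat -m output as before.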