diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vfs.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vfs.h index a432f6c56739..62986b7fc525 100644 --- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vfs.h +++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vfs.h @@ -1,127 +1,127 @@ /* * Copyright (c) 2007 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _OPENSOLARIS_SYS_VFS_H_ #define _OPENSOLARIS_SYS_VFS_H_ #include #include #include #define rootdir rootvnode struct thread; struct vnode; typedef struct mount vfs_t; typedef int umode_t; #define vfs_flag mnt_flag #define vfs_data mnt_data #define vfs_count mnt_ref #define vfs_fsid mnt_stat.f_fsid #define vfs_bsize mnt_stat.f_bsize #define vfs_resource mnt_stat.f_mntfromname #define v_flag v_vflag #define v_vfsp v_mount #define VFS_RDONLY MNT_RDONLY #define VFS_NOSETUID MNT_NOSUID #define VFS_NOEXEC MNT_NOEXEC #define fs_vscan(vp, cr, async) (0) #define VROOT VV_ROOT #define XU_NGROUPS 16 /* * Structure defining a mount option for a filesystem. * option names are found in mntent.h */ typedef struct mntopt { char *mo_name; /* option name */ char **mo_cancel; /* list of options cancelled by this one */ char *mo_arg; /* argument string for this option */ int mo_flags; /* flags for this mount option */ void *mo_data; /* filesystem specific data */ } mntopt_t; /* * Flags that apply to mount options */ #define MO_SET 0x01 /* option is set */ #define MO_NODISPLAY 0x02 /* option not listed in mnttab */ #define MO_HASVALUE 0x04 /* option takes a value */ #define MO_IGNORE 0x08 /* option ignored by parser */ #define MO_DEFAULT MO_SET /* option is on by default */ #define MO_TAG 0x10 /* flags a tag set by user program */ #define MO_EMPTY 0x20 /* empty space in option table */ #define VFS_NOFORCEOPT 0x01 /* honor MO_IGNORE (don't set option) */ #define VFS_DISPLAY 0x02 /* Turn off MO_NODISPLAY bit for opt */ #define VFS_NODISPLAY 0x04 /* Turn on MO_NODISPLAY bit for opt */ #define VFS_CREATEOPT 0x08 /* Create the opt if it's not there */ /* * Structure holding mount option strings for the mounted file system. 
 */
typedef struct mntopts {
	uint_t		mo_count;	/* number of entries in table */
	mntopt_t	*mo_list;	/* list of mount options */
} mntopts_t;

void vfs_setmntopt(vfs_t *vfsp, const char *name, const char *arg,
    int flags __unused);
void vfs_clearmntopt(vfs_t *vfsp, const char *name);
int vfs_optionisset(const vfs_t *vfsp, const char *opt, char **argp);
int mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype,
-    char *fspath, char *fspec, int fsflags);
+    char *fspath, char *fspec, int fsflags, vfs_t *parent_vfsp);

typedef uint64_t vfs_feature_t;

#define	VFSFT_XVATTR		0x100000001	/* Supports xvattr for attrs */
#define	VFSFT_CASEINSENSITIVE	0x100000002	/* Supports case-insensitive */
#define	VFSFT_NOCASESENSITIVE	0x100000004	/* NOT case-sensitive */
#define	VFSFT_DIRENTFLAGS	0x100000008	/* Supports dirent flags */
#define	VFSFT_ACLONCREATE	0x100000010	/* Supports ACL on create */
#define	VFSFT_ACEMASKONACCESS	0x100000020	/* Can use ACEMASK for access */
#define	VFSFT_SYSATTR_VIEWS	0x100000040	/* Supports sysattr view i/f */
#define	VFSFT_ACCESS_FILTER	0x100000080	/* dirents filtered by access */
#define	VFSFT_REPARSE		0x100000100	/* Supports reparse point */
#define	VFSFT_ZEROCOPY_SUPPORTED 0x100000200	/* Support loaning /returning cache buffer */
#define	vfs_set_feature(vfsp, feature)		do { } while (0)
#define	vfs_clear_feature(vfsp, feature)	do { } while (0)
#define	vfs_has_feature(vfsp, feature)		(0)

#include

#endif	/* _OPENSOLARIS_SYS_VFS_H_ */
diff --git a/sys/contrib/openzfs/module/os/freebsd/spl/spl_vfs.c b/sys/contrib/openzfs/module/os/freebsd/spl/spl_vfs.c
index 53ef46fb8ce5..177a059e88a1 100644
--- a/sys/contrib/openzfs/module/os/freebsd/spl/spl_vfs.c
+++ b/sys/contrib/openzfs/module/os/freebsd/spl/spl_vfs.c
@@ -1,279 +1,286 @@
/*
 * Copyright (c) 2006-2007 Pawel Jakub Dawidek
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DECLARE(M_MOUNT); void vfs_setmntopt(vfs_t *vfsp, const char *name, const char *arg, int flags __unused) { struct vfsopt *opt; size_t namesize; int locked; if (!(locked = mtx_owned(MNT_MTX(vfsp)))) MNT_ILOCK(vfsp); if (vfsp->mnt_opt == NULL) { void *opts; MNT_IUNLOCK(vfsp); opts = malloc(sizeof (*vfsp->mnt_opt), M_MOUNT, M_WAITOK); MNT_ILOCK(vfsp); if (vfsp->mnt_opt == NULL) { vfsp->mnt_opt = opts; TAILQ_INIT(vfsp->mnt_opt); } else { free(opts, M_MOUNT); } } MNT_IUNLOCK(vfsp); opt = malloc(sizeof (*opt), M_MOUNT, M_WAITOK); namesize = strlen(name) + 1; opt->name = malloc(namesize, M_MOUNT, M_WAITOK); strlcpy(opt->name, name, namesize); opt->pos = -1; opt->seen = 1; if (arg == NULL) { opt->value = NULL; opt->len = 0; } else { opt->len = strlen(arg) + 1; opt->value = malloc(opt->len, M_MOUNT, M_WAITOK); bcopy(arg, opt->value, opt->len); } MNT_ILOCK(vfsp); TAILQ_INSERT_TAIL(vfsp->mnt_opt, opt, link); if (!locked) MNT_IUNLOCK(vfsp); } void vfs_clearmntopt(vfs_t *vfsp, const char *name) { int locked; if (!(locked = mtx_owned(MNT_MTX(vfsp)))) MNT_ILOCK(vfsp); vfs_deleteopt(vfsp->mnt_opt, name); if (!locked) MNT_IUNLOCK(vfsp); } int vfs_optionisset(const vfs_t *vfsp, const char *opt, char **argp) { struct vfsoptlist *opts = vfsp->mnt_optnew; int error; if (opts == NULL) return (0); error = vfs_getopt(opts, opt, (void **)argp, NULL); return (error != 0 ? 0 : 1); } int mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath, - char *fspec, int fsflags) + char *fspec, int fsflags, vfs_t *parent_vfsp) { struct vfsconf *vfsp; struct mount *mp; vnode_t *vp, *mvp; int error; ASSERT_VOP_ELOCKED(*vpp, "mount_snapshot"); vp = *vpp; *vpp = NULL; error = 0; /* * Be ultra-paranoid about making sure the type and fspath * variables will fit in our mp buffers, including the * terminating NUL. */ if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN) error = ENAMETOOLONG; if (error == 0 && (vfsp = vfs_byname_kld(fstype, td, &error)) == NULL) error = ENODEV; if (error == 0 && vp->v_type != VDIR) error = ENOTDIR; /* * We need vnode lock to protect v_mountedhere and vnode interlock * to protect v_iflag. */ if (error == 0) { VI_LOCK(vp); if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL) vp->v_iflag |= VI_MOUNT; else error = EBUSY; VI_UNLOCK(vp); } if (error != 0) { vput(vp); return (error); } vn_seqc_write_begin(vp); VOP_UNLOCK1(vp); /* * Allocate and initialize the filesystem. * We don't want regular user that triggered snapshot mount to be able * to unmount it, so pass credentials of the parent mount. */ mp = vfs_mount_alloc(vp, vfsp, fspath, vp->v_mount->mnt_cred); mp->mnt_optnew = NULL; vfs_setmntopt(mp, "from", fspec, 0); mp->mnt_optnew = mp->mnt_opt; mp->mnt_opt = NULL; /* * Set the mount level flags. */ mp->mnt_flag = fsflags & MNT_UPDATEMASK; /* * Snapshots are always read-only. */ mp->mnt_flag |= MNT_RDONLY; /* * We don't want snapshots to allow access to vulnerable setuid * programs, so we turn off setuid when mounting snapshots. */ mp->mnt_flag |= MNT_NOSUID; /* * We don't want snapshots to be visible in regular * mount(8) and df(1) output. */ mp->mnt_flag |= MNT_IGNORE; error = VFS_MOUNT(mp); if (error != 0) { /* * Clear VI_MOUNT and decrement the use count "atomically", * under the vnode lock. 
This is not strictly required,
		 * but makes it easier to reason about the life-cycle and
		 * ownership of the covered vnode.
		 */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VI_LOCK(vp);
		vp->v_iflag &= ~VI_MOUNT;
		VI_UNLOCK(vp);

		vn_seqc_write_end(vp);
		vput(vp);
		vfs_unbusy(mp);
		vfs_freeopts(mp->mnt_optnew);
		mp->mnt_vnodecovered = NULL;
		vfs_mount_destroy(mp);
		return (error);
	}

	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	mp->mnt_opt = mp->mnt_optnew;
	(void) VFS_STATFS(mp, &mp->mnt_stat);

+#ifdef VFS_SUPPORTS_EXJAIL_CLONE
+	/*
+	 * Clone the mnt_exjail credentials of the parent, as required.
+	 */
+	vfs_exjail_clone(parent_vfsp, mp);
+#endif
+
	/*
	 * Prevent external consumers of mount options from reading
	 * mnt_optnew.
	 */
	mp->mnt_optnew = NULL;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef FREEBSD_NAMECACHE
	cache_purge(vp);
#endif
	VI_LOCK(vp);
	vp->v_iflag &= ~VI_MOUNT;
#ifdef VIRF_MOUNTPOINT
	vn_irflag_set_locked(vp, VIRF_MOUNTPOINT);
#endif
	vp->v_mountedhere = mp;
	VI_UNLOCK(vp);
	/* Put the new filesystem on the mount list. */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_event_signal(NULL, VQ_MOUNT, 0);
	if (VFS_ROOT(mp, LK_EXCLUSIVE, &mvp))
		panic("mount: lost mount");
	vn_seqc_write_end(vp);
	VOP_UNLOCK1(vp);
#if __FreeBSD_version >= 1300048
	vfs_op_exit(mp);
#endif
	vfs_unbusy(mp);
	*vpp = mvp;
	return (0);
}

/*
 * Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
 * asynchronously using a taskq. This can avoid deadlocks caused by re-entering
 * the file system as a result of releasing the vnode. Note, file systems
 * already have to handle the race where the vnode is incremented before the
 * inactive routine is called and does its locking.
 *
 * Warning: Excessive use of this routine can lead to performance problems.
 * This is because taskqs throttle back allocation if too many are created.
 */
void
vn_rele_async(vnode_t *vp, taskq_t *taskq)
{
	VERIFY3U(vp->v_usecount, >, 0);
	if (refcount_release_if_not_last(&vp->v_usecount)) {
#if __FreeBSD_version < 1300045
		vdrop(vp);
#endif
		return;
	}
	VERIFY3U(taskq_dispatch((taskq_t *)taskq,
	    (task_func_t *)vrele, vp, TQ_SLEEP), !=, 0);
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index cfc4bab2fa96..3c9311098696 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -1,1397 +1,1398 @@
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
 */

/*
 * ZFS control directory (a.k.a.
".zfs") * * This directory provides a common location for all ZFS meta-objects. * Currently, this is only the 'snapshot' directory, but this may expand in the * future. The elements are built using the GFS primitives, as the hierarchy * does not actually exist on disk. * * For 'snapshot', we don't want to have all snapshots always mounted, because * this would take up a huge amount of space in /etc/mnttab. We have three * types of objects: * * ctldir ------> snapshotdir -------> snapshot * | * | * V * mounted fs * * The 'snapshot' node contains just enough information to lookup '..' and act * as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we * perform an automount of the underlying filesystem and return the * corresponding vnode. * * All mounts are handled automatically by the kernel, but unmounts are * (currently) handled from user land. The main reason is that there is no * reliable way to auto-unmount the filesystem when it's "no longer in use". * When the user unmounts a filesystem, we call zfsctl_unmount(), which * unmounts any snapshots within the snapshot directory. * * The '.zfs', '.zfs/snapshot', and all directories created under * '.zfs/snapshot' (ie: '.zfs/snapshot/') are all GFS nodes and * share the same vfs_t as the head filesystem (what '.zfs' lives under). * * File systems mounted ontop of the GFS nodes '.zfs/snapshot/' * (ie: snapshots) are ZFS nodes and have their own unique vfs_t. * However, vnodes within these mounted on file systems have their v_vfsp * fields set to the head filesystem to make NFS happy (see * zfsctl_snapdir_lookup()). We VFS_HOLD the head filesystem's vfs_t * so that it cannot be freed until all snapshots have been unmounted. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "zfs_namecheck.h" #include #include /* Common access mode for all virtual directories under the ctldir */ const uint16_t zfsctl_ctldir_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; /* * "Synthetic" filesystem implementation. */ /* * Assert that A implies B. */ #define KASSERT_IMPLY(A, B, msg) KASSERT(!(A) || (B), (msg)); static MALLOC_DEFINE(M_SFSNODES, "sfs_nodes", "synthetic-fs nodes"); typedef struct sfs_node { char sn_name[ZFS_MAX_DATASET_NAME_LEN]; uint64_t sn_parent_id; uint64_t sn_id; } sfs_node_t; /* * Check the parent's ID as well as the node's to account for a chance * that IDs originating from different domains (snapshot IDs, artificial * IDs, znode IDs) may clash. */ static int sfs_compare_ids(struct vnode *vp, void *arg) { sfs_node_t *n1 = vp->v_data; sfs_node_t *n2 = arg; bool equal; equal = n1->sn_id == n2->sn_id && n1->sn_parent_id == n2->sn_parent_id; /* Zero means equality. 
*/ return (!equal); } static int sfs_vnode_get(const struct mount *mp, int flags, uint64_t parent_id, uint64_t id, struct vnode **vpp) { sfs_node_t search; int err; search.sn_id = id; search.sn_parent_id = parent_id; err = vfs_hash_get(mp, (uint32_t)id, flags, curthread, vpp, sfs_compare_ids, &search); return (err); } static int sfs_vnode_insert(struct vnode *vp, int flags, uint64_t parent_id, uint64_t id, struct vnode **vpp) { int err; KASSERT(vp->v_data != NULL, ("sfs_vnode_insert with NULL v_data")); err = vfs_hash_insert(vp, (uint32_t)id, flags, curthread, vpp, sfs_compare_ids, vp->v_data); return (err); } static void sfs_vnode_remove(struct vnode *vp) { vfs_hash_remove(vp); } typedef void sfs_vnode_setup_fn(vnode_t *vp, void *arg); static int sfs_vgetx(struct mount *mp, int flags, uint64_t parent_id, uint64_t id, const char *tag, struct vop_vector *vops, sfs_vnode_setup_fn setup, void *arg, struct vnode **vpp) { struct vnode *vp; int error; error = sfs_vnode_get(mp, flags, parent_id, id, vpp); if (error != 0 || *vpp != NULL) { KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL, "sfs vnode with no data"); return (error); } /* Allocate a new vnode/inode. */ error = getnewvnode(tag, mp, vops, &vp); if (error != 0) { *vpp = NULL; return (error); } /* * Exclusively lock the vnode vnode while it's being constructed. */ lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); error = insmntque(vp, mp); if (error != 0) { *vpp = NULL; return (error); } setup(vp, arg); error = sfs_vnode_insert(vp, flags, parent_id, id, vpp); if (error != 0 || *vpp != NULL) { KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL, "sfs vnode with no data"); return (error); } #if __FreeBSD_version >= 1400077 vn_set_state(vp, VSTATE_CONSTRUCTED); #endif *vpp = vp; return (0); } static void sfs_print_node(sfs_node_t *node) { printf("\tname = %s\n", node->sn_name); printf("\tparent_id = %ju\n", (uintmax_t)node->sn_parent_id); printf("\tid = %ju\n", (uintmax_t)node->sn_id); } static sfs_node_t * sfs_alloc_node(size_t size, const char *name, uint64_t parent_id, uint64_t id) { struct sfs_node *node; KASSERT(strlen(name) < sizeof (node->sn_name), ("sfs node name is too long")); KASSERT(size >= sizeof (*node), ("sfs node size is too small")); node = malloc(size, M_SFSNODES, M_WAITOK | M_ZERO); strlcpy(node->sn_name, name, sizeof (node->sn_name)); node->sn_parent_id = parent_id; node->sn_id = id; return (node); } static void sfs_destroy_node(sfs_node_t *node) { free(node, M_SFSNODES); } static void * sfs_reclaim_vnode(vnode_t *vp) { void *data; sfs_vnode_remove(vp); data = vp->v_data; vp->v_data = NULL; return (data); } static int sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap, zfs_uio_t *uio, off_t *offp) { struct dirent entry; int error; /* Reset ncookies for subsequent use of vfs_read_dirent. 
*/ if (ap->a_ncookies != NULL) *ap->a_ncookies = 0; if (zfs_uio_resid(uio) < sizeof (entry)) return (SET_ERROR(EINVAL)); if (zfs_uio_offset(uio) < 0) return (SET_ERROR(EINVAL)); if (zfs_uio_offset(uio) == 0) { entry.d_fileno = id; entry.d_type = DT_DIR; entry.d_name[0] = '.'; entry.d_name[1] = '\0'; entry.d_namlen = 1; entry.d_reclen = sizeof (entry); error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio)); if (error != 0) return (SET_ERROR(error)); } if (zfs_uio_offset(uio) < sizeof (entry)) return (SET_ERROR(EINVAL)); if (zfs_uio_offset(uio) == sizeof (entry)) { entry.d_fileno = parent_id; entry.d_type = DT_DIR; entry.d_name[0] = '.'; entry.d_name[1] = '.'; entry.d_name[2] = '\0'; entry.d_namlen = 2; entry.d_reclen = sizeof (entry); error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio)); if (error != 0) return (SET_ERROR(error)); } if (offp != NULL) *offp = 2 * sizeof (entry); return (0); } /* * .zfs inode namespace * * We need to generate unique inode numbers for all files and directories * within the .zfs pseudo-filesystem. We use the following scheme: * * ENTRY ZFSCTL_INODE * .zfs 1 * .zfs/snapshot 2 * .zfs/snapshot/ objectid(snap) */ #define ZFSCTL_INO_SNAP(id) (id) static struct vop_vector zfsctl_ops_root; static struct vop_vector zfsctl_ops_snapdir; static struct vop_vector zfsctl_ops_snapshot; void zfsctl_init(void) { } void zfsctl_fini(void) { } boolean_t zfsctl_is_node(vnode_t *vp) { return (vn_matchops(vp, zfsctl_ops_root) || vn_matchops(vp, zfsctl_ops_snapdir) || vn_matchops(vp, zfsctl_ops_snapshot)); } typedef struct zfsctl_root { sfs_node_t node; sfs_node_t *snapdir; timestruc_t cmtime; } zfsctl_root_t; /* * Create the '.zfs' directory. */ void zfsctl_create(zfsvfs_t *zfsvfs) { zfsctl_root_t *dot_zfs; sfs_node_t *snapdir; vnode_t *rvp; uint64_t crtime[2]; ASSERT3P(zfsvfs->z_ctldir, ==, NULL); snapdir = sfs_alloc_node(sizeof (*snapdir), "snapshot", ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR); dot_zfs = (zfsctl_root_t *)sfs_alloc_node(sizeof (*dot_zfs), ".zfs", 0, ZFSCTL_INO_ROOT); dot_zfs->snapdir = snapdir; VERIFY0(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp)); VERIFY0(sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs), &crtime, sizeof (crtime))); ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime); vput(rvp); zfsvfs->z_ctldir = dot_zfs; } /* * Destroy the '.zfs' directory. Only called when the filesystem is unmounted. * The nodes must not have any associated vnodes by now as they should be * vflush-ed. */ void zfsctl_destroy(zfsvfs_t *zfsvfs) { sfs_destroy_node(zfsvfs->z_ctldir->snapdir); sfs_destroy_node((sfs_node_t *)zfsvfs->z_ctldir); zfsvfs->z_ctldir = NULL; } static int zfsctl_fs_root_vnode(struct mount *mp, void *arg __unused, int flags, struct vnode **vpp) { return (VFS_ROOT(mp, flags, vpp)); } static void zfsctl_common_vnode_setup(vnode_t *vp, void *arg) { ASSERT_VOP_ELOCKED(vp, __func__); /* We support shared locking. 
*/ VN_LOCK_ASHARE(vp); vp->v_type = VDIR; vp->v_data = arg; } static int zfsctl_root_vnode(struct mount *mp, void *arg __unused, int flags, struct vnode **vpp) { void *node; int err; node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir; err = sfs_vgetx(mp, flags, 0, ZFSCTL_INO_ROOT, "zfs", &zfsctl_ops_root, zfsctl_common_vnode_setup, node, vpp); return (err); } static int zfsctl_snapdir_vnode(struct mount *mp, void *arg __unused, int flags, struct vnode **vpp) { void *node; int err; node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir->snapdir; err = sfs_vgetx(mp, flags, ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, "zfs", &zfsctl_ops_snapdir, zfsctl_common_vnode_setup, node, vpp); return (err); } /* * Given a root znode, retrieve the associated .zfs directory. * Add a hold to the vnode and return it. */ int zfsctl_root(zfsvfs_t *zfsvfs, int flags, vnode_t **vpp) { int error; error = zfsctl_root_vnode(zfsvfs->z_vfs, NULL, flags, vpp); return (error); } /* * Common open routine. Disallow any write access. */ static int zfsctl_common_open(struct vop_open_args *ap) { int flags = ap->a_mode; if (flags & FWRITE) return (SET_ERROR(EACCES)); return (0); } /* * Common close routine. Nothing to do here. */ /* ARGSUSED */ static int zfsctl_common_close(struct vop_close_args *ap) { return (0); } /* * Common access routine. Disallow writes. */ static int zfsctl_common_access(struct vop_access_args *ap) { accmode_t accmode = ap->a_accmode; if (accmode & VWRITE) return (SET_ERROR(EACCES)); return (0); } /* * Common getattr function. Fill in basic information. */ static void zfsctl_common_getattr(vnode_t *vp, vattr_t *vap) { timestruc_t now; sfs_node_t *node; node = vp->v_data; vap->va_uid = 0; vap->va_gid = 0; vap->va_rdev = 0; /* * We are a purely virtual object, so we have no * blocksize or allocated blocks. */ vap->va_blksize = 0; vap->va_nblocks = 0; vap->va_gen = 0; vn_fsid(vp, vap); vap->va_mode = zfsctl_ctldir_mode; vap->va_type = VDIR; /* * We live in the now (for atime). */ gethrestime(&now); vap->va_atime = now; /* FreeBSD: Reset chflags(2) flags. */ vap->va_flags = 0; vap->va_nodeid = node->sn_id; /* At least '.' and '..'. */ vap->va_nlink = 2; } #ifndef _OPENSOLARIS_SYS_VNODE_H_ struct vop_fid_args { struct vnode *a_vp; struct fid *a_fid; }; #endif static int zfsctl_common_fid(struct vop_fid_args *ap) { vnode_t *vp = ap->a_vp; fid_t *fidp = (void *)ap->a_fid; sfs_node_t *node = vp->v_data; uint64_t object = node->sn_id; zfid_short_t *zfid; int i; zfid = (zfid_short_t *)fidp; zfid->zf_len = SHORT_FID_LEN; for (i = 0; i < sizeof (zfid->zf_object); i++) zfid->zf_object[i] = (uint8_t)(object >> (8 * i)); /* .zfs nodes always have a generation number of 0 */ for (i = 0; i < sizeof (zfid->zf_gen); i++) zfid->zf_gen[i] = 0; return (0); } #ifndef _SYS_SYSPROTO_H_ struct vop_reclaim_args { struct vnode *a_vp; struct thread *a_td; }; #endif static int zfsctl_common_reclaim(struct vop_reclaim_args *ap) { vnode_t *vp = ap->a_vp; (void) sfs_reclaim_vnode(vp); return (0); } #ifndef _SYS_SYSPROTO_H_ struct vop_print_args { struct vnode *a_vp; }; #endif static int zfsctl_common_print(struct vop_print_args *ap) { sfs_print_node(ap->a_vp->v_data); return (0); } #ifndef _SYS_SYSPROTO_H_ struct vop_getattr_args { struct vnode *a_vp; struct vattr *a_vap; struct ucred *a_cred; }; #endif /* * Get root directory attributes. 
*/ static int zfsctl_root_getattr(struct vop_getattr_args *ap) { struct vnode *vp = ap->a_vp; struct vattr *vap = ap->a_vap; zfsctl_root_t *node = vp->v_data; zfsctl_common_getattr(vp, vap); vap->va_ctime = node->cmtime; vap->va_mtime = vap->va_ctime; vap->va_birthtime = vap->va_ctime; vap->va_nlink += 1; /* snapdir */ vap->va_size = vap->va_nlink; return (0); } /* * When we lookup "." we still can be asked to lock it * differently, can't we? */ static int zfsctl_relock_dot(vnode_t *dvp, int ltype) { vref(dvp); if (ltype != VOP_ISLOCKED(dvp)) { if (ltype == LK_EXCLUSIVE) vn_lock(dvp, LK_UPGRADE | LK_RETRY); else /* if (ltype == LK_SHARED) */ vn_lock(dvp, LK_DOWNGRADE | LK_RETRY); /* Relock for the "." case may left us with reclaimed vnode. */ if (VN_IS_DOOMED(dvp)) { vrele(dvp); return (SET_ERROR(ENOENT)); } } return (0); } /* * Special case the handling of "..". */ static int zfsctl_root_lookup(struct vop_lookup_args *ap) { struct componentname *cnp = ap->a_cnp; vnode_t *dvp = ap->a_dvp; vnode_t **vpp = ap->a_vpp; int flags = ap->a_cnp->cn_flags; int lkflags = ap->a_cnp->cn_lkflags; int nameiop = ap->a_cnp->cn_nameiop; int err; ASSERT3S(dvp->v_type, ==, VDIR); if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP) return (SET_ERROR(ENOTSUP)); if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') { err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK); if (err == 0) *vpp = dvp; } else if ((flags & ISDOTDOT) != 0) { err = vn_vget_ino_gen(dvp, zfsctl_fs_root_vnode, NULL, lkflags, vpp); } else if (strncmp(cnp->cn_nameptr, "snapshot", cnp->cn_namelen) == 0) { err = zfsctl_snapdir_vnode(dvp->v_mount, NULL, lkflags, vpp); } else { err = SET_ERROR(ENOENT); } if (err != 0) *vpp = NULL; return (err); } static int zfsctl_root_readdir(struct vop_readdir_args *ap) { struct dirent entry; vnode_t *vp = ap->a_vp; zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; zfsctl_root_t *node = vp->v_data; zfs_uio_t uio; int *eofp = ap->a_eofflag; off_t dots_offset; int error; zfs_uio_init(&uio, ap->a_uio); ASSERT3S(vp->v_type, ==, VDIR); /* * FIXME: this routine only ever emits 3 entries and does not tolerate * being called with a buffer too small to handle all of them. * * The check below facilitates the idiom of repeating calls until the * count to return is 0. 
*/ if (zfs_uio_offset(&uio) == 3 * sizeof (entry)) { return (0); } error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio, &dots_offset); if (error != 0) { if (error == ENAMETOOLONG) /* ran out of destination space */ error = 0; return (error); } if (zfs_uio_offset(&uio) != dots_offset) return (SET_ERROR(EINVAL)); CTASSERT(sizeof (node->snapdir->sn_name) <= sizeof (entry.d_name)); entry.d_fileno = node->snapdir->sn_id; entry.d_type = DT_DIR; strcpy(entry.d_name, node->snapdir->sn_name); entry.d_namlen = strlen(entry.d_name); entry.d_reclen = sizeof (entry); error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio)); if (error != 0) { if (error == ENAMETOOLONG) error = 0; return (SET_ERROR(error)); } if (eofp != NULL) *eofp = 1; return (0); } static int zfsctl_root_vptocnp(struct vop_vptocnp_args *ap) { static const char dotzfs_name[4] = ".zfs"; vnode_t *dvp; int error; if (*ap->a_buflen < sizeof (dotzfs_name)) return (SET_ERROR(ENOMEM)); error = vn_vget_ino_gen(ap->a_vp, zfsctl_fs_root_vnode, NULL, LK_SHARED, &dvp); if (error != 0) return (SET_ERROR(error)); VOP_UNLOCK1(dvp); *ap->a_vpp = dvp; *ap->a_buflen -= sizeof (dotzfs_name); bcopy(dotzfs_name, ap->a_buf + *ap->a_buflen, sizeof (dotzfs_name)); return (0); } static int zfsctl_common_pathconf(struct vop_pathconf_args *ap) { /* * We care about ACL variables so that user land utilities like ls * can display them correctly. Since the ctldir's st_dev is set to be * the same as the parent dataset, we must support all variables that * it supports. */ switch (ap->a_name) { case _PC_LINK_MAX: *ap->a_retval = MIN(LONG_MAX, ZFS_LINK_MAX); return (0); case _PC_FILESIZEBITS: *ap->a_retval = 64; return (0); case _PC_MIN_HOLE_SIZE: *ap->a_retval = (int)SPA_MINBLOCKSIZE; return (0); case _PC_ACL_EXTENDED: *ap->a_retval = 0; return (0); case _PC_ACL_NFS4: *ap->a_retval = 1; return (0); case _PC_ACL_PATH_MAX: *ap->a_retval = ACL_MAX_ENTRIES; return (0); case _PC_NAME_MAX: *ap->a_retval = NAME_MAX; return (0); default: return (vop_stdpathconf(ap)); } } /* * Returns a trivial ACL */ static int zfsctl_common_getacl(struct vop_getacl_args *ap) { int i; if (ap->a_type != ACL_TYPE_NFS4) return (EINVAL); acl_nfs4_sync_acl_from_mode(ap->a_aclp, zfsctl_ctldir_mode, 0); /* * acl_nfs4_sync_acl_from_mode assumes that the owner can always modify * attributes. That is not the case for the ctldir, so we must clear * those bits. We also must clear ACL_READ_NAMED_ATTRS, because xattrs * aren't supported by the ctldir. 
*/ for (i = 0; i < ap->a_aclp->acl_cnt; i++) { struct acl_entry *entry; entry = &(ap->a_aclp->acl_entry[i]); entry->ae_perm &= ~(ACL_WRITE_ACL | ACL_WRITE_OWNER | ACL_WRITE_ATTRIBUTES | ACL_WRITE_NAMED_ATTRS | ACL_READ_NAMED_ATTRS); } return (0); } static struct vop_vector zfsctl_ops_root = { .vop_default = &default_vnodeops, #if __FreeBSD_version >= 1300121 .vop_fplookup_vexec = VOP_EAGAIN, #endif #if __FreeBSD_version >= 1300139 .vop_fplookup_symlink = VOP_EAGAIN, #endif .vop_open = zfsctl_common_open, .vop_close = zfsctl_common_close, .vop_ioctl = VOP_EINVAL, .vop_getattr = zfsctl_root_getattr, .vop_access = zfsctl_common_access, .vop_readdir = zfsctl_root_readdir, .vop_lookup = zfsctl_root_lookup, .vop_inactive = VOP_NULL, .vop_reclaim = zfsctl_common_reclaim, .vop_fid = zfsctl_common_fid, .vop_print = zfsctl_common_print, .vop_vptocnp = zfsctl_root_vptocnp, .vop_pathconf = zfsctl_common_pathconf, .vop_getacl = zfsctl_common_getacl, #if __FreeBSD_version >= 1400043 .vop_add_writecount = vop_stdadd_writecount_nomsync, #endif }; VFS_VOP_VECTOR_REGISTER(zfsctl_ops_root); static int zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname) { objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os; dmu_objset_name(os, zname); if (strlen(zname) + 1 + strlen(name) >= len) return (SET_ERROR(ENAMETOOLONG)); (void) strcat(zname, "@"); (void) strcat(zname, name); return (0); } static int zfsctl_snapshot_lookup(vnode_t *vp, const char *name, uint64_t *id) { objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os; int err; err = dsl_dataset_snap_lookup(dmu_objset_ds(os), name, id); return (err); } /* * Given a vnode get a root vnode of a filesystem mounted on top of * the vnode, if any. The root vnode is referenced and locked. * If no filesystem is mounted then the orinal vnode remains referenced * and locked. If any error happens the orinal vnode is unlocked and * released. */ static int zfsctl_mounted_here(vnode_t **vpp, int flags) { struct mount *mp; int err; ASSERT_VOP_LOCKED(*vpp, __func__); ASSERT3S((*vpp)->v_type, ==, VDIR); if ((mp = (*vpp)->v_mountedhere) != NULL) { err = vfs_busy(mp, 0); KASSERT(err == 0, ("vfs_busy(mp, 0) failed with %d", err)); KASSERT(vrefcnt(*vpp) > 1, ("unreferenced mountpoint")); vput(*vpp); err = VFS_ROOT(mp, flags, vpp); vfs_unbusy(mp); return (err); } return (EJUSTRETURN); } typedef struct { const char *snap_name; uint64_t snap_id; } snapshot_setup_arg_t; static void zfsctl_snapshot_vnode_setup(vnode_t *vp, void *arg) { snapshot_setup_arg_t *ssa = arg; sfs_node_t *node; ASSERT_VOP_ELOCKED(vp, __func__); node = sfs_alloc_node(sizeof (sfs_node_t), ssa->snap_name, ZFSCTL_INO_SNAPDIR, ssa->snap_id); zfsctl_common_vnode_setup(vp, node); /* We have to support recursive locking. */ VN_LOCK_AREC(vp); } /* * Lookup entry point for the 'snapshot' directory. Try to open the * snapshot if it exist, creating the pseudo filesystem vnode as necessary. * Perform a mount of the associated dataset on top of the vnode. * There are four possibilities: * - the snapshot node and vnode do not exist * - the snapshot vnode is covered by the mounted snapshot * - the snapshot vnode is not covered yet, the mount operation is in progress * - the snapshot vnode is not covered, because the snapshot has been unmounted * The last two states are transient and should be relatively short-lived. 
 */
static int
zfsctl_snapdir_lookup(struct vop_lookup_args *ap)
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	char name[NAME_MAX + 1];
	char fullname[ZFS_MAX_DATASET_NAME_LEN];
	char *mountpoint;
	size_t mountpoint_len;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	uint64_t snap_id;
	int nameiop = cnp->cn_nameiop;
	int lkflags = cnp->cn_lkflags;
	int flags = cnp->cn_flags;
	int err;

	ASSERT3S(dvp->v_type, ==, VDIR);

	if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
		return (SET_ERROR(ENOTSUP));

	if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
		err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
		if (err == 0)
			*vpp = dvp;
		return (err);
	}
	if (flags & ISDOTDOT) {
		err = vn_vget_ino_gen(dvp, zfsctl_root_vnode, NULL, lkflags, vpp);
		return (err);
	}

	if (cnp->cn_namelen >= sizeof (name))
		return (SET_ERROR(ENAMETOOLONG));

	strlcpy(name, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
	err = zfsctl_snapshot_lookup(dvp, name, &snap_id);
	if (err != 0)
		return (SET_ERROR(ENOENT));

	for (;;) {
		snapshot_setup_arg_t ssa;

		ssa.snap_name = name;
		ssa.snap_id = snap_id;
		err = sfs_vgetx(dvp->v_mount, LK_SHARED, ZFSCTL_INO_SNAPDIR,
		    snap_id, "zfs", &zfsctl_ops_snapshot,
		    zfsctl_snapshot_vnode_setup, &ssa, vpp);
		if (err != 0)
			return (err);

		/* Check if a new vnode has just been created. */
		if (VOP_ISLOCKED(*vpp) == LK_EXCLUSIVE)
			break;

		/*
		 * Check if a snapshot is already mounted on top of the vnode.
		 */
		err = zfsctl_mounted_here(vpp, lkflags);
		if (err != EJUSTRETURN)
			return (err);

		/*
		 * If the vnode is not covered, then either the mount operation
		 * is in progress or the snapshot has already been unmounted
		 * but the vnode hasn't been inactivated and reclaimed yet.
		 * We can try to re-use the vnode in the latter case.
		 */
		VI_LOCK(*vpp);
		if (((*vpp)->v_iflag & VI_MOUNT) == 0) {
			VI_UNLOCK(*vpp);
			/*
			 * Upgrade to exclusive lock in order to:
			 * - avoid race conditions
			 * - satisfy the contract of mount_snapshot()
			 */
			err = VOP_LOCK(*vpp, LK_TRYUPGRADE);
			if (err == 0)
				break;
		} else {
			VI_UNLOCK(*vpp);
		}

		/*
		 * In this state we can loop on uncontested locks and starve
		 * the thread doing the lengthy, non-trivial mount operation.
		 * So, yield to prevent that from happening.
		 */
		vput(*vpp);
		kern_yield(PRI_USER);
	}

	VERIFY0(zfsctl_snapshot_zname(dvp, name, sizeof (fullname), fullname));

	mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
	    strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(name) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len,
	    "%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
	    dvp->v_vfsp->mnt_stat.f_mntonname, name);

-	err = mount_snapshot(curthread, vpp, "zfs", mountpoint, fullname, 0);
+	err = mount_snapshot(curthread, vpp, "zfs", mountpoint, fullname, 0,
+	    dvp->v_vfsp);
	kmem_free(mountpoint, mountpoint_len);
	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/ accessible over NFS
		 * without requiring manual mounts of .
		 */
		ASSERT3P(VTOZ(*vpp)->z_zfsvfs, !=, zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;

		/* Clear the root flag (set via VFS_ROOT) as well.
*/ (*vpp)->v_vflag &= ~VV_ROOT; } if (err != 0) *vpp = NULL; return (err); } static int zfsctl_snapdir_readdir(struct vop_readdir_args *ap) { char snapname[ZFS_MAX_DATASET_NAME_LEN]; struct dirent entry; vnode_t *vp = ap->a_vp; zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; zfs_uio_t uio; int *eofp = ap->a_eofflag; off_t dots_offset; int error; zfs_uio_init(&uio, ap->a_uio); ASSERT3S(vp->v_type, ==, VDIR); error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap, &uio, &dots_offset); if (error != 0) { if (error == ENAMETOOLONG) /* ran out of destination space */ error = 0; return (error); } ZFS_ENTER(zfsvfs); for (;;) { uint64_t cookie; uint64_t id; cookie = zfs_uio_offset(&uio) - dots_offset; dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG); error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname), snapname, &id, &cookie, NULL); dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG); if (error != 0) { if (error == ENOENT) { if (eofp != NULL) *eofp = 1; error = 0; } ZFS_EXIT(zfsvfs); return (error); } entry.d_fileno = id; entry.d_type = DT_DIR; strcpy(entry.d_name, snapname); entry.d_namlen = strlen(entry.d_name); entry.d_reclen = sizeof (entry); error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio)); if (error != 0) { if (error == ENAMETOOLONG) error = 0; ZFS_EXIT(zfsvfs); return (SET_ERROR(error)); } zfs_uio_setoffset(&uio, cookie + dots_offset); } /* NOTREACHED */ } static int zfsctl_snapdir_getattr(struct vop_getattr_args *ap) { vnode_t *vp = ap->a_vp; vattr_t *vap = ap->a_vap; zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data; dsl_dataset_t *ds; uint64_t snap_count; int err; ZFS_ENTER(zfsvfs); ds = dmu_objset_ds(zfsvfs->z_os); zfsctl_common_getattr(vp, vap); vap->va_ctime = dmu_objset_snap_cmtime(zfsvfs->z_os); vap->va_mtime = vap->va_ctime; vap->va_birthtime = vap->va_ctime; if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) { err = zap_count(dmu_objset_pool(ds->ds_objset)->dp_meta_objset, dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count); if (err != 0) { ZFS_EXIT(zfsvfs); return (err); } vap->va_nlink += snap_count; } vap->va_size = vap->va_nlink; ZFS_EXIT(zfsvfs); return (0); } static struct vop_vector zfsctl_ops_snapdir = { .vop_default = &default_vnodeops, #if __FreeBSD_version >= 1300121 .vop_fplookup_vexec = VOP_EAGAIN, #endif #if __FreeBSD_version >= 1300139 .vop_fplookup_symlink = VOP_EAGAIN, #endif .vop_open = zfsctl_common_open, .vop_close = zfsctl_common_close, .vop_getattr = zfsctl_snapdir_getattr, .vop_access = zfsctl_common_access, .vop_readdir = zfsctl_snapdir_readdir, .vop_lookup = zfsctl_snapdir_lookup, .vop_reclaim = zfsctl_common_reclaim, .vop_fid = zfsctl_common_fid, .vop_print = zfsctl_common_print, .vop_pathconf = zfsctl_common_pathconf, .vop_getacl = zfsctl_common_getacl, #if __FreeBSD_version >= 1400043 .vop_add_writecount = vop_stdadd_writecount_nomsync, #endif }; VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapdir); static int zfsctl_snapshot_inactive(struct vop_inactive_args *ap) { vnode_t *vp = ap->a_vp; vrecycle(vp); return (0); } static int zfsctl_snapshot_reclaim(struct vop_reclaim_args *ap) { vnode_t *vp = ap->a_vp; void *data = vp->v_data; sfs_reclaim_vnode(vp); sfs_destroy_node(data); return (0); } static int zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap) { struct mount *mp; vnode_t *dvp; vnode_t *vp; sfs_node_t *node; size_t len; int locked; int error; vp = ap->a_vp; node = vp->v_data; len = strlen(node->sn_name); if (*ap->a_buflen < len) return (SET_ERROR(ENOMEM)); /* * Prevent unmounting of the snapshot while the vnode lock * 
is not held. That is not strictly required, but allows * us to assert that an uncovered snapshot vnode is never * "leaked". */ mp = vp->v_mountedhere; if (mp == NULL) return (SET_ERROR(ENOENT)); error = vfs_busy(mp, 0); KASSERT(error == 0, ("vfs_busy(mp, 0) failed with %d", error)); /* * We can vput the vnode as we can now depend on the reference owned * by the busied mp. But we also need to hold the vnode, because * the reference may go after vfs_unbusy() which has to be called * before we can lock the vnode again. */ locked = VOP_ISLOCKED(vp); #if __FreeBSD_version >= 1300045 enum vgetstate vs = vget_prep(vp); #else vhold(vp); #endif vput(vp); /* Look up .zfs/snapshot, our parent. */ error = zfsctl_snapdir_vnode(vp->v_mount, NULL, LK_SHARED, &dvp); if (error == 0) { VOP_UNLOCK1(dvp); *ap->a_vpp = dvp; *ap->a_buflen -= len; bcopy(node->sn_name, ap->a_buf + *ap->a_buflen, len); } vfs_unbusy(mp); #if __FreeBSD_version >= 1300045 vget_finish(vp, locked | LK_RETRY, vs); #else vget(vp, locked | LK_VNHELD | LK_RETRY, curthread); #endif return (error); } /* * These VP's should never see the light of day. They should always * be covered. */ static struct vop_vector zfsctl_ops_snapshot = { .vop_default = NULL, /* ensure very restricted access */ #if __FreeBSD_version >= 1300121 .vop_fplookup_vexec = VOP_EAGAIN, #endif #if __FreeBSD_version >= 1300139 .vop_fplookup_symlink = VOP_EAGAIN, #endif .vop_open = zfsctl_common_open, .vop_close = zfsctl_common_close, .vop_inactive = zfsctl_snapshot_inactive, #if __FreeBSD_version >= 1300045 .vop_need_inactive = vop_stdneed_inactive, #endif .vop_reclaim = zfsctl_snapshot_reclaim, .vop_vptocnp = zfsctl_snapshot_vptocnp, .vop_lock1 = vop_stdlock, .vop_unlock = vop_stdunlock, .vop_islocked = vop_stdislocked, .vop_advlockpurge = vop_stdadvlockpurge, /* called by vgone */ .vop_print = zfsctl_common_print, #if __FreeBSD_version >= 1400043 .vop_add_writecount = vop_stdadd_writecount_nomsync, #endif }; VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapshot); int zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp) { zfsvfs_t *zfsvfs __unused = vfsp->vfs_data; vnode_t *vp; int error; ASSERT3P(zfsvfs->z_ctldir, !=, NULL); *zfsvfsp = NULL; error = sfs_vnode_get(vfsp, LK_EXCLUSIVE, ZFSCTL_INO_SNAPDIR, objsetid, &vp); if (error == 0 && vp != NULL) { /* * XXX Probably need to at least reference, if not busy, the mp. */ if (vp->v_mountedhere != NULL) *zfsvfsp = vp->v_mountedhere->mnt_data; vput(vp); } if (*zfsvfsp == NULL) return (SET_ERROR(EINVAL)); return (0); } /* * Unmount any snapshots for the given filesystem. This is called from * zfs_umount() - if we have a ctldir, then go through and unmount all the * snapshots. */ int zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr) { char snapname[ZFS_MAX_DATASET_NAME_LEN]; zfsvfs_t *zfsvfs = vfsp->vfs_data; struct mount *mp; vnode_t *vp; uint64_t cookie; int error; ASSERT3P(zfsvfs->z_ctldir, !=, NULL); cookie = 0; for (;;) { uint64_t id; dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG); error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname), snapname, &id, &cookie, NULL); dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG); if (error != 0) { if (error == ENOENT) error = 0; break; } for (;;) { error = sfs_vnode_get(vfsp, LK_EXCLUSIVE, ZFSCTL_INO_SNAPDIR, id, &vp); if (error != 0 || vp == NULL) break; mp = vp->v_mountedhere; /* * v_mountedhere being NULL means that the * (uncovered) vnode is in a transient state * (mounting or unmounting), so loop until it * settles down. 
			 */
			if (mp != NULL)
				break;
			vput(vp);
		}
		if (error != 0)
			break;
		if (vp == NULL)
			continue;	/* no mountpoint, nothing to do */

		/*
		 * The mount-point vnode is kept locked to avoid spurious EBUSY
		 * from a concurrent umount.
		 * The vnode lock must have recursive locking enabled.
		 */
		vfs_ref(mp);
		error = dounmount(mp, fflags, curthread);
		KASSERT_IMPLY(error == 0, vrefcnt(vp) == 1,
		    ("extra references after unmount"));
		vput(vp);
		if (error != 0)
			break;
	}
	KASSERT_IMPLY((fflags & MS_FORCE) != 0, error == 0,
	    ("force unmounting failed"));
	return (error);
}

int
zfsctl_snapshot_unmount(const char *snapname, int flags __unused)
{
	vfs_t *vfsp = NULL;
	zfsvfs_t *zfsvfs = NULL;

	if (strchr(snapname, '@') == NULL)
		return (0);

	int err = getzfsvfs(snapname, &zfsvfs);
	if (err != 0) {
		ASSERT3P(zfsvfs, ==, NULL);
		return (0);
	}
	vfsp = zfsvfs->z_vfs;

	ASSERT(!dsl_pool_config_held(dmu_objset_pool(zfsvfs->z_os)));

	vfs_ref(vfsp);
	vfs_unbusy(vfsp);
	return (dounmount(vfsp, MS_FORCE, curthread));
}