diff --git a/sys/fs/fuse/fuse_internal.c b/sys/fs/fuse/fuse_internal.c index ffa4a40095b2..403116593bd9 100644 --- a/sys/fs/fuse/fuse_internal.c +++ b/sys/fs/fuse/fuse_internal.c @@ -1,1337 +1,1346 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. and Amit Singh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Copyright (c) 2019 The FreeBSD Foundation * * Portions of this software were developed by BFF Storage Systems, LLC under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fuse.h" #include "fuse_file.h" #include "fuse_internal.h" #include "fuse_io.h" #include "fuse_ipc.h" #include "fuse_node.h" #include "fuse_file.h" SDT_PROVIDER_DECLARE(fusefs); /* * Fuse trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(fusefs, , internal, trace, "int", "char*"); #ifdef ZERO_PAD_INCOMPLETE_BUFS static int isbzero(void *buf, size_t len); #endif counter_u64_t fuse_lookup_cache_hits; counter_u64_t fuse_lookup_cache_misses; SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_hits, CTLFLAG_RD, &fuse_lookup_cache_hits, "number of positive cache hits in lookup"); SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_misses, CTLFLAG_RD, &fuse_lookup_cache_misses, "number of cache misses in lookup"); int fuse_internal_get_cached_vnode(struct mount* mp, ino_t ino, int flags, struct vnode **vpp) { struct bintime now; struct thread *td = curthread; uint64_t nodeid = ino; int error; *vpp = NULL; error = vfs_hash_get(mp, fuse_vnode_hash(nodeid), flags, td, vpp, fuse_vnode_cmp, &nodeid); if (error) return error; /* * Check the entry cache timeout. We have to do this within fusefs * instead of by using cache_enter_time/cache_lookup because those * routines are only intended to work with pathnames, not inodes */ if (*vpp != NULL) { getbinuptime(&now); if (bintime_cmp(&(VTOFUD(*vpp)->entry_cache_timeout), &now, >)){ counter_u64_add(fuse_lookup_cache_hits, 1); return 0; } else { /* Entry cache timeout */ counter_u64_add(fuse_lookup_cache_misses, 1); cache_purge(*vpp); vput(*vpp); *vpp = NULL; } } return 0; } SDT_PROBE_DEFINE0(fusefs, , internal, access_vadmin); /* Synchronously send a FUSE_ACCESS operation */ int fuse_internal_access(struct vnode *vp, accmode_t mode, struct thread *td, struct ucred *cred) { int err = 0; uint32_t mask = F_OK; int dataflags; int vtype; struct mount *mp; struct fuse_dispatcher fdi; struct fuse_access_in *fai; struct fuse_data *data; mp = vnode_mount(vp); vtype = vnode_vtype(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; if (mode == 0) return 0; if (mode & VMODIFY_PERMS && vfs_isrdonly(mp)) { switch (vp->v_type) { case VDIR: /* FALLTHROUGH */ case VLNK: /* FALLTHROUGH */ case VREG: return EROFS; default: break; } } /* Unless explicitly permitted, deny everyone except the fs owner. */ if (!(dataflags & FSESS_DAEMON_CAN_SPY)) { if (fuse_match_cred(data->daemoncred, cred)) return EPERM; } if (dataflags & FSESS_DEFAULT_PERMISSIONS) { struct vattr va; fuse_internal_getattr(vp, &va, cred, td); return vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid, mode, cred); } if (mode & VADMIN) { /* * The FUSE protocol doesn't have an equivalent of VADMIN, so * it's a bug if we ever reach this point with that bit set. 
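 * (VADMIN is the bit FreeBSD requests for owner-only operations such as chmod(2) and chown(2); callers are expected to resolve it via vaccess() or the default_permissions path above rather than ask the daemon.)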
*/ SDT_PROBE0(fusefs, , internal, access_vadmin); } if (fsess_not_impl(mp, FUSE_ACCESS)) return 0; if ((mode & (VWRITE | VAPPEND)) != 0) mask |= W_OK; if ((mode & VREAD) != 0) mask |= R_OK; if ((mode & VEXEC) != 0) mask |= X_OK; fdisp_init(&fdi, sizeof(*fai)); fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred); fai = fdi.indata; fai->mask = mask; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_ACCESS); err = 0; } return err; } /* * Cache FUSE attributes from attr, in attribute cache associated with vnode * 'vp'. Optionally, if argument 'vap' is not NULL, store a copy of the * converted attributes there as well. * * If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do * return the result to the caller). */ void fuse_internal_cache_attrs(struct vnode *vp, struct fuse_attr *attr, uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap, bool from_server) { struct mount *mp; struct fuse_vnode_data *fvdat; struct fuse_data *data; struct vattr *vp_cache_at; mp = vnode_mount(vp); fvdat = VTOFUD(vp); data = fuse_get_mpdata(mp); ASSERT_VOP_ELOCKED(vp, "fuse_internal_cache_attrs"); fuse_validity_2_bintime(attr_valid, attr_valid_nsec, &fvdat->attr_cache_timeout); if (vnode_isreg(vp) && fvdat->cached_attrs.va_size != VNOVAL && attr->size != fvdat->cached_attrs.va_size) { if (data->cache_mode == FUSE_CACHE_WB && fvdat->flag & FN_SIZECHANGE) { const char *msg; /* * The server changed the file's size even though we're * using writeback caching and we have outstanding * dirty writes! That's a server bug. */ if (fuse_libabi_geq(data, 7, 23)) { msg = "writeback cache incoherent! " "To prevent data corruption, disable " "the writeback cache according to your " "FUSE server's documentation."; } else { msg = "writeback cache incoherent! " "To prevent data corruption, disable " "the writeback cache by setting " "vfs.fusefs.data_cache_mode to 0 or 1."; } fuse_warn(data, FSESS_WARN_WB_CACHE_INCOHERENT, msg); } if (fuse_vnode_attr_cache_valid(vp) && data->cache_mode != FUSE_CACHE_UC) { /* * The server changed the file's size even though we * have it cached and our cache has not yet expired. * That's a bug. */ fuse_warn(data, FSESS_WARN_CACHE_INCOHERENT, "cache incoherent! " "To prevent " "data corruption, disable the data cache " "by mounting with -o direct_io, or as " "directed otherwise by your FUSE server's " "documentation."); } } /* Fix our buffers if the filesize changed without us knowing */ if (vnode_isreg(vp) && attr->size != fvdat->cached_attrs.va_size) { (void)fuse_vnode_setsize(vp, attr->size, from_server); fvdat->cached_attrs.va_size = attr->size; } if (attr_valid > 0 || attr_valid_nsec > 0) vp_cache_at = &(fvdat->cached_attrs); else if (vap != NULL) vp_cache_at = vap; else return; vattr_null(vp_cache_at); vp_cache_at->va_fsid = mp->mnt_stat.f_fsid.val[0]; vp_cache_at->va_fileid = attr->ino; vp_cache_at->va_mode = attr->mode & ~S_IFMT; vp_cache_at->va_nlink = attr->nlink; vp_cache_at->va_uid = attr->uid; vp_cache_at->va_gid = attr->gid; vp_cache_at->va_rdev = attr->rdev; vp_cache_at->va_size = attr->size; /* XXX on i386, seconds are truncated to 32 bits */ vp_cache_at->va_atime.tv_sec = attr->atime; vp_cache_at->va_atime.tv_nsec = attr->atimensec; vp_cache_at->va_mtime.tv_sec = attr->mtime; vp_cache_at->va_mtime.tv_nsec = attr->mtimensec; vp_cache_at->va_ctime.tv_sec = attr->ctime; vp_cache_at->va_ctime.tv_nsec = attr->ctimensec; if (fuse_libabi_geq(data, 7, 9) && attr->blksize > 0) vp_cache_at->va_blocksize = attr->blksize; else vp_cache_at->va_blocksize = PAGE_SIZE; vp_cache_at->va_type = IFTOVT(attr->mode); vp_cache_at->va_bytes = attr->blocks * S_BLKSIZE; vp_cache_at->va_flags = 0; if (vap != vp_cache_at && vap != NULL) memcpy(vap, vp_cache_at, sizeof(*vap)); } /* fsync */ int fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio) { if (tick->tk_aw_ohead.error == ENOSYS) { fsess_set_notimpl(tick->tk_data->mp, fticket_opcode(tick)); } return 0; } int fuse_internal_fsync(struct vnode *vp, struct thread *td, int waitfor, bool datasync) { struct fuse_fsync_in *ffsi = NULL; struct fuse_dispatcher fdi; struct fuse_filehandle *fufh; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct mount *mp = vnode_mount(vp); int op = FUSE_FSYNC; int err = 0; if (fsess_not_impl(vnode_mount(vp), (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) { return 0; } if (vnode_isdir(vp)) op = FUSE_FSYNCDIR; if (fsess_not_impl(mp, op)) return 0; fdisp_init(&fdi, sizeof(*ffsi)); /* * fsync every open file handle for this file, because we can't be sure * which file handle the caller is really referring to.
*/ LIST_FOREACH(fufh, &fvdat->handles, next) { fdi.iosize = sizeof(*ffsi); if (ffsi == NULL) fdisp_make_vp(&fdi, op, vp, td, NULL); else fdisp_refresh_vp(&fdi, op, vp, td, NULL); ffsi = fdi.indata; ffsi->fh = fufh->fh_id; ffsi->fsync_flags = 0; if (datasync) ffsi->fsync_flags = FUSE_FSYNC_FDATASYNC; if (waitfor == MNT_WAIT) { err = fdisp_wait_answ(&fdi); } else { fuse_insert_callback(fdi.tick, fuse_internal_fsync_callback); fuse_insert_message(fdi.tick, false); } if (err == ENOSYS) { /* ENOSYS means "success, and don't call again" */ fsess_set_notimpl(mp, op); err = 0; break; } } fdisp_destroy(&fdi); return err; } /* Asynchronous invalidation */ SDT_PROBE_DEFINE3(fusefs, , internal, invalidate_entry, "struct vnode*", "struct fuse_notify_inval_entry_out*", "char*"); int fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio) { struct fuse_notify_inval_entry_out fnieo; struct componentname cn; struct vnode *dvp, *vp; char name[PATH_MAX]; int err; if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0) return (err); if (fnieo.namelen >= sizeof(name)) return (EINVAL); if ((err = uiomove(name, fnieo.namelen, uio)) != 0) return (err); name[fnieo.namelen] = '\0'; /* fusefs does not cache "." or ".." entries */ if (strncmp(name, ".", sizeof(".")) == 0 || strncmp(name, "..", sizeof("..")) == 0) return (0); if (fnieo.parent == FUSE_ROOT_ID) err = VFS_ROOT(mp, LK_SHARED, &dvp); else err = fuse_internal_get_cached_vnode( mp, fnieo.parent, LK_SHARED, &dvp); SDT_PROBE3(fusefs, , internal, invalidate_entry, dvp, &fnieo, name); /* * If dvp is not in the cache, then it must've been reclaimed. And * since fuse_vnop_reclaim does a cache_purge, name's entry must've * been invalidated already. So we can safely return if dvp == NULL. */ if (err != 0 || dvp == NULL) return (err); /* * XXX we can't check dvp's generation because the FUSE invalidate * entry message doesn't include it. Worst case is that we invalidate * an entry that didn't need to be invalidated. */ cn.cn_nameiop = LOOKUP; cn.cn_flags = 0; /* !MAKEENTRY means free cached entry */ cn.cn_thread = curthread; cn.cn_cred = curthread->td_ucred; cn.cn_lkflags = LK_SHARED; cn.cn_pnbuf = NULL; cn.cn_nameptr = name; cn.cn_namelen = fnieo.namelen; err = cache_lookup(dvp, &vp, &cn, NULL, NULL); MPASS(err == 0); fuse_vnode_clear_attr_cache(dvp); vput(dvp); return (0); } SDT_PROBE_DEFINE2(fusefs, , internal, invalidate_inode, "struct vnode*", "struct fuse_notify_inval_inode_out *"); int fuse_internal_invalidate_inode(struct mount *mp, struct uio *uio) { struct fuse_notify_inval_inode_out fniio; struct vnode *vp; int err; if ((err = uiomove(&fniio, sizeof(fniio), uio)) != 0) return (err); if (fniio.ino == FUSE_ROOT_ID) err = VFS_ROOT(mp, LK_EXCLUSIVE, &vp); else err = fuse_internal_get_cached_vnode(mp, fniio.ino, LK_SHARED, &vp); SDT_PROBE2(fusefs, , internal, invalidate_inode, vp, &fniio); if (err != 0 || vp == NULL) return (err); /* * XXX we can't check vp's generation because the FUSE invalidate * entry message doesn't include it. Worst case is that we invalidate * an inode that didn't need to be invalidated. */ /* * Flush and invalidate buffers if off >= 0. Technically we only need * to flush and invalidate the range of offsets [off, off + len), but * for simplicity's sake we do everything.
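 * A negative off means the server only wants the attributes invalidated, not the data cache. As an illustrative sketch (names per the libfuse low-level API, not this file), a server triggers this path with a call like fuse_lowlevel_notify_inval_inode(se, ino, off, len).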
*/ if (fniio.off >= 0) fuse_io_invalbuf(vp, curthread); fuse_vnode_clear_attr_cache(vp); vput(vp); return (0); } /* mknod */ int fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap) { struct fuse_data *data; struct fuse_mknod_in fmni; size_t insize; data = fuse_get_mpdata(dvp->v_mount); fmni.mode = MAKEIMODE(vap->va_type, vap->va_mode); fmni.rdev = vap->va_rdev; if (fuse_libabi_geq(data, 7, 12)) { insize = sizeof(fmni); fmni.umask = curthread->td_proc->p_pd->pd_cmask; } else { insize = FUSE_COMPAT_MKNOD_IN_SIZE; } return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKNOD, &fmni, insize, vap->va_type)); } /* readdir */ int fuse_internal_readdir(struct vnode *vp, struct uio *uio, off_t startoff, struct fuse_filehandle *fufh, struct fuse_iov *cookediov, int *ncookies, u_long *cookies) { int err = 0; struct fuse_dispatcher fdi; struct fuse_read_in *fri = NULL; int fnd_start; if (uio_resid(uio) == 0) return 0; fdisp_init(&fdi, 0); /* * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p * I/O). */ /* * fnd_start is set non-zero once the offset in the directory gets * to the startoff. This is done because directories must be read * from the beginning (offset == 0) when fuse_vnop_readdir() needs * to do an open of the directory. * If it is not set non-zero here, it will be set non-zero in * fuse_internal_readdir_processdata() when uio_offset == startoff. */ fnd_start = 0; if (uio->uio_offset == startoff) fnd_start = 1; while (uio_resid(uio) > 0) { fdi.iosize = sizeof(*fri); if (fri == NULL) fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL); else fdisp_refresh_vp(&fdi, FUSE_READDIR, vp, NULL, NULL); fri = fdi.indata; fri->fh = fufh->fh_id; fri->offset = uio_offset(uio); fri->size = MIN(uio->uio_resid, fuse_get_mpdata(vp->v_mount)->max_read); if ((err = fdisp_wait_answ(&fdi))) break; if ((err = fuse_internal_readdir_processdata(uio, startoff, &fnd_start, fri->size, fdi.answ, fdi.iosize, cookediov, ncookies, &cookies))) break; } fdisp_destroy(&fdi); return ((err == -1) ? 0 : err); } /* * Return -1 to indicate that this readdir is finished, 0 if it copied * all the directory data read in and it may be possible to read more * and greater than 0 for a failure. */ int fuse_internal_readdir_processdata(struct uio *uio, off_t startoff, int *fnd_start, size_t reqsize, void *buf, size_t bufsize, struct fuse_iov *cookediov, int *ncookies, u_long **cookiesp) { int err = 0; int oreclen; size_t freclen; struct dirent *de; struct fuse_dirent *fudge; u_long *cookies; cookies = *cookiesp; if (bufsize < FUSE_NAME_OFFSET) return -1; for (;;) { if (bufsize < FUSE_NAME_OFFSET) { err = -1; break; } fudge = (struct fuse_dirent *)buf; freclen = FUSE_DIRENT_SIZE(fudge); if (bufsize < freclen) { /* * This indicates a partial directory entry at the * end of the directory data. */ err = -1; break; } #ifdef ZERO_PAD_INCOMPLETE_BUFS if (isbzero(buf, FUSE_NAME_OFFSET)) { err = -1; break; } #endif if (!fudge->namelen || fudge->namelen > MAXNAMLEN) { err = EINVAL; break; } oreclen = GENERIC_DIRSIZ((struct pseudo_dirent *) &fudge->namelen); if (oreclen > uio_resid(uio)) { /* Out of space for the dir so we are done. */ err = -1; break; } /* * Don't start to copy the directory entries out until * the requested offset in the directory is found. 
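 * Entries before startoff were already returned by an earlier VOP_READDIR call, so copying them again would hand duplicate entries to the caller.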
*/ if (*fnd_start != 0) { fiov_adjust(cookediov, oreclen); bzero(cookediov->base, oreclen); de = (struct dirent *)cookediov->base; de->d_fileno = fudge->ino; de->d_off = fudge->off; de->d_reclen = oreclen; de->d_type = fudge->type; de->d_namlen = fudge->namelen; memcpy((char *)cookediov->base + sizeof(struct dirent) - MAXNAMLEN - 1, (char *)buf + FUSE_NAME_OFFSET, fudge->namelen); dirent_terminate(de); err = uiomove(cookediov->base, cookediov->len, uio); if (err) break; if (cookies != NULL) { if (*ncookies == 0) { err = -1; break; } *cookies = fudge->off; cookies++; (*ncookies)--; } } else if (startoff == fudge->off) *fnd_start = 1; buf = (char *)buf + freclen; bufsize -= freclen; uio_setoffset(uio, fudge->off); } *cookiesp = cookies; return err; } /* remove */ int fuse_internal_remove(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, enum fuse_opcode op) { struct fuse_dispatcher fdi; nlink_t nlink; int err = 0; fdisp_init(&fdi, cnp->cn_namelen + 1); fdisp_make_vp(&fdi, op, dvp, cnp->cn_thread, cnp->cn_cred); memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdi.indata)[cnp->cn_namelen] = '\0'; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); if (err) return (err); /* * Access the cached nlink even if the attr cache has expired. If * it's inaccurate, the worst that will happen is: * 1) We'll recycle the vnode even though the file has another link we * don't know about, costing a bit of cpu time, or * 2) We won't recycle the vnode even though all of its links are gone. * It will linger around until vnlru reclaims it, costing a bit of * temporary memory. */ nlink = VTOFUD(vp)->cached_attrs.va_nlink--; /* * Purge the parent's attribute cache because the daemon * should've updated its mtime and ctime. */ fuse_vnode_clear_attr_cache(dvp); /* NB: nlink could be zero if it was never cached */ if (nlink <= 1 || vnode_vtype(vp) == VDIR) { fuse_internal_vnode_disappear(vp); } else { cache_purge(vp); fuse_vnode_update(vp, FN_CTIMECHANGE); } return err; } /* rename */ int fuse_internal_rename(struct vnode *fdvp, struct componentname *fcnp, struct vnode *tdvp, struct componentname *tcnp) { struct fuse_dispatcher fdi; struct fuse_rename_in *fri; int err = 0; fdisp_init(&fdi, sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 2); fdisp_make_vp(&fdi, FUSE_RENAME, fdvp, tcnp->cn_thread, tcnp->cn_cred); fri = fdi.indata; fri->newdir = VTOI(tdvp); memcpy((char *)fdi.indata + sizeof(*fri), fcnp->cn_nameptr, fcnp->cn_namelen); ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen] = '\0'; memcpy((char *)fdi.indata + sizeof(*fri) + fcnp->cn_namelen + 1, tcnp->cn_nameptr, tcnp->cn_namelen); ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 1] = '\0'; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); return err; } /* strategy */ /* entity creation */ void fuse_internal_newentry_makerequest(struct mount *mp, uint64_t dnid, struct componentname *cnp, enum fuse_opcode op, void *buf, size_t bufsize, struct fuse_dispatcher *fdip) { fdip->iosize = bufsize + cnp->cn_namelen + 1; fdisp_make(fdip, op, mp, dnid, cnp->cn_thread, cnp->cn_cred); memcpy(fdip->indata, buf, bufsize); memcpy((char *)fdip->indata + bufsize, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdip->indata)[bufsize + cnp->cn_namelen] = '\0'; } int fuse_internal_newentry_core(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum vtype vtyp, struct fuse_dispatcher *fdip) { int err = 0; struct fuse_entry_out *feo; struct mount *mp = vnode_mount(dvp); if ((err = fdisp_wait_answ(fdip))) {
return err; } feo = fdip->answ; if ((err = fuse_internal_checkentry(feo, vtyp))) { return err; } err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp); if (err) { fuse_internal_forget_send(mp, cnp->cn_thread, cnp->cn_cred, feo->nodeid, 1); return err; } /* * Purge the parent's attribute cache because the daemon should've * updated its mtime and ctime */ fuse_vnode_clear_attr_cache(dvp); fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid, feo->attr_valid_nsec, NULL, true); return err; } int fuse_internal_newentry(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum fuse_opcode op, void *buf, size_t bufsize, enum vtype vtype) { int err; struct fuse_dispatcher fdi; struct mount *mp = vnode_mount(dvp); fdisp_init(&fdi, 0); fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf, bufsize, &fdi); err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi); fdisp_destroy(&fdi); return err; } /* entity destruction */ int fuse_internal_forget_callback(struct fuse_ticket *ftick, struct uio *uio) { fuse_internal_forget_send(ftick->tk_data->mp, curthread, NULL, ((struct fuse_in_header *)ftick->tk_ms_fiov.base)->nodeid, 1); return 0; } void fuse_internal_forget_send(struct mount *mp, struct thread *td, struct ucred *cred, uint64_t nodeid, uint64_t nlookup) { struct fuse_dispatcher fdi; struct fuse_forget_in *ffi; /* * KASSERT(nlookup > 0, ("zero-times forget for vp #%llu", * (long long unsigned) nodeid)); */ fdisp_init(&fdi, sizeof(*ffi)); fdisp_make(&fdi, FUSE_FORGET, mp, nodeid, td, cred); ffi = fdi.indata; ffi->nlookup = nlookup; fuse_insert_message(fdi.tick, false); fdisp_destroy(&fdi); } /* Fetch the vnode's attributes from the daemon */ int fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td) { struct fuse_dispatcher fdi; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_getattr_in *fgai; struct fuse_attr_out *fao; off_t old_filesize = fvdat->cached_attrs.va_size; + struct timespec old_atime = fvdat->cached_attrs.va_atime; struct timespec old_ctime = fvdat->cached_attrs.va_ctime; struct timespec old_mtime = fvdat->cached_attrs.va_mtime; enum vtype vtyp; int err; fdisp_init(&fdi, sizeof(*fgai)); fdisp_make_vp(&fdi, FUSE_GETATTR, vp, td, cred); fgai = fdi.indata; /* * We could look up a file handle and set it in fgai->fh, but that * involves extra runtime work and I'm unaware of any file systems that * care.
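 * (Protocol 7.9 and later would permit it, though: hypothetically, given some open fufh, fgai->getattr_flags = FUSE_GETATTR_FH; fgai->fh = fufh->fh_id;)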
*/ fgai->getattr_flags = 0; if ((err = fdisp_wait_answ(&fdi))) { if (err == ENOENT) fuse_internal_vnode_disappear(vp); goto out; } fao = (struct fuse_attr_out *)fdi.answ; vtyp = IFTOVT(fao->attr.mode); if (fvdat->flag & FN_SIZECHANGE) fao->attr.size = old_filesize; + if (fvdat->flag & FN_ATIMECHANGE) { + fao->attr.atime = old_atime.tv_sec; + fao->attr.atimensec = old_atime.tv_nsec; + } if (fvdat->flag & FN_CTIMECHANGE) { fao->attr.ctime = old_ctime.tv_sec; fao->attr.ctimensec = old_ctime.tv_nsec; } if (fvdat->flag & FN_MTIMECHANGE) { fao->attr.mtime = old_mtime.tv_sec; fao->attr.mtimensec = old_mtime.tv_nsec; } fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid, fao->attr_valid_nsec, vap, true); if (vtyp != vnode_vtype(vp)) { fuse_internal_vnode_disappear(vp); err = ENOENT; } out: fdisp_destroy(&fdi); return err; } /* Read a vnode's attributes from cache or fetch them from the fuse daemon */ int fuse_internal_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td) { struct vattr *attrs; if ((attrs = VTOVA(vp)) != NULL) { *vap = *attrs; /* struct copy */ return 0; } return fuse_internal_do_getattr(vp, vap, cred, td); } void fuse_internal_vnode_disappear(struct vnode *vp) { struct fuse_vnode_data *fvdat = VTOFUD(vp); ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear"); fvdat->flag |= FN_REVOKED; cache_purge(vp); } /* fuse start/stop */ SDT_PROBE_DEFINE2(fusefs, , internal, init_done, "struct fuse_data*", "struct fuse_init_out*"); int fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio) { int err = 0; struct fuse_data *data = tick->tk_data; struct fuse_init_out *fiio; if ((err = tick->tk_aw_ohead.error)) { goto out; } if ((err = fticket_pull(tick, uio))) { goto out; } fiio = fticket_resp(tick)->base; data->fuse_libabi_major = fiio->major; data->fuse_libabi_minor = fiio->minor; if (!fuse_libabi_geq(data, 7, 4)) { /* * With a little work we could support servers as old as 7.1. * But there would be little payoff. 
*/ SDT_PROBE2(fusefs, , internal, trace, 1, "userspace version too low"); err = EPROTONOSUPPORT; goto out; } if (fuse_libabi_geq(data, 7, 5)) { if (fticket_resp(tick)->len == sizeof(struct fuse_init_out) || fticket_resp(tick)->len == FUSE_COMPAT_22_INIT_OUT_SIZE) { data->max_write = fiio->max_write; if (fiio->flags & FUSE_ASYNC_READ) data->dataflags |= FSESS_ASYNC_READ; if (fiio->flags & FUSE_POSIX_LOCKS) data->dataflags |= FSESS_POSIX_LOCKS; if (fiio->flags & FUSE_EXPORT_SUPPORT) data->dataflags |= FSESS_EXPORT_SUPPORT; if (fiio->flags & FUSE_NO_OPEN_SUPPORT) data->dataflags |= FSESS_NO_OPEN_SUPPORT; if (fiio->flags & FUSE_NO_OPENDIR_SUPPORT) data->dataflags |= FSESS_NO_OPENDIR_SUPPORT; /* * Don't bother to check FUSE_BIG_WRITES, because it's * redundant with max_write */ /* * max_background and congestion_threshold are not * implemented */ } else { err = EINVAL; } } else { /* Old fixed values */ data->max_write = 4096; } if (fuse_libabi_geq(data, 7, 6)) data->max_readahead_blocks = fiio->max_readahead / maxbcachebuf; if (!fuse_libabi_geq(data, 7, 7)) fsess_set_notimpl(data->mp, FUSE_INTERRUPT); if (!fuse_libabi_geq(data, 7, 8)) { fsess_set_notimpl(data->mp, FUSE_BMAP); fsess_set_notimpl(data->mp, FUSE_DESTROY); } if (fuse_libabi_geq(data, 7, 23) && fiio->time_gran >= 1 && fiio->time_gran <= 1000000000) data->time_gran = fiio->time_gran; else data->time_gran = 1; if (!fuse_libabi_geq(data, 7, 23)) data->cache_mode = fuse_data_cache_mode; else if (fiio->flags & FUSE_WRITEBACK_CACHE) data->cache_mode = FUSE_CACHE_WB; else data->cache_mode = FUSE_CACHE_WT; if (!fuse_libabi_geq(data, 7, 24)) fsess_set_notimpl(data->mp, FUSE_LSEEK); if (!fuse_libabi_geq(data, 7, 28)) fsess_set_notimpl(data->mp, FUSE_COPY_FILE_RANGE); out: if (err) { fdata_set_dead(data); } FUSE_LOCK(); data->dataflags |= FSESS_INITED; SDT_PROBE2(fusefs, , internal, init_done, data, fiio); wakeup(&data->ticketer); FUSE_UNLOCK(); return 0; } void fuse_internal_send_init(struct fuse_data *data, struct thread *td) { struct fuse_init_in *fiii; struct fuse_dispatcher fdi; fdisp_init(&fdi, sizeof(*fiii)); fdisp_make(&fdi, FUSE_INIT, data->mp, 0, td, NULL); fiii = fdi.indata; fiii->major = FUSE_KERNEL_VERSION; fiii->minor = FUSE_KERNEL_MINOR_VERSION; /* * fusefs currently reads ahead no more than one cache block at a time. * See fuse_read_biobackend */ fiii->max_readahead = maxbcachebuf; /* * Unsupported features: * FUSE_FILE_OPS: No known FUSE server or client supports it * FUSE_ATOMIC_O_TRUNC: our VFS cannot support it * FUSE_DONT_MASK: unlike Linux, FreeBSD always applies the umask, even * when default ACLs are in use. * FUSE_SPLICE_WRITE, FUSE_SPLICE_MOVE, FUSE_SPLICE_READ: FreeBSD * doesn't have splice(2).
* FUSE_FLOCK_LOCKS: not yet implemented * FUSE_HAS_IOCTL_DIR: not yet implemented * FUSE_AUTO_INVAL_DATA: not yet implemented * FUSE_DO_READDIRPLUS: not yet implemented * FUSE_READDIRPLUS_AUTO: not yet implemented * FUSE_ASYNC_DIO: not yet implemented * FUSE_PARALLEL_DIROPS: not yet implemented * FUSE_HANDLE_KILLPRIV: not yet implemented * FUSE_POSIX_ACL: not yet implemented * FUSE_ABORT_ERROR: not yet implemented * FUSE_CACHE_SYMLINKS: not yet implemented * FUSE_MAX_PAGES: not yet implemented */ fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT | FUSE_NO_OPENDIR_SUPPORT; fuse_insert_callback(fdi.tick, fuse_internal_init_callback); fuse_insert_message(fdi.tick, false); fdisp_destroy(&fdi); } /* * Send a FUSE_SETATTR operation with no permissions checks. If cred is NULL, * send the request with root credentials */ int fuse_internal_setattr(struct vnode *vp, struct vattr *vap, struct thread *td, struct ucred *cred) { struct fuse_vnode_data *fvdat; struct fuse_dispatcher fdi; struct fuse_setattr_in *fsai; struct mount *mp; pid_t pid = td->td_proc->p_pid; struct fuse_data *data; int dataflags; int err = 0; enum vtype vtyp; int sizechanged = -1; uint64_t newsize = 0; mp = vnode_mount(vp); fvdat = VTOFUD(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; fdisp_init(&fdi, sizeof(*fsai)); fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred); if (!cred) { fdi.finh->uid = 0; fdi.finh->gid = 0; } fsai = fdi.indata; fsai->valid = 0; if (vap->va_uid != (uid_t)VNOVAL) { fsai->uid = vap->va_uid; fsai->valid |= FATTR_UID; } if (vap->va_gid != (gid_t)VNOVAL) { fsai->gid = vap->va_gid; fsai->valid |= FATTR_GID; } if (vap->va_size != VNOVAL) { struct fuse_filehandle *fufh = NULL; /* Truncate to a new value. */ fsai->size = vap->va_size; sizechanged = 1; newsize = vap->va_size; fsai->valid |= FATTR_SIZE; fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid); if (fufh) { fsai->fh = fufh->fh_id; fsai->valid |= FATTR_FH; } VTOFUD(vp)->flag &= ~FN_SIZECHANGE; } if (vap->va_atime.tv_sec != VNOVAL) { fsai->atime = vap->va_atime.tv_sec; fsai->atimensec = vap->va_atime.tv_nsec; fsai->valid |= FATTR_ATIME; if (vap->va_vaflags & VA_UTIMES_NULL) fsai->valid |= FATTR_ATIME_NOW; + } else if (fvdat->flag & FN_ATIMECHANGE) { + fsai->atime = fvdat->cached_attrs.va_atime.tv_sec; + fsai->atimensec = fvdat->cached_attrs.va_atime.tv_nsec; + fsai->valid |= FATTR_ATIME; } if (vap->va_mtime.tv_sec != VNOVAL) { fsai->mtime = vap->va_mtime.tv_sec; fsai->mtimensec = vap->va_mtime.tv_nsec; fsai->valid |= FATTR_MTIME; if (vap->va_vaflags & VA_UTIMES_NULL) fsai->valid |= FATTR_MTIME_NOW; } else if (fvdat->flag & FN_MTIMECHANGE) { fsai->mtime = fvdat->cached_attrs.va_mtime.tv_sec; fsai->mtimensec = fvdat->cached_attrs.va_mtime.tv_nsec; fsai->valid |= FATTR_MTIME; } if (fuse_libabi_geq(data, 7, 23) && fvdat->flag & FN_CTIMECHANGE) { fsai->ctime = fvdat->cached_attrs.va_ctime.tv_sec; fsai->ctimensec = fvdat->cached_attrs.va_ctime.tv_nsec; fsai->valid |= FATTR_CTIME; } if (vap->va_mode != (mode_t)VNOVAL) { fsai->mode = vap->va_mode & ALLPERMS; fsai->valid |= FATTR_MODE; } if (!fsai->valid) { goto out; } if ((err = fdisp_wait_answ(&fdi))) goto out; vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode); if (vnode_vtype(vp) != vtyp) { if (vnode_vtype(vp) == VNON && vtyp != VNON) { SDT_PROBE2(fusefs, , internal, trace, 1, "FUSE: Dang!
" "vnode_vtype is VNON and vtype isn't."); } else { /* * STALE vnode, ditch * * The vnode has changed its type "behind our back". * There's nothing really we can do, so let us just * force an internal revocation and tell the caller to * try again, if interested. */ fuse_internal_vnode_disappear(vp); err = EAGAIN; } } if (err == 0) { struct fuse_attr_out *fao = (struct fuse_attr_out*)fdi.answ; - fuse_vnode_undirty_cached_timestamps(vp); + fuse_vnode_undirty_cached_timestamps(vp, true); fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid, fao->attr_valid_nsec, NULL, false); } out: fdisp_destroy(&fdi); return err; } /* * FreeBSD clears the SUID and SGID bits on any write by a non-root user. */ void fuse_internal_clear_suid_on_write(struct vnode *vp, struct ucred *cred, struct thread *td) { struct fuse_data *data; struct mount *mp; struct vattr va; int dataflags; mp = vnode_mount(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; ASSERT_VOP_LOCKED(vp, __func__); if (dataflags & FSESS_DEFAULT_PERMISSIONS) { if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) { fuse_internal_getattr(vp, &va, cred, td); if (va.va_mode & (S_ISUID | S_ISGID)) { mode_t mode = va.va_mode & ~(S_ISUID | S_ISGID); /* Clear all vattr fields except mode */ vattr_null(&va); va.va_mode = mode; /* * Ignore fuse_internal_setattr's return value, * because at this point the write operation has * already succeeded and we don't want to return * failing status for that. */ (void)fuse_internal_setattr(vp, &va, td, NULL); } } } } #ifdef ZERO_PAD_INCOMPLETE_BUFS static int isbzero(void *buf, size_t len) { int i; for (i = 0; i < len; i++) { if (((char *)buf)[i]) return (0); } return (1); } #endif void fuse_internal_init(void) { fuse_lookup_cache_misses = counter_u64_alloc(M_WAITOK); fuse_lookup_cache_hits = counter_u64_alloc(M_WAITOK); } void fuse_internal_destroy(void) { counter_u64_free(fuse_lookup_cache_hits); counter_u64_free(fuse_lookup_cache_misses); } diff --git a/sys/fs/fuse/fuse_io.c b/sys/fs/fuse/fuse_io.c index a240217fcc08..78398a990d7d 100644 --- a/sys/fs/fuse/fuse_io.c +++ b/sys/fs/fuse/fuse_io.c @@ -1,1131 +1,1132 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Copyright (c) 2019 The FreeBSD Foundation * * Portions of this software were developed by BFF Storage Systems, LLC under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fuse.h" #include "fuse_file.h" #include "fuse_node.h" #include "fuse_internal.h" #include "fuse_ipc.h" #include "fuse_io.h" /* * Set in a struct buf to indicate that the write came from the buffer cache * and the originating cred and pid are no longer known. */ #define B_FUSEFS_WRITE_CACHE B_FS_FLAG1 SDT_PROVIDER_DECLARE(fusefs); /* * Fuse trace probe: * arg0: verbosity. 
Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(fusefs, , io, trace, "int", "char*"); static int fuse_inval_buf_range(struct vnode *vp, off_t filesize, off_t start, off_t end); static int fuse_read_directbackend(struct vnode *vp, struct uio *uio, struct ucred *cred, struct fuse_filehandle *fufh); static int fuse_read_biobackend(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred, struct fuse_filehandle *fufh, pid_t pid); static int fuse_write_directbackend(struct vnode *vp, struct uio *uio, struct ucred *cred, struct fuse_filehandle *fufh, off_t filesize, int ioflag, bool pages); static int fuse_write_biobackend(struct vnode *vp, struct uio *uio, struct ucred *cred, struct fuse_filehandle *fufh, int ioflag, pid_t pid); /* Invalidate a range of cached data, whether dirty or not */ static int fuse_inval_buf_range(struct vnode *vp, off_t filesize, off_t start, off_t end) { struct buf *bp; daddr_t left_lbn, end_lbn, right_lbn; off_t new_filesize; int iosize, left_on, right_on, right_blksize; iosize = fuse_iosize(vp); left_lbn = start / iosize; end_lbn = howmany(end, iosize); left_on = start & (iosize - 1); if (left_on != 0) { bp = getblk(vp, left_lbn, iosize, PCATCH, 0, 0); if ((bp->b_flags & B_CACHE) != 0 && bp->b_dirtyend >= left_on) { /* * Flush the dirty buffer, because we don't have a * byte-granular way to record which parts of the * buffer are valid. */ bwrite(bp); if (bp->b_error) return (bp->b_error); } else { brelse(bp); } } right_on = end & (iosize - 1); if (right_on != 0) { right_lbn = end / iosize; new_filesize = MAX(filesize, end); right_blksize = MIN(iosize, new_filesize - iosize * right_lbn); bp = getblk(vp, right_lbn, right_blksize, PCATCH, 0, 0); if ((bp->b_flags & B_CACHE) != 0 && bp->b_dirtyoff < right_on) { /* * Flush the dirty buffer, because we don't have a * byte-granular way to record which parts of the * buffer are valid. */ bwrite(bp); if (bp->b_error) return (bp->b_error); } else { brelse(bp); } } v_inval_buf_range(vp, left_lbn, end_lbn, iosize); return (0); } SDT_PROBE_DEFINE5(fusefs, , io, io_dispatch, "struct vnode*", "struct uio*", "int", "struct ucred*", "struct fuse_filehandle*"); SDT_PROBE_DEFINE4(fusefs, , io, io_dispatch_filehandles_closed, "struct vnode*", "struct uio*", "int", "struct ucred*"); int fuse_io_dispatch(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred, pid_t pid) { struct fuse_filehandle *fufh; int err, directio; int fflag; bool closefufh = false; MPASS(vp->v_type == VREG || vp->v_type == VDIR); fflag = (uio->uio_rw == UIO_READ) ? FREAD : FWRITE; err = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid); if (err == EBADF && vnode_mount(vp)->mnt_flag & MNT_EXPORTED) { /* * nfsd will do I/O without first doing VOP_OPEN. We * must implicitly open the file here */ err = fuse_filehandle_open(vp, fflag, &fufh, curthread, cred); closefufh = true; } else if (err) { SDT_PROBE4(fusefs, , io, io_dispatch_filehandles_closed, vp, uio, ioflag, cred); printf("FUSE: io dispatch: filehandles are closed\n"); return err; } if (err) goto out; SDT_PROBE5(fusefs, , io, io_dispatch, vp, uio, ioflag, cred, fufh); /* * Ideally, when the daemon asks for direct io at open time, the * standard file flag should be set according to this, so that would * just change the default mode, which later on could be changed via * fcntl(2). * But this doesn't work, the O_DIRECT flag gets cleared at some point * (don't know where).
So to make any use of the Fuse direct_io option, * we hardwire it into the file's private data (similarly to Linux, * btw.). */ directio = (ioflag & IO_DIRECT) || !fsess_opt_datacache(vnode_mount(vp)); switch (uio->uio_rw) { case UIO_READ: + fuse_vnode_update(vp, FN_ATIMECHANGE); if (directio) { SDT_PROBE2(fusefs, , io, trace, 1, "direct read of vnode"); err = fuse_read_directbackend(vp, uio, cred, fufh); } else { SDT_PROBE2(fusefs, , io, trace, 1, "buffered read of vnode"); err = fuse_read_biobackend(vp, uio, ioflag, cred, fufh, pid); } break; case UIO_WRITE: fuse_vnode_update(vp, FN_MTIMECHANGE | FN_CTIMECHANGE); if (directio) { off_t start, end, filesize; bool pages = (ioflag & IO_VMIO) != 0; SDT_PROBE2(fusefs, , io, trace, 1, "direct write of vnode"); err = fuse_vnode_size(vp, &filesize, cred, curthread); if (err) goto out; start = uio->uio_offset; end = start + uio->uio_resid; if (!pages) { err = fuse_inval_buf_range(vp, filesize, start, end); if (err) return (err); } err = fuse_write_directbackend(vp, uio, cred, fufh, filesize, ioflag, pages); } else { SDT_PROBE2(fusefs, , io, trace, 1, "buffered write of vnode"); if (!fsess_opt_writeback(vnode_mount(vp))) ioflag |= IO_SYNC; err = fuse_write_biobackend(vp, uio, cred, fufh, ioflag, pid); } fuse_internal_clear_suid_on_write(vp, cred, uio->uio_td); break; default: panic("uninterpreted mode passed to fuse_io_dispatch"); } out: if (closefufh) fuse_filehandle_close(vp, fufh, curthread, cred); return (err); } SDT_PROBE_DEFINE4(fusefs, , io, read_bio_backend_start, "int", "int", "int", "int"); SDT_PROBE_DEFINE2(fusefs, , io, read_bio_backend_feed, "int", "struct buf*"); SDT_PROBE_DEFINE4(fusefs, , io, read_bio_backend_end, "int", "ssize_t", "int", "struct buf*"); static int fuse_read_biobackend(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred, struct fuse_filehandle *fufh, pid_t pid) { struct buf *bp; struct mount *mp; struct fuse_data *data; daddr_t lbn, nextlbn; int bcount, nextsize; int err, n = 0, on = 0, seqcount; off_t filesize; const int biosize = fuse_iosize(vp); mp = vnode_mount(vp); data = fuse_get_mpdata(mp); if (uio->uio_offset < 0) return (EINVAL); seqcount = ioflag >> IO_SEQSHIFT; err = fuse_vnode_size(vp, &filesize, cred, curthread); if (err) return err; for (err = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) { if (fuse_isdeadfs(vp)) { err = ENXIO; break; } if (filesize - uio->uio_offset <= 0) break; lbn = uio->uio_offset / biosize; on = uio->uio_offset & (biosize - 1); if ((off_t)lbn * biosize >= filesize) { bcount = 0; } else if ((off_t)(lbn + 1) * biosize > filesize) { bcount = filesize - (off_t)lbn *biosize; } else { bcount = biosize; } nextlbn = lbn + 1; nextsize = MIN(biosize, filesize - nextlbn * biosize); SDT_PROBE4(fusefs, , io, read_bio_backend_start, biosize, (int)lbn, on, bcount); if (bcount < biosize) { /* If near EOF, don't do readahead */ err = bread(vp, lbn, bcount, NOCRED, &bp); } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { /* Try clustered read */ long totread = uio->uio_resid + on; seqcount = MIN(seqcount, data->max_readahead_blocks + 1); err = cluster_read(vp, filesize, lbn, bcount, NOCRED, totread, seqcount, 0, &bp); } else if (seqcount > 1 && data->max_readahead_blocks >= 1) { /* Try non-clustered readahead */ err = breadn(vp, lbn, bcount, &nextlbn, &nextsize, 1, NOCRED, &bp); } else { /* Just read what was requested */ err = bread(vp, lbn, bcount, NOCRED, &bp); } if (err) { brelse(bp); bp = NULL; break; } /* * on is the offset into the current bp. 
Figure out how many * bytes we can copy out of the bp. Note that bcount is * NOT DEV_BSIZE aligned. * * Then figure out how many bytes we can copy into the uio. */ n = 0; if (on < bcount - bp->b_resid) n = MIN((unsigned)(bcount - bp->b_resid - on), uio->uio_resid); if (n > 0) { SDT_PROBE2(fusefs, , io, read_bio_backend_feed, n, bp); err = uiomove(bp->b_data + on, n, uio); } vfs_bio_brelse(bp, ioflag); SDT_PROBE4(fusefs, , io, read_bio_backend_end, err, uio->uio_resid, n, bp); if (bp->b_resid > 0) { /* Short read indicates EOF */ break; } } return (err); } SDT_PROBE_DEFINE1(fusefs, , io, read_directbackend_start, "struct fuse_read_in*"); SDT_PROBE_DEFINE3(fusefs, , io, read_directbackend_complete, "struct fuse_dispatcher*", "struct fuse_read_in*", "struct uio*"); static int fuse_read_directbackend(struct vnode *vp, struct uio *uio, struct ucred *cred, struct fuse_filehandle *fufh) { struct fuse_data *data; struct fuse_dispatcher fdi; struct fuse_read_in *fri; int err = 0; data = fuse_get_mpdata(vp->v_mount); if (uio->uio_resid == 0) return (0); fdisp_init(&fdi, 0); /* * XXX In "normal" case we use an intermediate kernel buffer for * transmitting data from daemon's context to ours. Eventually, we should * get rid of this. Anyway, if the target uio lives in sysspace (we are * called from pageops), and the input data doesn't need kernel-side * processing (we are not called from readdir) we can already invoke * an optimized, "peer-to-peer" I/O routine. */ while (uio->uio_resid > 0) { fdi.iosize = sizeof(*fri); fdisp_make_vp(&fdi, FUSE_READ, vp, uio->uio_td, cred); fri = fdi.indata; fri->fh = fufh->fh_id; fri->offset = uio->uio_offset; fri->size = MIN(uio->uio_resid, fuse_get_mpdata(vp->v_mount)->max_read); if (fuse_libabi_geq(data, 7, 9)) { /* See comment regarding FUSE_WRITE_LOCKOWNER */ fri->read_flags = 0; fri->flags = fufh_type_2_fflags(fufh->fufh_type); } SDT_PROBE1(fusefs, , io, read_directbackend_start, fri); if ((err = fdisp_wait_answ(&fdi))) goto out; SDT_PROBE3(fusefs, , io, read_directbackend_complete, &fdi, fri, uio); if ((err = uiomove(fdi.answ, MIN(fri->size, fdi.iosize), uio))) break; if (fdi.iosize < fri->size) { /* * Short read. Should only happen at EOF or with * direct io. */ break; } } out: fdisp_destroy(&fdi); return (err); } static int fuse_write_directbackend(struct vnode *vp, struct uio *uio, struct ucred *cred, struct fuse_filehandle *fufh, off_t filesize, int ioflag, bool pages) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_data *data; struct fuse_write_in *fwi; struct fuse_write_out *fwo; struct fuse_dispatcher fdi; size_t chunksize; void *fwi_data; off_t as_written_offset; int diff; int err = 0; bool direct_io = fufh->fuse_open_flags & FOPEN_DIRECT_IO; bool wrote_anything = false; uint32_t write_flags; data = fuse_get_mpdata(vp->v_mount); /* * Don't set FUSE_WRITE_LOCKOWNER in write_flags. It can't be set * accurately when using POSIX AIO, libfuse doesn't use it, and I'm not * aware of any file systems that do. It was an attempt to add * Linux-style mandatory locking to the FUSE protocol, but mandatory * locking is deprecated even on Linux. See Linux commit * f33321141b273d60cbb3a8f56a5489baad82ba5e . */ /* * Set FUSE_WRITE_CACHE whenever we don't know the uid, gid, and/or pid * that originated a write. For example when writing from the * writeback cache. I don't know of a single file system that cares, * but the protocol says we're supposed to do this. 
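 * (The protocol headers describe FUSE_WRITE_CACHE as a delayed write from the page cache, where the file handle is guessed.)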
*/ write_flags = !pages && ( (ioflag & IO_DIRECT) || !fsess_opt_datacache(vnode_mount(vp)) || !fsess_opt_writeback(vnode_mount(vp))) ? 0 : FUSE_WRITE_CACHE; if (uio->uio_resid == 0) return (0); if (ioflag & IO_APPEND) uio_setoffset(uio, filesize); if (vn_rlimit_fsize(vp, uio, uio->uio_td)) return (EFBIG); fdisp_init(&fdi, 0); while (uio->uio_resid > 0) { size_t sizeof_fwi; if (fuse_libabi_geq(data, 7, 9)) { sizeof_fwi = sizeof(*fwi); } else { sizeof_fwi = FUSE_COMPAT_WRITE_IN_SIZE; } chunksize = MIN(uio->uio_resid, data->max_write); fdi.iosize = sizeof_fwi + chunksize; fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred); fwi = fdi.indata; fwi->fh = fufh->fh_id; fwi->offset = uio->uio_offset; fwi->size = chunksize; fwi->write_flags = write_flags; if (fuse_libabi_geq(data, 7, 9)) { fwi->flags = fufh_type_2_fflags(fufh->fufh_type); } fwi_data = (char *)fdi.indata + sizeof_fwi; if ((err = uiomove(fwi_data, chunksize, uio))) break; retry: err = fdisp_wait_answ(&fdi); if (err == ERESTART || err == EINTR || err == EWOULDBLOCK) { /* * Rewind the uio so dofilewrite will know it's * incomplete */ uio->uio_resid += fwi->size; uio->uio_offset -= fwi->size; /* * Change ERESTART into EINTR because we can't rewind * uio->uio_iov. Basically, once uiomove(9) has been * called, it's impossible to restart a syscall. */ if (err == ERESTART) err = EINTR; break; } else if (err) { break; } else { wrote_anything = true; } fwo = ((struct fuse_write_out *)fdi.answ); /* Adjust the uio in the case of short writes */ diff = fwi->size - fwo->size; as_written_offset = uio->uio_offset - diff; if (as_written_offset - diff > filesize) fuse_vnode_setsize(vp, as_written_offset, false); if (as_written_offset - diff >= filesize) fvdat->flag &= ~FN_SIZECHANGE; if (diff < 0) { fuse_warn(data, FSESS_WARN_WROTE_LONG, "wrote more data than we provided it."); err = EINVAL; break; } else if (diff > 0) { /* Short write */ if (!direct_io) { fuse_warn(data, FSESS_WARN_SHORT_WRITE, "short writes are only allowed with " "direct_io."); } if (ioflag & IO_DIRECT) { /* Return early */ uio->uio_resid += diff; uio->uio_offset -= diff; break; } else { /* Resend the unwritten portion of data */ fdi.iosize = sizeof_fwi + diff; /* Refresh fdi without clearing data buffer */ fdisp_refresh_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred); fwi = fdi.indata; MPASS2(fwi == fdi.indata, "FUSE dispatcher " "reallocated despite no increase in " "size?"); void *src = (char*)fwi_data + fwo->size; memmove(fwi_data, src, diff); fwi->fh = fufh->fh_id; fwi->offset = as_written_offset; fwi->size = diff; fwi->write_flags = write_flags; goto retry; } } } fdisp_destroy(&fdi); if (wrote_anything) - fuse_vnode_undirty_cached_timestamps(vp); + fuse_vnode_undirty_cached_timestamps(vp, false); return (err); } SDT_PROBE_DEFINE6(fusefs, , io, write_biobackend_start, "int64_t", "int", "int", "struct uio*", "int", "bool"); SDT_PROBE_DEFINE2(fusefs, , io, write_biobackend_append_race, "long", "int"); SDT_PROBE_DEFINE2(fusefs, , io, write_biobackend_issue, "int", "struct buf*"); static int fuse_write_biobackend(struct vnode *vp, struct uio *uio, struct ucred *cred, struct fuse_filehandle *fufh, int ioflag, pid_t pid) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct buf *bp; daddr_t lbn; off_t filesize; int bcount; int n, on, seqcount, err = 0; bool last_page; const int biosize = fuse_iosize(vp); seqcount = ioflag >> IO_SEQSHIFT; KASSERT(uio->uio_rw == UIO_WRITE, ("fuse_write_biobackend mode")); if (vp->v_type != VREG) return (EIO); if (uio->uio_offset < 0) return (EINVAL); if 
(uio->uio_resid == 0) return (0); err = fuse_vnode_size(vp, &filesize, cred, curthread); if (err) return err; if (ioflag & IO_APPEND) uio_setoffset(uio, filesize); if (vn_rlimit_fsize(vp, uio, uio->uio_td)) return (EFBIG); do { bool direct_append, extending; if (fuse_isdeadfs(vp)) { err = ENXIO; break; } lbn = uio->uio_offset / biosize; on = uio->uio_offset & (biosize - 1); n = MIN((unsigned)(biosize - on), uio->uio_resid); again: /* Get or create a buffer for the write */ direct_append = uio->uio_offset == filesize && n; if (uio->uio_offset + n < filesize) { extending = false; if ((off_t)(lbn + 1) * biosize < filesize) { /* Not the file's last block */ bcount = biosize; } else { /* The file's last block */ bcount = filesize - (off_t)lbn * biosize; } } else { extending = true; bcount = on + n; } if (howmany(((off_t)lbn * biosize + on + n - 1), PAGE_SIZE) >= howmany(filesize, PAGE_SIZE)) last_page = true; else last_page = false; if (direct_append) { /* * Take care to preserve the buffer's B_CACHE state so * as not to cause an unnecessary read. */ bp = getblk(vp, lbn, on, PCATCH, 0, 0); if (bp != NULL) { uint32_t save = bp->b_flags & B_CACHE; allocbuf(bp, bcount); bp->b_flags |= save; } } else { bp = getblk(vp, lbn, bcount, PCATCH, 0, 0); } if (!bp) { err = EINTR; break; } if (extending) { /* * Extend file _after_ locking buffer so we won't race * with other readers */ err = fuse_vnode_setsize(vp, uio->uio_offset + n, false); filesize = uio->uio_offset + n; fvdat->flag |= FN_SIZECHANGE; if (err) { brelse(bp); break; } } SDT_PROBE6(fusefs, , io, write_biobackend_start, lbn, on, n, uio, bcount, direct_append); /* * Issue a READ if B_CACHE is not set. In special-append * mode, B_CACHE is based on the buffer prior to the write * op and is typically set, avoiding the read. If a read * is required in special append mode, the server will * probably send us a short-read since we extended the file * on our end, resulting in b_resid == 0 and, thusly, * B_CACHE getting set. * * We can also avoid issuing the read if the write covers * the entire buffer. We have to make sure the buffer state * is reasonable in this case since we will not be initiating * I/O. See the comments in kern/vfs_bio.c's getblk() for * more information. * * B_CACHE may also be set due to the buffer being cached * normally. */ if (on == 0 && n == bcount) { bp->b_flags |= B_CACHE; bp->b_flags &= ~B_INVAL; bp->b_ioflags &= ~BIO_ERROR; } if ((bp->b_flags & B_CACHE) == 0) { bp->b_iocmd = BIO_READ; vfs_busy_pages(bp, 0); fuse_io_strategy(vp, bp); if ((err = bp->b_error)) { brelse(bp); break; } if (bp->b_resid > 0) { /* * Short read indicates EOF. Update file size * from the server and try again. */ SDT_PROBE2(fusefs, , io, trace, 1, "Short read during a RMW"); brelse(bp); err = fuse_vnode_size(vp, &filesize, cred, curthread); if (err) break; else goto again; } } if (bp->b_wcred == NOCRED) bp->b_wcred = crhold(cred); /* * If dirtyend exceeds file size, chop it down. This should * not normally occur but there is an append race where it * might occur XXX, so we log it. * * If the chopping creates a reverse-indexed or degenerate * situation with dirtyoff/end, we 0 both of them. 
*/ if (bp->b_dirtyend > bcount) { SDT_PROBE2(fusefs, , io, write_biobackend_append_race, (long)bp->b_blkno * biosize, bp->b_dirtyend - bcount); bp->b_dirtyend = bcount; } if (bp->b_dirtyoff >= bp->b_dirtyend) bp->b_dirtyoff = bp->b_dirtyend = 0; /* * If the new write will leave a contiguous dirty * area, just update the b_dirtyoff and b_dirtyend, * otherwise force a write rpc of the old dirty area. * * While it is possible to merge discontiguous writes due to * our having a B_CACHE buffer (and thus valid read data * for the hole), we don't because it could lead to * significant cache coherency problems with multiple clients, * especially if locking is implemented later on. * * As an optimization we could theoretically maintain * a linked list of discontinuous areas, but we would still * have to commit them separately so there isn't much * advantage to it except perhaps a bit of asynchronization. */ if (bp->b_dirtyend > 0 && (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) { /* * Yes, we mean it. Write out everything to "storage" * immediately, without hesitation. (Apart from other * reasons: the only way to know if a write is valid * is if it's actually written out.) */ SDT_PROBE2(fusefs, , io, write_biobackend_issue, 0, bp); bwrite(bp); if (bp->b_error == EINTR) { err = EINTR; break; } goto again; } err = uiomove((char *)bp->b_data + on, n, uio); if (err) { bp->b_ioflags |= BIO_ERROR; bp->b_error = err; brelse(bp); break; /* TODO: vfs_bio_clrbuf like ffs_write does? */ } /* * Only update dirtyoff/dirtyend if not a degenerate * condition. */ if (n) { if (bp->b_dirtyend > 0) { bp->b_dirtyoff = MIN(on, bp->b_dirtyoff); bp->b_dirtyend = MAX((on + n), bp->b_dirtyend); } else { bp->b_dirtyoff = on; bp->b_dirtyend = on + n; } vfs_bio_set_valid(bp, on, n); } vfs_bio_set_flags(bp, ioflag); bp->b_flags |= B_FUSEFS_WRITE_CACHE; if (ioflag & IO_SYNC) { SDT_PROBE2(fusefs, , io, write_biobackend_issue, 2, bp); if (!(ioflag & IO_VMIO)) bp->b_flags &= ~B_FUSEFS_WRITE_CACHE; err = bwrite(bp); } else if (vm_page_count_severe() || buf_dirty_count_severe() || (ioflag & IO_ASYNC)) { bp->b_flags |= B_CLUSTEROK; SDT_PROBE2(fusefs, , io, write_biobackend_issue, 3, bp); bawrite(bp); } else if (on == 0 && n == bcount) { if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) { bp->b_flags |= B_CLUSTEROK; SDT_PROBE2(fusefs, , io, write_biobackend_issue, 4, bp); cluster_write(vp, bp, filesize, seqcount, 0); } else { SDT_PROBE2(fusefs, , io, write_biobackend_issue, 5, bp); bawrite(bp); } } else if (ioflag & IO_DIRECT) { bp->b_flags |= B_CLUSTEROK; SDT_PROBE2(fusefs, , io, write_biobackend_issue, 6, bp); bawrite(bp); } else { bp->b_flags &= ~B_CLUSTEROK; SDT_PROBE2(fusefs, , io, write_biobackend_issue, 7, bp); bdwrite(bp); } if (err) break; } while (uio->uio_resid > 0 && n > 0); return (err); } int fuse_io_strategy(struct vnode *vp, struct buf *bp) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_filehandle *fufh; struct ucred *cred; struct uio *uiop; struct uio uio; struct iovec io; off_t filesize; int error = 0; int fflag; /* We don't know the true pid when we're dealing with the cache */ pid_t pid = 0; const int biosize = fuse_iosize(vp); MPASS(vp->v_type == VREG || vp->v_type == VDIR); MPASS(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE); fflag = bp->b_iocmd == BIO_READ ? FREAD : FWRITE; cred = bp->b_iocmd == BIO_READ ?
int fuse_io_strategy(struct vnode *vp, struct buf *bp) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_filehandle *fufh; struct ucred *cred; struct uio *uiop; struct uio uio; struct iovec io; off_t filesize; int error = 0; int fflag; /* We don't know the true pid when we're dealing with the cache */ pid_t pid = 0; const int biosize = fuse_iosize(vp); MPASS(vp->v_type == VREG || vp->v_type == VDIR); MPASS(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE); fflag = bp->b_iocmd == BIO_READ ? FREAD : FWRITE; cred = bp->b_iocmd == BIO_READ ? bp->b_rcred : bp->b_wcred; error = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid); if (bp->b_iocmd == BIO_READ && error == EBADF) { /* * This may be a read-modify-write operation on a cached file * opened O_WRONLY. The FUSE protocol allows this. */ error = fuse_filehandle_get(vp, FWRITE, &fufh, cred, pid); } if (error) { printf("FUSE: strategy: filehandles are closed\n"); bp->b_ioflags |= BIO_ERROR; bp->b_error = error; bufdone(bp); return (error); } uiop = &uio; uiop->uio_iov = &io; uiop->uio_iovcnt = 1; uiop->uio_segflg = UIO_SYSSPACE; uiop->uio_td = curthread; /* * Clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We * do this here so we do not have to do it in all the code that * calls us. */ bp->b_flags &= ~B_INVAL; bp->b_ioflags &= ~BIO_ERROR; KASSERT(!(bp->b_flags & B_DONE), ("fuse_io_strategy: bp %p already marked done", bp)); if (bp->b_iocmd == BIO_READ) { ssize_t left; io.iov_len = uiop->uio_resid = bp->b_bcount; io.iov_base = bp->b_data; uiop->uio_rw = UIO_READ; uiop->uio_offset = ((off_t)bp->b_lblkno) * biosize; error = fuse_read_directbackend(vp, uiop, cred, fufh); /* * Store the amount we failed to read in the buffer's private * field, so callers can truncate the file if necessary. */ if (!error && uiop->uio_resid) { int nread = bp->b_bcount - uiop->uio_resid; left = uiop->uio_resid; bzero((char *)bp->b_data + nread, left); if ((fvdat->flag & FN_SIZECHANGE) == 0) { /* * A short read with no error, when not using * direct io, and when no writes are cached, * indicates EOF caused by a server-side * truncation. Clear the attr cache so we'll * pick up the new file size and timestamps. * * We must still bzero the remaining buffer so * uninitialized data doesn't get exposed by a * future truncate that extends the file. * * To prevent lock order problems, we must * truncate the file upstack, not here. */ SDT_PROBE2(fusefs, , io, trace, 1, "Short read of a clean file"); fuse_vnode_clear_attr_cache(vp); } else { /* * If dirty writes _are_ cached beyond EOF, * that indicates a newly created hole that the * server doesn't know about. Those don't pose * any problem. * XXX: we don't currently track whether dirty * writes are cached beyond EOF, before EOF, or * both. */ SDT_PROBE2(fusefs, , io, trace, 1, "Short read of a dirty file"); uiop->uio_resid = 0; } } if (error) { bp->b_ioflags |= BIO_ERROR; bp->b_error = error; } } else { /* * Set up for the actual write */ /* * If the file's size is cached, use that value, even if the * cache is expired. At this point we're already committed to * writing something. If the FUSE server has changed the * file's size behind our back, it's too late for us to do * anything about it. In particular, we can't invalidate any * part of the file's buffers because VOP_STRATEGY is called * with them already locked. */ filesize = fvdat->cached_attrs.va_size; /* filesize must've been cached by fuse_vnop_open.
*/ KASSERT(filesize != VNOVAL, ("filesize should've been cached")); if ((off_t)bp->b_lblkno * biosize + bp->b_dirtyend > filesize) bp->b_dirtyend = filesize - (off_t)bp->b_lblkno * biosize; if (bp->b_dirtyend > bp->b_dirtyoff) { io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff; uiop->uio_offset = (off_t)bp->b_lblkno * biosize + bp->b_dirtyoff; io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; uiop->uio_rw = UIO_WRITE; bool pages = bp->b_flags & B_FUSEFS_WRITE_CACHE; error = fuse_write_directbackend(vp, uiop, cred, fufh, filesize, 0, pages); if (error == EINTR || error == ETIMEDOUT) { bp->b_flags &= ~(B_INVAL | B_NOCACHE); if ((bp->b_flags & B_PAGING) == 0) { bdirty(bp); bp->b_flags &= ~B_DONE; } if ((error == EINTR || error == ETIMEDOUT) && (bp->b_flags & B_ASYNC) == 0) bp->b_flags |= B_EINTR; } else { if (error) { bp->b_ioflags |= BIO_ERROR; bp->b_flags |= B_INVAL; bp->b_error = error; } bp->b_dirtyoff = bp->b_dirtyend = 0; } } else { bp->b_resid = 0; bufdone(bp); return (0); } } bp->b_resid = uiop->uio_resid; bufdone(bp); return (error); } int fuse_io_flushbuf(struct vnode *vp, int waitfor, struct thread *td) { return (vn_fsync_buf(vp, waitfor)); } /* * Flush and invalidate all dirty buffers. If another process is already * doing the flush, just wait for completion. */ int fuse_io_invalbuf(struct vnode *vp, struct thread *td) { struct fuse_vnode_data *fvdat = VTOFUD(vp); int error = 0; if (VN_IS_DOOMED(vp)) return 0; ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf"); while (fvdat->flag & FN_FLUSHINPROG) { struct proc *p = td->td_proc; if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) return EIO; fvdat->flag |= FN_FLUSHWANT; tsleep(&fvdat->flag, PRIBIO + 2, "fusevinv", 2 * hz); error = 0; if (p != NULL) { PROC_LOCK(p); if (SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist)) error = EINTR; PROC_UNLOCK(p); } if (error == EINTR) return EINTR; } fvdat->flag |= FN_FLUSHINPROG; if (vp->v_bufobj.bo_object != NULL) { VM_OBJECT_WLOCK(vp->v_bufobj.bo_object); vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC); VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object); } error = vinvalbuf(vp, V_SAVE, PCATCH, 0); while (error) { if (error == ERESTART || error == EINTR) { fvdat->flag &= ~FN_FLUSHINPROG; if (fvdat->flag & FN_FLUSHWANT) { fvdat->flag &= ~FN_FLUSHWANT; wakeup(&fvdat->flag); } return EINTR; } error = vinvalbuf(vp, V_SAVE, PCATCH, 0); } fvdat->flag &= ~FN_FLUSHINPROG; if (fvdat->flag & FN_FLUSHWANT) { fvdat->flag &= ~FN_FLUSHWANT; wakeup(&fvdat->flag); } return (error); } diff --git a/sys/fs/fuse/fuse_node.c b/sys/fs/fuse/fuse_node.c index c8982c5a48a1..f74de2faa702 100644 --- a/sys/fs/fuse/fuse_node.c +++ b/sys/fs/fuse/fuse_node.c @@ -1,515 +1,523 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. and Amit Singh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Copyright (c) 2019 The FreeBSD Foundation * * Portions of this software were developed by BFF Storage Systems, LLC under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fuse.h" #include "fuse_node.h" #include "fuse_internal.h" #include "fuse_io.h" #include "fuse_ipc.h" SDT_PROVIDER_DECLARE(fusefs); /* * Fuse trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(fusefs, , node, trace, "int", "char*"); MALLOC_DEFINE(M_FUSEVN, "fuse_vnode", "fuse vnode private data"); static int sysctl_fuse_cache_mode(SYSCTL_HANDLER_ARGS); static counter_u64_t fuse_node_count; SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, node_count, CTLFLAG_RD, &fuse_node_count, "Count of FUSE vnodes"); int fuse_data_cache_mode = FUSE_CACHE_WT; /* * DEPRECATED * This sysctl is no longer needed as of fuse protocol 7.23. Individual * servers can select the cache behavior they need for each mountpoint: * - writethrough: the default * - writeback: set FUSE_WRITEBACK_CACHE in fuse_init_out.flags * - uncached: set FOPEN_DIRECT_IO for every file * The sysctl is retained primarily for use by jails supporting older FUSE * protocols. 
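 * For example, such a jail can restore the legacy uncached behavior at
 * runtime with "sysctl vfs.fusefs.data_cache_mode=0"; the knob's name
 * follows from the SYSCTL_PROC declaration below.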
It may be removed entirely once FreeBSD 11.3 and 12.0 are EOL. */ SYSCTL_PROC(_vfs_fusefs, OID_AUTO, data_cache_mode, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &fuse_data_cache_mode, 0, sysctl_fuse_cache_mode, "I", "Zero: disable caching of FUSE file data; One: write-through caching " "(default); Two: write-back caching (generally unsafe)"); static int sysctl_fuse_cache_mode(SYSCTL_HANDLER_ARGS) { int val, error; val = *(int *)arg1; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return (error); switch (val) { case FUSE_CACHE_UC: case FUSE_CACHE_WT: case FUSE_CACHE_WB: *(int *)arg1 = val; break; default: return (EDOM); } return (0); } static void fuse_vnode_init(struct vnode *vp, struct fuse_vnode_data *fvdat, uint64_t nodeid, enum vtype vtyp) { fvdat->nid = nodeid; LIST_INIT(&fvdat->handles); vattr_null(&fvdat->cached_attrs); if (nodeid == FUSE_ROOT_ID) { vp->v_vflag |= VV_ROOT; } vp->v_type = vtyp; vp->v_data = fvdat; counter_u64_add(fuse_node_count, 1); } void fuse_vnode_destroy(struct vnode *vp) { struct fuse_vnode_data *fvdat = vp->v_data; vp->v_data = NULL; KASSERT(LIST_EMPTY(&fvdat->handles), ("Destroying fuse vnode with open files!")); free(fvdat, M_FUSEVN); counter_u64_add(fuse_node_count, -1); } int fuse_vnode_cmp(struct vnode *vp, void *nidp) { return (VTOI(vp) != *((uint64_t *)nidp)); } SDT_PROBE_DEFINE3(fusefs, , node, stale_vnode, "struct vnode*", "enum vtype", "uint64_t"); static int fuse_vnode_alloc(struct mount *mp, struct thread *td, uint64_t nodeid, enum vtype vtyp, struct vnode **vpp) { struct fuse_data *data; struct fuse_vnode_data *fvdat; struct vnode *vp2; int err = 0; data = fuse_get_mpdata(mp); if (vtyp == VNON) { return EINVAL; } *vpp = NULL; err = vfs_hash_get(mp, fuse_vnode_hash(nodeid), LK_EXCLUSIVE, td, vpp, fuse_vnode_cmp, &nodeid); if (err) return (err); if (*vpp) { if ((*vpp)->v_type != vtyp) { /* * STALE vnode! This probably indicates a buggy * server, but it could also be the result of a race * between FUSE_LOOKUP and another client's * FUSE_UNLINK/FUSE_CREATE */ SDT_PROBE3(fusefs, , node, stale_vnode, *vpp, vtyp, nodeid); fuse_internal_vnode_disappear(*vpp); lockmgr((*vpp)->v_vnlock, LK_RELEASE, NULL); *vpp = NULL; return (EAGAIN); } MPASS((*vpp)->v_data != NULL); MPASS(VTOFUD(*vpp)->nid == nodeid); SDT_PROBE2(fusefs, , node, trace, 1, "vnode taken from hash"); return (0); } fvdat = malloc(sizeof(*fvdat), M_FUSEVN, M_WAITOK | M_ZERO); switch (vtyp) { case VFIFO: err = getnewvnode("fuse", mp, &fuse_fifoops, vpp); break; default: err = getnewvnode("fuse", mp, &fuse_vnops, vpp); break; } if (err) { free(fvdat, M_FUSEVN); return (err); } lockmgr((*vpp)->v_vnlock, LK_EXCLUSIVE, NULL); fuse_vnode_init(*vpp, fvdat, nodeid, vtyp); err = insmntque(*vpp, mp); ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc"); if (err) { lockmgr((*vpp)->v_vnlock, LK_RELEASE, NULL); free(fvdat, M_FUSEVN); *vpp = NULL; return (err); } /* Disallow async reads for fifos because UFS does. 
I don't know why */ if (data->dataflags & FSESS_ASYNC_READ && vtyp != VFIFO) VN_LOCK_ASHARE(*vpp); err = vfs_hash_insert(*vpp, fuse_vnode_hash(nodeid), LK_EXCLUSIVE, td, &vp2, fuse_vnode_cmp, &nodeid); if (err) { lockmgr((*vpp)->v_vnlock, LK_RELEASE, NULL); free(fvdat, M_FUSEVN); *vpp = NULL; return (err); } if (vp2 != NULL) { *vpp = vp2; return (0); } ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc"); return (0); } int fuse_vnode_get(struct mount *mp, struct fuse_entry_out *feo, uint64_t nodeid, struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum vtype vtyp) { struct thread *td = (cnp != NULL ? cnp->cn_thread : curthread); /* * feo should only be NULL for the root directory, which (when libfuse * is used) always has generation 0 */ uint64_t generation = feo ? feo->generation : 0; int err = 0; err = fuse_vnode_alloc(mp, td, nodeid, vtyp, vpp); if (err) { return err; } if (dvp != NULL) { MPASS(cnp && (cnp->cn_flags & ISDOTDOT) == 0); MPASS(cnp && !(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.')); fuse_vnode_setparent(*vpp, dvp); } if (dvp != NULL && cnp != NULL && (cnp->cn_flags & MAKEENTRY) != 0 && feo != NULL && (feo->entry_valid != 0 || feo->entry_valid_nsec != 0)) { struct timespec timeout; ASSERT_VOP_LOCKED(*vpp, "fuse_vnode_get"); ASSERT_VOP_LOCKED(dvp, "fuse_vnode_get"); fuse_validity_2_timespec(feo, &timeout); cache_enter_time(dvp, *vpp, cnp, &timeout, NULL); } VTOFUD(*vpp)->generation = generation; /* * In userland, libfuse uses cached lookups for dot and dotdot entries, * thus it does not really bump the nlookup counter for forget. * Follow the same semantic and avoid the bump in order to keep * nlookup counters consistent. */ if (cnp == NULL || ((cnp->cn_flags & ISDOTDOT) == 0 && (cnp->cn_namelen != 1 || cnp->cn_nameptr[0] != '.'))) VTOFUD(*vpp)->nlookup++; return 0; } /* * Called for every fusefs vnode open to initialize the vnode (not * fuse_filehandle) for use */ void fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td) { if (vnode_vtype(vp) == VREG) vnode_create_vobject(vp, 0, td); } int fuse_vnode_savesize(struct vnode *vp, struct ucred *cred, pid_t pid) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct thread *td = curthread; struct fuse_filehandle *fufh = NULL; struct fuse_dispatcher fdi; struct fuse_setattr_in *fsai; int err = 0; ASSERT_VOP_ELOCKED(vp, "fuse_io_extend"); if (fuse_isdeadfs(vp)) { return EBADF; } if (vnode_vtype(vp) == VDIR) { return EISDIR; } if (vfs_isrdonly(vnode_mount(vp))) { return EROFS; } if (cred == NULL) { cred = td->td_ucred; } fdisp_init(&fdi, sizeof(*fsai)); fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred); fsai = fdi.indata; fsai->valid = 0; /* Truncate to a new value. */ MPASS((fvdat->flag & FN_SIZECHANGE) != 0); fsai->size = fvdat->cached_attrs.va_size; fsai->valid |= FATTR_SIZE; fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid); if (fufh) { fsai->fh = fufh->fh_id; fsai->valid |= FATTR_FH; } err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); if (err == 0) fvdat->flag &= ~FN_SIZECHANGE; return err; } /* * Adjust the vnode's size to a new value. * * If the new value came from the server, such as from a FUSE_GETATTR * operation, set `from_server` true. But if it came from a local operation, * such as write(2) or truncate(2), set `from_server` false. 
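 * For example, a FUSE_GETATTR reply that reports an unexpected size
 * means some other client resized the file (`from_server` true), whereas
 * an extending write(2) that we initiated ourselves is a purely local
 * change (`from_server` false).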
*/ int fuse_vnode_setsize(struct vnode *vp, off_t newsize, bool from_server) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct vattr *attrs; off_t oldsize; size_t iosize; struct buf *bp = NULL; int err = 0; ASSERT_VOP_ELOCKED(vp, "fuse_vnode_setsize"); iosize = fuse_iosize(vp); oldsize = fvdat->cached_attrs.va_size; fvdat->cached_attrs.va_size = newsize; if ((attrs = VTOVA(vp)) != NULL) attrs->va_size = newsize; if (newsize < oldsize) { daddr_t lbn; err = vtruncbuf(vp, newsize, fuse_iosize(vp)); if (err) goto out; if (newsize % iosize == 0) goto out; /* * Zero the contents of the last partial block. * Sure seems like vtruncbuf should do this for us. */ lbn = newsize / iosize; bp = getblk(vp, lbn, iosize, PCATCH, 0, 0); if (!bp) { err = EINTR; goto out; } if (!(bp->b_flags & B_CACHE)) goto out; /* Nothing to do */ MPASS(bp->b_flags & B_VMIO); vfs_bio_clrbuf(bp); bp->b_dirtyend = MIN(bp->b_dirtyend, newsize - lbn * iosize); } else if (from_server && newsize > oldsize && oldsize != VNOVAL) { /* * The FUSE server changed the file size behind our back. We * should invalidate the entire cache. */ daddr_t left_lbn, end_lbn; left_lbn = oldsize / iosize; end_lbn = howmany(newsize, iosize); v_inval_buf_range(vp, 0, end_lbn, iosize); } out: if (bp) brelse(bp); vnode_pager_setsize(vp, newsize); return err; } /* Get the current, possibly dirty, size of the file */ int fuse_vnode_size(struct vnode *vp, off_t *filesize, struct ucred *cred, struct thread *td) { struct fuse_vnode_data *fvdat = VTOFUD(vp); int error = 0; if (!(fvdat->flag & FN_SIZECHANGE) && (!fuse_vnode_attr_cache_valid(vp) || fvdat->cached_attrs.va_size == VNOVAL)) error = fuse_internal_do_getattr(vp, NULL, cred, td); if (!error) *filesize = fvdat->cached_attrs.va_size; return error; } void -fuse_vnode_undirty_cached_timestamps(struct vnode *vp) +fuse_vnode_undirty_cached_timestamps(struct vnode *vp, bool atime) { struct fuse_vnode_data *fvdat = VTOFUD(vp); fvdat->flag &= ~(FN_MTIMECHANGE | FN_CTIMECHANGE); + if (atime) + fvdat->flag &= ~FN_ATIMECHANGE; } /* Update a fuse file's cached timestamps */ void fuse_vnode_update(struct vnode *vp, int flags) { struct fuse_vnode_data *fvdat = VTOFUD(vp); - struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp)); + struct mount *mp = vnode_mount(vp); + struct fuse_data *data = fuse_get_mpdata(mp); struct timespec ts; vfs_timestamp(&ts); if (data->time_gran > 1) ts.tv_nsec = rounddown(ts.tv_nsec, data->time_gran); + if (mp->mnt_flag & MNT_NOATIME) + flags &= ~FN_ATIMECHANGE; + + if (flags & FN_ATIMECHANGE) + fvdat->cached_attrs.va_atime = ts; if (flags & FN_MTIMECHANGE) fvdat->cached_attrs.va_mtime = ts; if (flags & FN_CTIMECHANGE) fvdat->cached_attrs.va_ctime = ts; fvdat->flag |= flags; } void fuse_node_init(void) { fuse_node_count = counter_u64_alloc(M_WAITOK); } void fuse_node_destroy(void) { counter_u64_free(fuse_node_count); } diff --git a/sys/fs/fuse/fuse_node.h b/sys/fs/fuse/fuse_node.h index 8f69ef9a08f3..3a33c57001dc 100644 --- a/sys/fs/fuse/fuse_node.h +++ b/sys/fs/fuse/fuse_node.h @@ -1,210 +1,211 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. and Amit Singh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Copyright (c) 2019 The FreeBSD Foundation * * Portions of this software were developed by BFF Storage Systems, LLC under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _FUSE_NODE_H_ #define _FUSE_NODE_H_ #include #include #include #include "fuse_file.h" #define FN_REVOKED 0x00000020 #define FN_FLUSHINPROG 0x00000040 #define FN_FLUSHWANT 0x00000080 /* * Indicates that the file's size is dirty; the kernel has changed it but not * yet sent the change to the daemon. When this bit is set, the * cached_attrs.va_size field does not time out. */ #define FN_SIZECHANGE 0x00000100 #define FN_DIRECTIO 0x00000200 /* Indicates that parent_nid is valid */ #define FN_PARENT_NID 0x00000400 /* * Indicates that the file's cached timestamps are dirty. They will be flushed * during the next SETATTR or WRITE. Until then, the cached fields will not * time out.
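 * (For example, a buffered write marks mtime and ctime dirty via
 * fuse_vnode_update(); the daemon only learns the new values from the
 * eventual WRITE or SETATTR.)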
*/ #define FN_MTIMECHANGE 0x00000800 #define FN_CTIMECHANGE 0x00001000 +#define FN_ATIMECHANGE 0x00002000 struct fuse_vnode_data { /** self **/ uint64_t nid; uint64_t generation; /** parent **/ uint64_t parent_nid; /** I/O **/ /* List of file handles for all of the vnode's open file descriptors */ LIST_HEAD(, fuse_filehandle) handles; /** flags **/ uint32_t flag; /** meta **/ /* The monotonic time after which the attr cache is invalid */ struct bintime attr_cache_timeout; /* * Monotonic time after which the entry is invalid. Used for lookups * by nodeid instead of pathname. */ struct bintime entry_cache_timeout; struct vattr cached_attrs; uint64_t nlookup; enum vtype vtype; }; /* * This overlays the fid structure (see mount.h). Mostly the same as the types * used by UFS and ext2. */ struct fuse_fid { uint16_t len; /* Length of structure. */ uint16_t pad; /* Force 32-bit alignment. */ uint32_t gen; /* Generation number. */ uint64_t nid; /* FUSE node id. */ }; #define VTOFUD(vp) \ ((struct fuse_vnode_data *)((vp)->v_data)) #define VTOI(vp) (VTOFUD(vp)->nid) static inline bool fuse_vnode_attr_cache_valid(struct vnode *vp) { struct bintime now; getbinuptime(&now); return (bintime_cmp(&(VTOFUD(vp)->attr_cache_timeout), &now, >)); } static inline struct vattr* VTOVA(struct vnode *vp) { if (fuse_vnode_attr_cache_valid(vp)) return &(VTOFUD(vp)->cached_attrs); else return NULL; } static inline void fuse_vnode_clear_attr_cache(struct vnode *vp) { bintime_clear(&VTOFUD(vp)->attr_cache_timeout); } static uint32_t inline fuse_vnode_hash(uint64_t id) { return (fnv_32_buf(&id, sizeof(id), FNV1_32_INIT)); } #define VTOILLU(vp) ((uint64_t)(VTOFUD(vp) ? VTOI(vp) : 0)) #define FUSE_NULL_ID 0 extern struct vop_vector fuse_fifoops; extern struct vop_vector fuse_vnops; int fuse_vnode_cmp(struct vnode *vp, void *nidp); static inline void fuse_vnode_setparent(struct vnode *vp, struct vnode *dvp) { if (dvp != NULL && vp->v_type == VDIR) { MPASS(dvp->v_type == VDIR); VTOFUD(vp)->parent_nid = VTOI(dvp); VTOFUD(vp)->flag |= FN_PARENT_NID; } else { VTOFUD(vp)->flag &= ~FN_PARENT_NID; } } int fuse_vnode_size(struct vnode *vp, off_t *filesize, struct ucred *cred, struct thread *td); void fuse_vnode_destroy(struct vnode *vp); int fuse_vnode_get(struct mount *mp, struct fuse_entry_out *feo, uint64_t nodeid, struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum vtype vtyp); void fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td); int fuse_vnode_savesize(struct vnode *vp, struct ucred *cred, pid_t pid); int fuse_vnode_setsize(struct vnode *vp, off_t newsize, bool from_server); -void fuse_vnode_undirty_cached_timestamps(struct vnode *vp); +void fuse_vnode_undirty_cached_timestamps(struct vnode *vp, bool atime); void fuse_vnode_update(struct vnode *vp, int flags); void fuse_node_init(void); void fuse_node_destroy(void); #endif /* _FUSE_NODE_H_ */ diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c index 5630eeeb9e65..d0f4cae302c1 100644 --- a/sys/fs/fuse/fuse_vnops.c +++ b/sys/fs/fuse/fuse_vnops.c @@ -1,2730 +1,2738 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. and Amit Singh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Copyright (c) 2019 The FreeBSD Foundation * * Portions of this software were developed by BFF Storage Systems, LLC under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fuse.h" #include "fuse_file.h" #include "fuse_internal.h" #include "fuse_ipc.h" #include "fuse_node.h" #include "fuse_io.h" #include /* Maximum number of hardlinks to a single FUSE file */ #define FUSE_LINK_MAX UINT32_MAX SDT_PROVIDER_DECLARE(fusefs); /* * Fuse trace probe: * arg0: verbosity. 
Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(fusefs, , vnops, trace, "int", "char*"); /* vnode ops */ static vop_access_t fuse_vnop_access; static vop_advlock_t fuse_vnop_advlock; static vop_bmap_t fuse_vnop_bmap; static vop_close_t fuse_fifo_close; static vop_close_t fuse_vnop_close; static vop_copy_file_range_t fuse_vnop_copy_file_range; static vop_create_t fuse_vnop_create; static vop_deleteextattr_t fuse_vnop_deleteextattr; static vop_fdatasync_t fuse_vnop_fdatasync; static vop_fsync_t fuse_vnop_fsync; static vop_getattr_t fuse_vnop_getattr; static vop_getextattr_t fuse_vnop_getextattr; static vop_inactive_t fuse_vnop_inactive; static vop_ioctl_t fuse_vnop_ioctl; static vop_link_t fuse_vnop_link; static vop_listextattr_t fuse_vnop_listextattr; static vop_lookup_t fuse_vnop_lookup; static vop_mkdir_t fuse_vnop_mkdir; static vop_mknod_t fuse_vnop_mknod; static vop_open_t fuse_vnop_open; static vop_pathconf_t fuse_vnop_pathconf; static vop_read_t fuse_vnop_read; static vop_readdir_t fuse_vnop_readdir; static vop_readlink_t fuse_vnop_readlink; static vop_reclaim_t fuse_vnop_reclaim; static vop_remove_t fuse_vnop_remove; static vop_rename_t fuse_vnop_rename; static vop_rmdir_t fuse_vnop_rmdir; static vop_setattr_t fuse_vnop_setattr; static vop_setextattr_t fuse_vnop_setextattr; static vop_strategy_t fuse_vnop_strategy; static vop_symlink_t fuse_vnop_symlink; static vop_write_t fuse_vnop_write; static vop_getpages_t fuse_vnop_getpages; static vop_print_t fuse_vnop_print; static vop_vptofh_t fuse_vnop_vptofh; struct vop_vector fuse_fifoops = { .vop_default = &fifo_specops, .vop_access = fuse_vnop_access, .vop_close = fuse_fifo_close, .vop_fsync = fuse_vnop_fsync, .vop_getattr = fuse_vnop_getattr, .vop_inactive = fuse_vnop_inactive, .vop_pathconf = fuse_vnop_pathconf, .vop_print = fuse_vnop_print, .vop_read = VOP_PANIC, .vop_reclaim = fuse_vnop_reclaim, .vop_setattr = fuse_vnop_setattr, .vop_write = VOP_PANIC, .vop_vptofh = fuse_vnop_vptofh, }; VFS_VOP_VECTOR_REGISTER(fuse_fifoops); struct vop_vector fuse_vnops = { .vop_allocate = VOP_EINVAL, .vop_default = &default_vnodeops, .vop_access = fuse_vnop_access, .vop_advlock = fuse_vnop_advlock, .vop_bmap = fuse_vnop_bmap, .vop_close = fuse_vnop_close, .vop_copy_file_range = fuse_vnop_copy_file_range, .vop_create = fuse_vnop_create, .vop_deleteextattr = fuse_vnop_deleteextattr, .vop_fsync = fuse_vnop_fsync, .vop_fdatasync = fuse_vnop_fdatasync, .vop_getattr = fuse_vnop_getattr, .vop_getextattr = fuse_vnop_getextattr, .vop_inactive = fuse_vnop_inactive, .vop_ioctl = fuse_vnop_ioctl, .vop_link = fuse_vnop_link, .vop_listextattr = fuse_vnop_listextattr, .vop_lookup = fuse_vnop_lookup, .vop_mkdir = fuse_vnop_mkdir, .vop_mknod = fuse_vnop_mknod, .vop_open = fuse_vnop_open, .vop_pathconf = fuse_vnop_pathconf, /* * TODO: implement vop_poll after upgrading to protocol 7.21. 
* FUSE_POLL was added in protocol 7.11, but it's kind of broken until * 7.21, which adds the ability for the client to choose which poll * events it wants, and for a client to deregister a file handle */ .vop_read = fuse_vnop_read, .vop_readdir = fuse_vnop_readdir, .vop_readlink = fuse_vnop_readlink, .vop_reclaim = fuse_vnop_reclaim, .vop_remove = fuse_vnop_remove, .vop_rename = fuse_vnop_rename, .vop_rmdir = fuse_vnop_rmdir, .vop_setattr = fuse_vnop_setattr, .vop_setextattr = fuse_vnop_setextattr, .vop_strategy = fuse_vnop_strategy, .vop_symlink = fuse_vnop_symlink, .vop_write = fuse_vnop_write, .vop_getpages = fuse_vnop_getpages, .vop_print = fuse_vnop_print, .vop_vptofh = fuse_vnop_vptofh, }; VFS_VOP_VECTOR_REGISTER(fuse_vnops); uma_zone_t fuse_pbuf_zone; /* Check permission for extattr operations, much like extattr_check_cred */ static int fuse_extattr_check_cred(struct vnode *vp, int ns, struct ucred *cred, struct thread *td, accmode_t accmode) { struct mount *mp = vnode_mount(vp); struct fuse_data *data = fuse_get_mpdata(mp); int default_permissions = data->dataflags & FSESS_DEFAULT_PERMISSIONS; /* * Kernel-invoked always succeeds. */ if (cred == NOCRED) return (0); /* * Do not allow privileged processes in jail to directly manipulate * system attributes. */ switch (ns) { case EXTATTR_NAMESPACE_SYSTEM: if (default_permissions) { return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); } return (0); case EXTATTR_NAMESPACE_USER: if (default_permissions) { return (fuse_internal_access(vp, accmode, td, cred)); } return (0); default: return (EPERM); } } /* Get a filehandle for a directory */ static int fuse_filehandle_get_dir(struct vnode *vp, struct fuse_filehandle **fufhp, struct ucred *cred, pid_t pid) { if (fuse_filehandle_get(vp, FREAD, fufhp, cred, pid) == 0) return 0; return fuse_filehandle_get(vp, FEXEC, fufhp, cred, pid); } /* Send FUSE_FLUSH for this vnode */ static int fuse_flush(struct vnode *vp, struct ucred *cred, pid_t pid, int fflag) { struct fuse_flush_in *ffi; struct fuse_filehandle *fufh; struct fuse_dispatcher fdi; struct thread *td = curthread; struct mount *mp = vnode_mount(vp); int err; if (fsess_not_impl(vnode_mount(vp), FUSE_FLUSH)) return 0; err = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid); if (err) return err; fdisp_init(&fdi, sizeof(*ffi)); fdisp_make_vp(&fdi, FUSE_FLUSH, vp, td, cred); ffi = fdi.indata; ffi->fh = fufh->fh_id; /* * If the file has a POSIX lock then we're supposed to set lock_owner. * If not, then lock_owner is undefined. So we may as well always set * it. */ ffi->lock_owner = td->td_proc->p_pid; err = fdisp_wait_answ(&fdi); if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_FLUSH); err = 0; } fdisp_destroy(&fdi); return err; } /* Close wrapper for fifos. 
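 * It exists only so that fifo close goes through fifo_specops; no FUSE
 * request is generated here.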
*/ static int fuse_fifo_close(struct vop_close_args *ap) { return (fifo_specops.vop_close(ap)); } /* Send FUSE_LSEEK for this node */ static int fuse_vnop_do_lseek(struct vnode *vp, struct thread *td, struct ucred *cred, pid_t pid, off_t *offp, int whence) { struct fuse_dispatcher fdi; struct fuse_filehandle *fufh; struct fuse_lseek_in *flsi; struct fuse_lseek_out *flso; struct mount *mp = vnode_mount(vp); int err; ASSERT_VOP_LOCKED(vp, __func__); err = fuse_filehandle_getrw(vp, FREAD, &fufh, cred, pid); if (err) return (err); fdisp_init(&fdi, sizeof(*flsi)); fdisp_make_vp(&fdi, FUSE_LSEEK, vp, td, cred); flsi = fdi.indata; flsi->fh = fufh->fh_id; flsi->offset = *offp; flsi->whence = whence; err = fdisp_wait_answ(&fdi); if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_LSEEK); } else if (err == 0) { fsess_set_impl(mp, FUSE_LSEEK); flso = fdi.answ; *offp = flso->offset; } fdisp_destroy(&fdi); return (err); } /* struct vnop_access_args { struct vnode *a_vp; #if VOP_ACCESS_TAKES_ACCMODE_T accmode_t a_accmode; #else int a_mode; #endif struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_access(struct vop_access_args *ap) { struct vnode *vp = ap->a_vp; int accmode = ap->a_accmode; struct ucred *cred = ap->a_cred; struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp)); int err; if (fuse_isdeadfs(vp)) { if (vnode_isvroot(vp)) { return 0; } return ENXIO; } if (!(data->dataflags & FSESS_INITED)) { if (vnode_isvroot(vp)) { if (priv_check_cred(cred, PRIV_VFS_ADMIN) || (fuse_match_cred(data->daemoncred, cred) == 0)) { return 0; } } return EBADF; } if (vnode_islnk(vp)) { return 0; } err = fuse_internal_access(vp, accmode, ap->a_td, ap->a_cred); return err; } /* * struct vop_advlock_args { * struct vop_generic_args a_gen; * struct vnode *a_vp; * void *a_id; * int a_op; * struct flock *a_fl; * int a_flags; * } */ static int fuse_vnop_advlock(struct vop_advlock_args *ap) { struct vnode *vp = ap->a_vp; struct flock *fl = ap->a_fl; struct thread *td = curthread; struct ucred *cred = td->td_ucred; pid_t pid = td->td_proc->p_pid; struct fuse_filehandle *fufh; struct fuse_dispatcher fdi; struct fuse_lk_in *fli; struct fuse_lk_out *flo; enum fuse_opcode op; int dataflags, err; int flags = ap->a_flags; dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags; if (fuse_isdeadfs(vp)) { return ENXIO; } switch(ap->a_op) { case F_GETLK: op = FUSE_GETLK; break; case F_SETLK: if (flags & F_WAIT) op = FUSE_SETLKW; else op = FUSE_SETLK; break; case F_UNLCK: op = FUSE_SETLK; break; default: return EINVAL; } if (!(dataflags & FSESS_POSIX_LOCKS)) return vop_stdadvlock(ap); /* FUSE doesn't properly support flock until protocol 7.17 */ if (flags & F_FLOCK) return vop_stdadvlock(ap); vn_lock(vp, LK_SHARED | LK_RETRY); err = fuse_filehandle_get_anyflags(vp, &fufh, cred, pid); if (err) goto out; fdisp_init(&fdi, sizeof(*fli)); fdisp_make_vp(&fdi, op, vp, td, cred); fli = fdi.indata; fli->fh = fufh->fh_id; fli->owner = td->td_proc->p_pid; fli->lk.start = fl->l_start; if (fl->l_len != 0) fli->lk.end = fl->l_start + fl->l_len - 1; else fli->lk.end = INT64_MAX; fli->lk.type = fl->l_type; fli->lk.pid = td->td_proc->p_pid; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); if (err == 0 && op == FUSE_GETLK) { flo = fdi.answ; fl->l_type = flo->lk.type; fl->l_pid = flo->lk.pid; if (flo->lk.type != F_UNLCK) { fl->l_start = flo->lk.start; if (flo->lk.end == INT64_MAX) fl->l_len = 0; else fl->l_len = flo->lk.end - flo->lk.start + 1; fl->l_start = flo->lk.start; } } out: VOP_UNLOCK(vp); return err; } /* { struct vnode *a_vp; 
daddr_t a_bn; struct bufobj **a_bop; daddr_t *a_bnp; int *a_runp; int *a_runb; } */ static int fuse_vnop_bmap(struct vop_bmap_args *ap) { struct vnode *vp = ap->a_vp; struct bufobj **bo = ap->a_bop; struct thread *td = curthread; struct mount *mp; struct fuse_dispatcher fdi; struct fuse_bmap_in *fbi; struct fuse_bmap_out *fbo; struct fuse_data *data; struct fuse_vnode_data *fvdat = VTOFUD(vp); uint64_t biosize; off_t fsize; daddr_t lbn = ap->a_bn; daddr_t *pbn = ap->a_bnp; int *runp = ap->a_runp; int *runb = ap->a_runb; int error = 0; int maxrun; if (fuse_isdeadfs(vp)) { return ENXIO; } mp = vnode_mount(vp); data = fuse_get_mpdata(mp); biosize = fuse_iosize(vp); maxrun = MIN(vp->v_mount->mnt_iosize_max / biosize - 1, data->max_readahead_blocks); if (bo != NULL) *bo = &vp->v_bufobj; /* * The FUSE_BMAP operation does not include the runp and runb * variables, so we must guess. Report nonzero contiguous runs so * cluster_read will combine adjacent reads. It's worthwhile to reduce * upcalls even if we don't know the true physical layout of the file. * * FUSE file systems may opt out of read clustering in two ways: * * mounting with -onoclusterr * * Setting max_readahead <= maxbcachebuf during FUSE_INIT */ if (runb != NULL) *runb = MIN(lbn, maxrun); if (runp != NULL && maxrun == 0) *runp = 0; else if (runp != NULL) { /* * If the file's size is cached, use that value to calculate * runp, even if the cache is expired. runp is only advisory, * and the risk of getting it wrong is not worth the cost of * another upcall. */ if (fvdat->cached_attrs.va_size != VNOVAL) fsize = fvdat->cached_attrs.va_size; else error = fuse_vnode_size(vp, &fsize, td->td_ucred, td); if (error == 0) *runp = MIN(MAX(0, fsize / (off_t)biosize - lbn - 1), maxrun); else *runp = 0; } if (fsess_maybe_impl(mp, FUSE_BMAP)) { fdisp_init(&fdi, sizeof(*fbi)); fdisp_make_vp(&fdi, FUSE_BMAP, vp, td, td->td_ucred); fbi = fdi.indata; fbi->block = lbn; fbi->blocksize = biosize; error = fdisp_wait_answ(&fdi); if (error == ENOSYS) { fdisp_destroy(&fdi); fsess_set_notimpl(mp, FUSE_BMAP); error = 0; } else { fbo = fdi.answ; if (error == 0 && pbn != NULL) *pbn = fbo->block; fdisp_destroy(&fdi); return error; } } /* If the daemon doesn't support BMAP, make up a sensible default */ if (pbn != NULL) *pbn = lbn * btodb(biosize); return (error); } /* struct vop_close_args { struct vnode *a_vp; int a_fflag; struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_close(struct vop_close_args *ap) { struct vnode *vp = ap->a_vp; struct ucred *cred = ap->a_cred; int fflag = ap->a_fflag; struct thread *td = ap->a_td; pid_t pid = td->td_proc->p_pid; + struct fuse_vnode_data *fvdat = VTOFUD(vp); int err = 0; if (fuse_isdeadfs(vp)) return 0; if (vnode_isdir(vp)) return 0; if (fflag & IO_NDELAY) return 0; err = fuse_flush(vp, cred, pid, fflag); + if (err == 0 && (fvdat->flag & FN_ATIMECHANGE)) { + struct vattr vap; + + VATTR_NULL(&vap); + vap.va_atime = fvdat->cached_attrs.va_atime; + err = fuse_internal_setattr(vp, &vap, td, NULL); + } /* TODO: close the file handle, if we're sure it's no longer used */ - if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) { + if ((fvdat->flag & FN_SIZECHANGE) != 0) { fuse_vnode_savesize(vp, cred, td->td_proc->p_pid); } return err; } /* struct vop_copy_file_range_args { struct vop_generic_args a_gen; struct vnode *a_invp; off_t *a_inoffp; struct vnode *a_outvp; off_t *a_outoffp; size_t *a_lenp; unsigned int a_flags; struct ucred *a_incred; struct ucred *a_outcred; struct thread *a_fsizetd; } */ static int 
fuse_vnop_copy_file_range(struct vop_copy_file_range_args *ap) { struct vnode *invp = ap->a_invp; struct vnode *outvp = ap->a_outvp; struct mount *mp = vnode_mount(invp); struct fuse_vnode_data *outfvdat = VTOFUD(outvp); struct fuse_dispatcher fdi; struct fuse_filehandle *infufh, *outfufh; struct fuse_copy_file_range_in *fcfri; struct ucred *incred = ap->a_incred; struct ucred *outcred = ap->a_outcred; struct fuse_write_out *fwo; struct thread *td; struct uio io; pid_t pid; int err; if (mp != vnode_mount(outvp)) goto fallback; if (incred->cr_uid != outcred->cr_uid) goto fallback; if (incred->cr_groups[0] != outcred->cr_groups[0]) goto fallback; if (fsess_not_impl(mp, FUSE_COPY_FILE_RANGE)) goto fallback; if (ap->a_fsizetd == NULL) td = curthread; else td = ap->a_fsizetd; pid = td->td_proc->p_pid; /* Lock both vnodes, avoiding risk of deadlock. */ do { err = vn_lock(outvp, LK_EXCLUSIVE); if (invp == outvp) break; if (err == 0) { err = vn_lock(invp, LK_SHARED | LK_NOWAIT); if (err == 0) break; VOP_UNLOCK(outvp); err = vn_lock(invp, LK_SHARED); if (err == 0) VOP_UNLOCK(invp); } } while (err == 0); if (err != 0) return (err); err = fuse_filehandle_getrw(invp, FREAD, &infufh, incred, pid); if (err) goto unlock; err = fuse_filehandle_getrw(outvp, FWRITE, &outfufh, outcred, pid); if (err) goto unlock; if (ap->a_fsizetd) { io.uio_offset = *ap->a_outoffp; io.uio_resid = *ap->a_lenp; err = vn_rlimit_fsize(outvp, &io, ap->a_fsizetd); if (err) goto unlock; } fdisp_init(&fdi, sizeof(*fcfri)); fdisp_make_vp(&fdi, FUSE_COPY_FILE_RANGE, invp, td, incred); fcfri = fdi.indata; fcfri->fh_in = infufh->fh_id; fcfri->off_in = *ap->a_inoffp; fcfri->nodeid_out = VTOI(outvp); fcfri->fh_out = outfufh->fh_id; fcfri->off_out = *ap->a_outoffp; fcfri->len = *ap->a_lenp; fcfri->flags = 0; err = fdisp_wait_answ(&fdi); if (err == 0) { fwo = fdi.answ; *ap->a_lenp = fwo->size; *ap->a_inoffp += fwo->size; *ap->a_outoffp += fwo->size; fuse_internal_clear_suid_on_write(outvp, outcred, td); if (*ap->a_outoffp > outfvdat->cached_attrs.va_size) fuse_vnode_setsize(outvp, *ap->a_outoffp, false); } fdisp_destroy(&fdi); unlock: if (invp != outvp) VOP_UNLOCK(invp); VOP_UNLOCK(outvp); if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_COPY_FILE_RANGE); fallback: err = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp, ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred, ap->a_outcred, ap->a_fsizetd); } return (err); } static void fdisp_make_mknod_for_fallback( struct fuse_dispatcher *fdip, struct componentname *cnp, struct vnode *dvp, uint64_t parentnid, struct thread *td, struct ucred *cred, mode_t mode, enum fuse_opcode *op) { struct fuse_mknod_in *fmni; fdisp_init(fdip, sizeof(*fmni) + cnp->cn_namelen + 1); *op = FUSE_MKNOD; fdisp_make(fdip, *op, vnode_mount(dvp), parentnid, td, cred); fmni = fdip->indata; fmni->mode = mode; fmni->rdev = 0; memcpy((char *)fdip->indata + sizeof(*fmni), cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdip->indata)[sizeof(*fmni) + cnp->cn_namelen] = '\0'; } /* struct vnop_create_args { struct vnode *a_dvp; struct vnode **a_vpp; struct componentname *a_cnp; struct vattr *a_vap; }; */ static int fuse_vnop_create(struct vop_create_args *ap) { struct vnode *dvp = ap->a_dvp; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; struct vattr *vap = ap->a_vap; struct thread *td = cnp->cn_thread; struct ucred *cred = cnp->cn_cred; struct fuse_data *data; struct fuse_create_in *fci; struct fuse_entry_out *feo; struct fuse_open_out *foo; struct fuse_dispatcher fdi, fdi2; struct 
fuse_dispatcher *fdip = &fdi; struct fuse_dispatcher *fdip2 = NULL; int err; struct mount *mp = vnode_mount(dvp); data = fuse_get_mpdata(mp); uint64_t parentnid = VTOFUD(dvp)->nid; mode_t mode = MAKEIMODE(vap->va_type, vap->va_mode); enum fuse_opcode op; int flags; if (fuse_isdeadfs(dvp)) return ENXIO; /* FUSE expects sockets to be created with FUSE_MKNOD */ if (vap->va_type == VSOCK) return fuse_internal_mknod(dvp, vpp, cnp, vap); /* * VOP_CREATE doesn't tell us the open(2) flags, so we guess. Only a * writable mode makes sense, and we might as well include readability * too. */ flags = O_RDWR; bzero(&fdi, sizeof(fdi)); if (vap->va_type != VREG) return (EINVAL); if (fsess_not_impl(mp, FUSE_CREATE) || vap->va_type == VSOCK) { /* Fallback to FUSE_MKNOD/FUSE_OPEN */ fdisp_make_mknod_for_fallback(fdip, cnp, dvp, parentnid, td, cred, mode, &op); } else { /* Use FUSE_CREATE */ size_t insize; op = FUSE_CREATE; fdisp_init(fdip, sizeof(*fci) + cnp->cn_namelen + 1); fdisp_make(fdip, op, vnode_mount(dvp), parentnid, td, cred); fci = fdip->indata; fci->mode = mode; fci->flags = O_CREAT | flags; if (fuse_libabi_geq(data, 7, 12)) { insize = sizeof(*fci); fci->umask = td->td_proc->p_pd->pd_cmask; } else { insize = sizeof(struct fuse_open_in); } memcpy((char *)fdip->indata + insize, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdip->indata)[insize + cnp->cn_namelen] = '\0'; } err = fdisp_wait_answ(fdip); if (err) { if (err == ENOSYS && op == FUSE_CREATE) { fsess_set_notimpl(mp, FUSE_CREATE); fdisp_destroy(fdip); fdisp_make_mknod_for_fallback(fdip, cnp, dvp, parentnid, td, cred, mode, &op); err = fdisp_wait_answ(fdip); } if (err) goto out; } feo = fdip->answ; if ((err = fuse_internal_checkentry(feo, vap->va_type))) { goto out; } if (op == FUSE_CREATE) { foo = (struct fuse_open_out*)(feo + 1); } else { /* Issue a separate FUSE_OPEN */ struct fuse_open_in *foi; fdip2 = &fdi2; fdisp_init(fdip2, sizeof(*foi)); fdisp_make(fdip2, FUSE_OPEN, vnode_mount(dvp), feo->nodeid, td, cred); foi = fdip2->indata; foi->flags = flags; err = fdisp_wait_answ(fdip2); if (err) goto out; foo = fdip2->answ; } err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vap->va_type); if (err) { struct fuse_release_in *fri; uint64_t nodeid = feo->nodeid; uint64_t fh_id = foo->fh; fdisp_init(fdip, sizeof(*fri)); fdisp_make(fdip, FUSE_RELEASE, mp, nodeid, td, cred); fri = fdip->indata; fri->fh = fh_id; fri->flags = flags; fuse_insert_callback(fdip->tick, fuse_internal_forget_callback); fuse_insert_message(fdip->tick, false); goto out; } ASSERT_VOP_ELOCKED(*vpp, "fuse_vnop_create"); fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid, feo->attr_valid_nsec, NULL, true); fuse_filehandle_init(*vpp, FUFH_RDWR, NULL, td, cred, foo); fuse_vnode_open(*vpp, foo->open_flags, td); /* * Purge the parent's attribute cache because the daemon should've * updated its mtime and ctime */ fuse_vnode_clear_attr_cache(dvp); cache_purge_negative(dvp); out: if (fdip2) fdisp_destroy(fdip2); fdisp_destroy(fdip); return err; } /* struct vnop_fdatasync_args { struct vop_generic_args a_gen; struct vnode * a_vp; struct thread * a_td; }; */ static int fuse_vnop_fdatasync(struct vop_fdatasync_args *ap) { struct vnode *vp = ap->a_vp; struct thread *td = ap->a_td; int waitfor = MNT_WAIT; int err = 0; if (fuse_isdeadfs(vp)) { return 0; } if ((err = vop_stdfdatasync_buf(ap))) return err; return fuse_internal_fsync(vp, td, waitfor, true); } /* struct vnop_fsync_args { struct vop_generic_args a_gen; struct vnode * a_vp; int a_waitfor; struct thread * a_td; }; */ 
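/*
 * [Editor's illustrative sketch, not part of the patch.]  Both
 * fuse_vnop_fdatasync() above and fuse_vnop_fsync() below flush dirty
 * buffers locally and then delegate to fuse_internal_fsync(); the only
 * difference visible to the daemon is bit 0 of fuse_fsync_in.fsync_flags
 * (named FUSE_FSYNC_FDATASYNC in newer protocol headers), roughly:
 */
static void
fsync_flags_sketch(struct fuse_fsync_in *ffsi, uint64_t fh, bool datasync)
{
	ffsi->fh = fh;
	/* Bit 0 set: sync data only; the server may defer metadata. */
	ffsi->fsync_flags = datasync ? 1 : 0;
}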
static int fuse_vnop_fsync(struct vop_fsync_args *ap) { struct vnode *vp = ap->a_vp; struct thread *td = ap->a_td; int waitfor = ap->a_waitfor; int err = 0; if (fuse_isdeadfs(vp)) { return 0; } if ((err = vop_stdfsync(ap))) return err; return fuse_internal_fsync(vp, td, waitfor, false); } /* struct vnop_getattr_args { struct vnode *a_vp; struct vattr *a_vap; struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_getattr(struct vop_getattr_args *ap) { struct vnode *vp = ap->a_vp; struct vattr *vap = ap->a_vap; struct ucred *cred = ap->a_cred; struct thread *td = curthread; int err = 0; int dataflags; dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags; /* Note that we are not bailing out on a dead file system just yet. */ if (!(dataflags & FSESS_INITED)) { if (!vnode_isvroot(vp)) { fdata_set_dead(fuse_get_mpdata(vnode_mount(vp))); err = ENOTCONN; return err; } else { goto fake; } } err = fuse_internal_getattr(vp, vap, cred, td); if (err == ENOTCONN && vnode_isvroot(vp)) { /* see comment in fuse_vfsop_statfs() */ goto fake; } else { return err; } fake: bzero(vap, sizeof(*vap)); vap->va_type = vnode_vtype(vp); return 0; } /* struct vnop_inactive_args { struct vnode *a_vp; }; */ static int fuse_vnop_inactive(struct vop_inactive_args *ap) { struct vnode *vp = ap->a_vp; struct thread *td = curthread; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_filehandle *fufh, *fufh_tmp; int need_flush = 1; LIST_FOREACH_SAFE(fufh, &fvdat->handles, next, fufh_tmp) { if (need_flush && vp->v_type == VREG) { if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) { fuse_vnode_savesize(vp, NULL, 0); } if ((fvdat->flag & FN_REVOKED) != 0) fuse_io_invalbuf(vp, td); else fuse_io_flushbuf(vp, MNT_WAIT, td); need_flush = 0; } fuse_filehandle_close(vp, fufh, td, NULL); } if ((fvdat->flag & FN_REVOKED) != 0) vrecycle(vp); return 0; } /* struct vnop_ioctl_args { struct vnode *a_vp; u_long a_command; caddr_t a_data; int a_fflag; struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_ioctl(struct vop_ioctl_args *ap) { struct vnode *vp = ap->a_vp; struct mount *mp = vnode_mount(vp); struct ucred *cred = ap->a_cred; off_t *offp; pid_t pid = ap->a_td->td_proc->p_pid; int err; switch (ap->a_command) { case FIOSEEKDATA: case FIOSEEKHOLE: /* Call FUSE_LSEEK, if we can, or fall back to vop_stdioctl */ if (fsess_maybe_impl(mp, FUSE_LSEEK)) { int whence; offp = ap->a_data; if (ap->a_command == FIOSEEKDATA) whence = SEEK_DATA; else whence = SEEK_HOLE; vn_lock(vp, LK_SHARED | LK_RETRY); err = fuse_vnop_do_lseek(vp, ap->a_td, cred, pid, offp, whence); VOP_UNLOCK(vp); } if (fsess_not_impl(mp, FUSE_LSEEK)) err = vop_stdioctl(ap); break; default: /* TODO: implement FUSE_IOCTL */ err = ENOTTY; break; } return (err); } /* struct vnop_link_args { struct vnode *a_tdvp; struct vnode *a_vp; struct componentname *a_cnp; }; */ static int fuse_vnop_link(struct vop_link_args *ap) { struct vnode *vp = ap->a_vp; struct vnode *tdvp = ap->a_tdvp; struct componentname *cnp = ap->a_cnp; struct vattr *vap = VTOVA(vp); struct fuse_dispatcher fdi; struct fuse_entry_out *feo; struct fuse_link_in fli; int err; if (fuse_isdeadfs(vp)) { return ENXIO; } if (vnode_mount(tdvp) != vnode_mount(vp)) { return EXDEV; } /* * This is a seatbelt check to protect naive userspace filesystems from * themselves and the limitations of the FUSE IPC protocol. If a * filesystem does not allow attribute caching, assume it is capable of * validating that nlink does not overflow. 
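 * (VTOVA() returns NULL whenever the attribute cache is disabled or has
 * expired, so the EMLINK seatbelt below only engages when a cached nlink
 * value is actually available.)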
*/ if (vap != NULL && vap->va_nlink >= FUSE_LINK_MAX) return EMLINK; fli.oldnodeid = VTOI(vp); fdisp_init(&fdi, 0); fuse_internal_newentry_makerequest(vnode_mount(tdvp), VTOI(tdvp), cnp, FUSE_LINK, &fli, sizeof(fli), &fdi); if ((err = fdisp_wait_answ(&fdi))) { goto out; } feo = fdi.answ; err = fuse_internal_checkentry(feo, vnode_vtype(vp)); if (!err) { /* * Purge the parent's attribute cache because the daemon * should've updated its mtime and ctime */ fuse_vnode_clear_attr_cache(tdvp); fuse_internal_cache_attrs(vp, &feo->attr, feo->attr_valid, feo->attr_valid_nsec, NULL, true); } out: fdisp_destroy(&fdi); return err; } struct fuse_lookup_alloc_arg { struct fuse_entry_out *feo; struct componentname *cnp; uint64_t nid; enum vtype vtyp; }; /* Callback for vn_get_ino */ static int fuse_lookup_alloc(struct mount *mp, void *arg, int lkflags, struct vnode **vpp) { struct fuse_lookup_alloc_arg *flaa = arg; return fuse_vnode_get(mp, flaa->feo, flaa->nid, NULL, vpp, flaa->cnp, flaa->vtyp); } SDT_PROBE_DEFINE3(fusefs, , vnops, cache_lookup, "int", "struct timespec*", "struct timespec*"); /* struct vnop_lookup_args { struct vnodeop_desc *a_desc; struct vnode *a_dvp; struct vnode **a_vpp; struct componentname *a_cnp; }; */ int fuse_vnop_lookup(struct vop_lookup_args *ap) { struct vnode *dvp = ap->a_dvp; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; struct thread *td = cnp->cn_thread; struct ucred *cred = cnp->cn_cred; int nameiop = cnp->cn_nameiop; int flags = cnp->cn_flags; int wantparent = flags & (LOCKPARENT | WANTPARENT); int islastcn = flags & ISLASTCN; struct mount *mp = vnode_mount(dvp); struct fuse_data *data = fuse_get_mpdata(mp); int default_permissions = data->dataflags & FSESS_DEFAULT_PERMISSIONS; int err = 0; int lookup_err = 0; struct vnode *vp = NULL; struct fuse_dispatcher fdi; bool did_lookup = false; struct fuse_entry_out *feo = NULL; enum vtype vtyp; /* vnode type of target */ off_t filesize; /* filesize of target */ uint64_t nid; if (fuse_isdeadfs(dvp)) { *vpp = NULL; return ENXIO; } if (!vnode_isdir(dvp)) return ENOTDIR; if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) return EROFS; if ((cnp->cn_flags & NOEXECCHECK) != 0) cnp->cn_flags &= ~NOEXECCHECK; else if ((err = fuse_internal_access(dvp, VEXEC, td, cred))) return err; if (flags & ISDOTDOT) { KASSERT(VTOFUD(dvp)->flag & FN_PARENT_NID, ("Looking up .. is TODO")); nid = VTOFUD(dvp)->parent_nid; if (nid == 0) return ENOENT; /* .. is obviously a directory */ vtyp = VDIR; filesize = 0; } else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') { nid = VTOI(dvp); /* . 
is obviously a directory */ vtyp = VDIR; filesize = 0; } else { struct timespec now, timeout; int ncpticks; /* here to accommodate the API contract */ err = cache_lookup(dvp, vpp, cnp, &timeout, &ncpticks); getnanouptime(&now); SDT_PROBE3(fusefs, , vnops, cache_lookup, err, &timeout, &now); switch (err) { case -1: /* positive match */ if (timespeccmp(&timeout, &now, >)) { counter_u64_add(fuse_lookup_cache_hits, 1); } else { /* Cache timeout */ counter_u64_add(fuse_lookup_cache_misses, 1); bintime_clear( &VTOFUD(*vpp)->entry_cache_timeout); cache_purge(*vpp); if (dvp != *vpp) vput(*vpp); else vrele(*vpp); *vpp = NULL; break; } return 0; case 0: /* no match in cache */ counter_u64_add(fuse_lookup_cache_misses, 1); break; case ENOENT: /* negative match */ if (timespeccmp(&timeout, &now, <=)) { /* Cache timeout */ cache_purge_negative(dvp); break; } /* fall through */ default: return err; } nid = VTOI(dvp); fdisp_init(&fdi, cnp->cn_namelen + 1); fdisp_make(&fdi, FUSE_LOOKUP, mp, nid, td, cred); memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdi.indata)[cnp->cn_namelen] = '\0'; lookup_err = fdisp_wait_answ(&fdi); did_lookup = true; if (!lookup_err) { /* lookup call succeeded */ feo = (struct fuse_entry_out *)fdi.answ; nid = feo->nodeid; if (nid == 0) { /* zero nodeid means ENOENT and cache it */ struct timespec timeout; fdi.answ_stat = ENOENT; lookup_err = ENOENT; if (cnp->cn_flags & MAKEENTRY) { fuse_validity_2_timespec(feo, &timeout); cache_enter_time(dvp, *vpp, cnp, &timeout, NULL); } } else if (nid == FUSE_ROOT_ID) { lookup_err = EINVAL; } vtyp = IFTOVT(feo->attr.mode); filesize = feo->attr.size; } if (lookup_err && (!fdi.answ_stat || lookup_err != ENOENT)) { fdisp_destroy(&fdi); return lookup_err; } } /* lookup_err, if non-zero, must be ENOENT at this point */ if (lookup_err) { /* Entry not found */ if ((nameiop == CREATE || nameiop == RENAME) && islastcn) { if (default_permissions) err = fuse_internal_access(dvp, VWRITE, td, cred); else err = 0; if (!err) { /* * Set the SAVENAME flag to hold onto the * pathname for use later in VOP_CREATE or * VOP_RENAME. */ cnp->cn_flags |= SAVENAME; err = EJUSTRETURN; } } else { err = ENOENT; } } else { /* Entry was found */ if (flags & ISDOTDOT) { struct fuse_lookup_alloc_arg flaa; flaa.nid = nid; flaa.feo = feo; flaa.cnp = cnp; flaa.vtyp = vtyp; err = vn_vget_ino_gen(dvp, fuse_lookup_alloc, &flaa, 0, &vp); *vpp = vp; } else if (nid == VTOI(dvp)) { vref(dvp); *vpp = dvp; } else { struct fuse_vnode_data *fvdat; err = fuse_vnode_get(vnode_mount(dvp), feo, nid, dvp, &vp, cnp, vtyp); if (err) goto out; *vpp = vp; fvdat = VTOFUD(vp); MPASS(feo != NULL); fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid, feo->attr_valid_nsec, NULL, true); fuse_validity_2_bintime(feo->entry_valid, feo->entry_valid_nsec, &fvdat->entry_cache_timeout); if ((nameiop == DELETE || nameiop == RENAME) && islastcn && default_permissions) { struct vattr dvattr; err = fuse_internal_access(dvp, VWRITE, td, cred); if (err != 0) goto out; /* * If the parent's sticky bit is set, check * whether we're allowed to remove the file. * Need to figure out the vnode locking to make * this work.
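 * (The test below mirrors the classic sticky-directory rule: removal
 * requires VADMIN rights, i.e. ownership, on either the directory or
 * the file itself.)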
*/ fuse_internal_getattr(dvp, &dvattr, cred, td); if ((dvattr.va_mode & S_ISTXT) && fuse_internal_access(dvp, VADMIN, td, cred) && fuse_internal_access(*vpp, VADMIN, td, cred)) { err = EPERM; goto out; } } if (islastcn && ( (nameiop == DELETE) || (nameiop == RENAME && wantparent))) { cnp->cn_flags |= SAVENAME; } } } out: if (err) { if (vp != NULL && dvp != vp) vput(vp); else if (vp != NULL) vrele(vp); *vpp = NULL; } if (did_lookup) fdisp_destroy(&fdi); return err; } /* struct vnop_mkdir_args { struct vnode *a_dvp; struct vnode **a_vpp; struct componentname *a_cnp; struct vattr *a_vap; }; */ static int fuse_vnop_mkdir(struct vop_mkdir_args *ap) { struct vnode *dvp = ap->a_dvp; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; struct vattr *vap = ap->a_vap; struct fuse_mkdir_in fmdi; if (fuse_isdeadfs(dvp)) { return ENXIO; } fmdi.mode = MAKEIMODE(vap->va_type, vap->va_mode); fmdi.umask = curthread->td_proc->p_pd->pd_cmask; return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKDIR, &fmdi, sizeof(fmdi), VDIR)); } /* struct vnop_mknod_args { struct vnode *a_dvp; struct vnode **a_vpp; struct componentname *a_cnp; struct vattr *a_vap; }; */ static int fuse_vnop_mknod(struct vop_mknod_args *ap) { struct vnode *dvp = ap->a_dvp; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; struct vattr *vap = ap->a_vap; if (fuse_isdeadfs(dvp)) return ENXIO; return fuse_internal_mknod(dvp, vpp, cnp, vap); } /* struct vop_open_args { struct vnode *a_vp; int a_mode; struct ucred *a_cred; struct thread *a_td; int a_fdidx; / struct file *a_fp; }; */ static int fuse_vnop_open(struct vop_open_args *ap) { struct vnode *vp = ap->a_vp; int a_mode = ap->a_mode; struct thread *td = ap->a_td; struct ucred *cred = ap->a_cred; pid_t pid = td->td_proc->p_pid; struct fuse_vnode_data *fvdat; if (fuse_isdeadfs(vp)) return ENXIO; if (vp->v_type == VCHR || vp->v_type == VBLK || vp->v_type == VFIFO) return (EOPNOTSUPP); if ((a_mode & (FREAD | FWRITE | FEXEC)) == 0) return EINVAL; fvdat = VTOFUD(vp); if (fuse_filehandle_validrw(vp, a_mode, cred, pid)) { fuse_vnode_open(vp, 0, td); return 0; } return fuse_filehandle_open(vp, a_mode, NULL, td, cred); } static int fuse_vnop_pathconf(struct vop_pathconf_args *ap) { struct vnode *vp = ap->a_vp; struct mount *mp; switch (ap->a_name) { case _PC_FILESIZEBITS: *ap->a_retval = 64; return (0); case _PC_NAME_MAX: *ap->a_retval = NAME_MAX; return (0); case _PC_LINK_MAX: *ap->a_retval = MIN(LONG_MAX, FUSE_LINK_MAX); return (0); case _PC_SYMLINK_MAX: *ap->a_retval = MAXPATHLEN; return (0); case _PC_NO_TRUNC: *ap->a_retval = 1; return (0); case _PC_MIN_HOLE_SIZE: /* * The FUSE protocol provides no mechanism for a server to * report _PC_MIN_HOLE_SIZE. It's a protocol bug. Instead, * return EINVAL if the server does not support FUSE_LSEEK, or * 1 if it does. */ mp = vnode_mount(vp); if (!fsess_is_impl(mp, FUSE_LSEEK) && !fsess_not_impl(mp, FUSE_LSEEK)) { off_t offset = 0; /* Issue a FUSE_LSEEK to find out if it's implemented */ fuse_vnop_do_lseek(vp, curthread, curthread->td_ucred, curthread->td_proc->p_pid, &offset, SEEK_DATA); } if (fsess_is_impl(mp, FUSE_LSEEK)) { *ap->a_retval = 1; return (0); } else { /* * Probably FUSE_LSEEK is not implemented. It might * be, if the FUSE_LSEEK above returned an error like * EACCES, but in that case we can't tell, so it's * safest to report EINVAL anyway. 
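 * (From the application's point of view this is indistinguishable from
 * any other unsupported name: pathconf(2) fails with EINVAL, so callers
 * probing for hole support simply fall back to treating files as dense.)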
*/ return (EINVAL); } default: return (vop_stdpathconf(ap)); } } /* struct vnop_read_args { struct vnode *a_vp; struct uio *a_uio; int a_ioflag; struct ucred *a_cred; }; */ static int fuse_vnop_read(struct vop_read_args *ap) { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; int ioflag = ap->a_ioflag; struct ucred *cred = ap->a_cred; pid_t pid = curthread->td_proc->p_pid; if (fuse_isdeadfs(vp)) { return ENXIO; } if (VTOFUD(vp)->flag & FN_DIRECTIO) { ioflag |= IO_DIRECT; } return fuse_io_dispatch(vp, uio, ioflag, cred, pid); } /* struct vnop_readdir_args { struct vnode *a_vp; struct uio *a_uio; struct ucred *a_cred; int *a_eofflag; int *a_ncookies; u_long **a_cookies; }; */ static int fuse_vnop_readdir(struct vop_readdir_args *ap) { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; struct ucred *cred = ap->a_cred; struct fuse_filehandle *fufh = NULL; struct fuse_iov cookediov; int err = 0; u_long *cookies; off_t startoff; ssize_t tresid; int ncookies; bool closefufh = false; pid_t pid = curthread->td_proc->p_pid; if (ap->a_eofflag) *ap->a_eofflag = 0; if (fuse_isdeadfs(vp)) { return ENXIO; } if ( /* XXXIP ((uio_iovcnt(uio) > 1)) || */ (uio_resid(uio) < sizeof(struct dirent))) { return EINVAL; } tresid = uio->uio_resid; startoff = uio->uio_offset; err = fuse_filehandle_get_dir(vp, &fufh, cred, pid); if (err == EBADF && vnode_mount(vp)->mnt_flag & MNT_EXPORTED) { /* * nfsd will do VOP_READDIR without first doing VOP_OPEN. We * must implicitly open the directory here */ err = fuse_filehandle_open(vp, FREAD, &fufh, curthread, cred); if (err == 0) { /* * When a directory is opened, it must be read from * the beginning. Hopefully, the "startoff" still * exists as an offset cookie for the directory. * If not, it will read the entire directory without * returning any entries and just return eof. 
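 * (In other words, resuming at "startoff" is best-effort: a server that
 * invalidates directory offsets across opens will just report EOF early
 * rather than returning stale entries.)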
*/ uio->uio_offset = 0; } closefufh = true; } if (err) return (err); if (ap->a_ncookies != NULL) { ncookies = uio->uio_resid / (offsetof(struct dirent, d_name) + 4) + 1; cookies = malloc(ncookies * sizeof(*cookies), M_TEMP, M_WAITOK); *ap->a_ncookies = ncookies; *ap->a_cookies = cookies; } else { ncookies = 0; cookies = NULL; } #define DIRCOOKEDSIZE FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + MAXNAMLEN + 1) fiov_init(&cookediov, DIRCOOKEDSIZE); err = fuse_internal_readdir(vp, uio, startoff, fufh, &cookediov, &ncookies, cookies); fiov_teardown(&cookediov); if (closefufh) fuse_filehandle_close(vp, fufh, curthread, cred); if (ap->a_ncookies != NULL) { if (err == 0) { *ap->a_ncookies -= ncookies; } else { free(*ap->a_cookies, M_TEMP); *ap->a_ncookies = 0; *ap->a_cookies = NULL; } } if (err == 0 && tresid == uio->uio_resid) *ap->a_eofflag = 1; return err; } /* struct vnop_readlink_args { struct vnode *a_vp; struct uio *a_uio; struct ucred *a_cred; }; */ static int fuse_vnop_readlink(struct vop_readlink_args *ap) { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; struct ucred *cred = ap->a_cred; struct fuse_dispatcher fdi; int err; if (fuse_isdeadfs(vp)) { return ENXIO; } if (!vnode_islnk(vp)) { return EINVAL; } fdisp_init(&fdi, 0); err = fdisp_simple_putget_vp(&fdi, FUSE_READLINK, vp, curthread, cred); if (err) { goto out; } if (((char *)fdi.answ)[0] == '/' && fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_PUSH_SYMLINKS_IN) { char *mpth = vnode_mount(vp)->mnt_stat.f_mntonname; err = uiomove(mpth, strlen(mpth), uio); } if (!err) { err = uiomove(fdi.answ, fdi.iosize, uio); } out: fdisp_destroy(&fdi); return err; } /* struct vnop_reclaim_args { struct vnode *a_vp; }; */ static int fuse_vnop_reclaim(struct vop_reclaim_args *ap) { struct vnode *vp = ap->a_vp; struct thread *td = curthread; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_filehandle *fufh, *fufh_tmp; if (!fvdat) { panic("FUSE: no vnode data during recycling"); } LIST_FOREACH_SAFE(fufh, &fvdat->handles, next, fufh_tmp) { printf("FUSE: vnode being reclaimed with open fufh " "(type=%#x)", fufh->fufh_type); fuse_filehandle_close(vp, fufh, td, NULL); } if (!fuse_isdeadfs(vp) && fvdat->nlookup > 0) { fuse_internal_forget_send(vnode_mount(vp), td, NULL, VTOI(vp), fvdat->nlookup); } cache_purge(vp); vfs_hash_remove(vp); fuse_vnode_destroy(vp); return 0; } /* struct vnop_remove_args { struct vnode *a_dvp; struct vnode *a_vp; struct componentname *a_cnp; }; */ static int fuse_vnop_remove(struct vop_remove_args *ap) { struct vnode *dvp = ap->a_dvp; struct vnode *vp = ap->a_vp; struct componentname *cnp = ap->a_cnp; int err; if (fuse_isdeadfs(vp)) { return ENXIO; } if (vnode_isdir(vp)) { return EPERM; } err = fuse_internal_remove(dvp, vp, cnp, FUSE_UNLINK); return err; } /* struct vnop_rename_args { struct vnode *a_fdvp; struct vnode *a_fvp; struct componentname *a_fcnp; struct vnode *a_tdvp; struct vnode *a_tvp; struct componentname *a_tcnp; }; */ static int fuse_vnop_rename(struct vop_rename_args *ap) { struct vnode *fdvp = ap->a_fdvp; struct vnode *fvp = ap->a_fvp; struct componentname *fcnp = ap->a_fcnp; struct vnode *tdvp = ap->a_tdvp; struct vnode *tvp = ap->a_tvp; struct componentname *tcnp = ap->a_tcnp; struct fuse_data *data; bool newparent = fdvp != tdvp; bool isdir = fvp->v_type == VDIR; int err = 0; if (fuse_isdeadfs(fdvp)) { return ENXIO; } if (fvp->v_mount != tdvp->v_mount || (tvp && fvp->v_mount != tvp->v_mount)) { SDT_PROBE2(fusefs, , vnops, trace, 1, "cross-device rename"); err = EXDEV; goto out; } 
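/*
 * Purge fvp's name-cache entries up front; they resolve its old path
 * and are stale no matter how the rename turns out.
 */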
cache_purge(fvp); /* * FUSE library is expected to check if target directory is not * under the source directory in the file system tree. * Linux performs this check at VFS level. */ /* * If source is a directory, and it will get a new parent, user must * have write permission to it, so ".." can be modified. */ data = fuse_get_mpdata(vnode_mount(tdvp)); if (data->dataflags & FSESS_DEFAULT_PERMISSIONS && isdir && newparent) { err = fuse_internal_access(fvp, VWRITE, tcnp->cn_thread, tcnp->cn_cred); if (err) goto out; } sx_xlock(&data->rename_lock); err = fuse_internal_rename(fdvp, fcnp, tdvp, tcnp); if (err == 0) { if (tdvp != fdvp) fuse_vnode_setparent(fvp, tdvp); if (tvp != NULL) fuse_vnode_setparent(tvp, NULL); } sx_unlock(&data->rename_lock); if (tvp != NULL && tvp != fvp) { cache_purge(tvp); } if (vnode_isdir(fvp)) { if ((tvp != NULL) && vnode_isdir(tvp)) { cache_purge(tdvp); } cache_purge(fdvp); } out: if (tdvp == tvp) { vrele(tdvp); } else { vput(tdvp); } if (tvp != NULL) { vput(tvp); } vrele(fdvp); vrele(fvp); return err; } /* struct vnop_rmdir_args { struct vnode *a_dvp; struct vnode *a_vp; struct componentname *a_cnp; } *ap; */ static int fuse_vnop_rmdir(struct vop_rmdir_args *ap) { struct vnode *dvp = ap->a_dvp; struct vnode *vp = ap->a_vp; int err; if (fuse_isdeadfs(vp)) { return ENXIO; } if (VTOFUD(vp) == VTOFUD(dvp)) { return EINVAL; } err = fuse_internal_remove(dvp, vp, ap->a_cnp, FUSE_RMDIR); return err; } /* struct vnop_setattr_args { struct vnode *a_vp; struct vattr *a_vap; struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_setattr(struct vop_setattr_args *ap) { struct vnode *vp = ap->a_vp; struct vattr *vap = ap->a_vap; struct ucred *cred = ap->a_cred; struct thread *td = curthread; struct mount *mp; struct fuse_data *data; struct vattr old_va; int dataflags; int err = 0, err2; accmode_t accmode = 0; bool checkperm; bool drop_suid = false; gid_t cr_gid; mp = vnode_mount(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; checkperm = dataflags & FSESS_DEFAULT_PERMISSIONS; if (cred->cr_ngroups > 0) cr_gid = cred->cr_groups[0]; else cr_gid = 0; if (fuse_isdeadfs(vp)) { return ENXIO; } if (vap->va_uid != (uid_t)VNOVAL) { if (checkperm) { /* Only root may change a file's owner */ err = priv_check_cred(cred, PRIV_VFS_CHOWN); if (err) { /* As a special case, allow the null chown */ err2 = fuse_internal_getattr(vp, &old_va, cred, td); if (err2) return (err2); if (vap->va_uid != old_va.va_uid) return err; else accmode |= VADMIN; drop_suid = true; } else accmode |= VADMIN; } else accmode |= VADMIN; } if (vap->va_gid != (gid_t)VNOVAL) { if (checkperm && priv_check_cred(cred, PRIV_VFS_CHOWN)) drop_suid = true; if (checkperm && !groupmember(vap->va_gid, cred)) { /* * Non-root users may only chgrp to one of their own * groups */ err = priv_check_cred(cred, PRIV_VFS_CHOWN); if (err) { /* As a special case, allow the null chgrp */ err2 = fuse_internal_getattr(vp, &old_va, cred, td); if (err2) return (err2); if (vap->va_gid != old_va.va_gid) return err; accmode |= VADMIN; } else accmode |= VADMIN; } else accmode |= VADMIN; } if (vap->va_size != VNOVAL) { switch (vp->v_type) { case VDIR: return (EISDIR); case VLNK: case VREG: if (vfs_isrdonly(mp)) return (EROFS); break; default: /* * According to POSIX, the result is unspecified * for file types other than regular files, * directories and shared memory objects. We * don't support shared memory objects in the file * system, and have dubious support for truncating * symlinks. 
Just ignore the request in other cases. */ return (0); } /* Don't set accmode. Permission to trunc is checked upstack */ } if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { if (vap->va_vaflags & VA_UTIMES_NULL) accmode |= VWRITE; else accmode |= VADMIN; } if (drop_suid) { if (vap->va_mode != (mode_t)VNOVAL) vap->va_mode &= ~(S_ISUID | S_ISGID); else { err = fuse_internal_getattr(vp, &old_va, cred, td); if (err) return (err); vap->va_mode = old_va.va_mode & ~(S_ISUID | S_ISGID); } } if (vap->va_mode != (mode_t)VNOVAL) { /* Only root may set the sticky bit on non-directories */ if (checkperm && vp->v_type != VDIR && (vap->va_mode & S_ISTXT) && priv_check_cred(cred, PRIV_VFS_STICKYFILE)) return EFTYPE; if (checkperm && (vap->va_mode & S_ISGID)) { err = fuse_internal_getattr(vp, &old_va, cred, td); if (err) return (err); if (!groupmember(old_va.va_gid, cred)) { err = priv_check_cred(cred, PRIV_VFS_SETGID); if (err) return (err); } } accmode |= VADMIN; } if (vfs_isrdonly(mp)) return EROFS; if (checkperm) { err = fuse_internal_access(vp, accmode, td, cred); } else { err = 0; } if (err) return err; else return fuse_internal_setattr(vp, vap, td, cred); } /* struct vnop_strategy_args { struct vnode *a_vp; struct buf *a_bp; }; */ static int fuse_vnop_strategy(struct vop_strategy_args *ap) { struct vnode *vp = ap->a_vp; struct buf *bp = ap->a_bp; if (!vp || fuse_isdeadfs(vp)) { bp->b_ioflags |= BIO_ERROR; bp->b_error = ENXIO; bufdone(bp); return 0; } /* * VOP_STRATEGY always returns zero and signals error via bp->b_ioflags. * fuse_io_strategy sets bp's error fields */ (void)fuse_io_strategy(vp, bp); return 0; } /* struct vnop_symlink_args { struct vnode *a_dvp; struct vnode **a_vpp; struct componentname *a_cnp; struct vattr *a_vap; char *a_target; }; */ static int fuse_vnop_symlink(struct vop_symlink_args *ap) { struct vnode *dvp = ap->a_dvp; struct vnode **vpp = ap->a_vpp; struct componentname *cnp = ap->a_cnp; const char *target = ap->a_target; struct fuse_dispatcher fdi; int err; size_t len; if (fuse_isdeadfs(dvp)) { return ENXIO; } /* * Unlike the other creator type calls, here we have to create a message * where the name of the new entry comes first, and the data describing * the entry comes second. * Hence we can't rely on our handy fuse_internal_newentry() routine, * but put together the message manually and just call the core part. 
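 * The resulting FUSE_SYMLINK payload is two consecutive NUL-terminated
 * strings, the new entry's name followed by the link target, which is
 * exactly what the two memcpy() calls below assemble.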
*/ len = strlen(target) + 1; fdisp_init(&fdi, len + cnp->cn_namelen + 1); fdisp_make_vp(&fdi, FUSE_SYMLINK, dvp, curthread, NULL); memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdi.indata)[cnp->cn_namelen] = '\0'; memcpy((char *)fdi.indata + cnp->cn_namelen + 1, target, len); err = fuse_internal_newentry_core(dvp, vpp, cnp, VLNK, &fdi); fdisp_destroy(&fdi); return err; } /* struct vnop_write_args { struct vnode *a_vp; struct uio *a_uio; int a_ioflag; struct ucred *a_cred; }; */ static int fuse_vnop_write(struct vop_write_args *ap) { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; int ioflag = ap->a_ioflag; struct ucred *cred = ap->a_cred; pid_t pid = curthread->td_proc->p_pid; if (fuse_isdeadfs(vp)) { return ENXIO; } if (VTOFUD(vp)->flag & FN_DIRECTIO) { ioflag |= IO_DIRECT; } return fuse_io_dispatch(vp, uio, ioflag, cred, pid); } static daddr_t fuse_gbp_getblkno(struct vnode *vp, vm_ooffset_t off) { const int biosize = fuse_iosize(vp); return (off / biosize); } static int fuse_gbp_getblksz(struct vnode *vp, daddr_t lbn, long *blksz) { off_t filesize; int err; const int biosize = fuse_iosize(vp); err = fuse_vnode_size(vp, &filesize, NULL, NULL); if (err) { /* This will turn into a SIGBUS */ return (EIO); } else if ((off_t)lbn * biosize >= filesize) { *blksz = 0; } else if ((off_t)(lbn + 1) * biosize > filesize) { *blksz = filesize - (off_t)lbn *biosize; } else { *blksz = biosize; } return (0); } /* struct vnop_getpages_args { struct vnode *a_vp; vm_page_t *a_m; int a_count; int a_reqpage; }; */ static int fuse_vnop_getpages(struct vop_getpages_args *ap) { struct vnode *vp = ap->a_vp; if (!fsess_opt_mmap(vnode_mount(vp))) { SDT_PROBE2(fusefs, , vnops, trace, 1, "called on non-cacheable vnode??\n"); return (VM_PAGER_ERROR); } return (vfs_bio_getpages(vp, ap->a_m, ap->a_count, ap->a_rbehind, ap->a_rahead, fuse_gbp_getblkno, fuse_gbp_getblksz)); } static const char extattr_namespace_separator = '.'; /* struct vop_getextattr_args { struct vop_generic_args a_gen; struct vnode *a_vp; int a_attrnamespace; const char *a_name; struct uio *a_uio; size_t *a_size; struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_getextattr(struct vop_getextattr_args *ap) { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; struct fuse_dispatcher fdi; struct fuse_getxattr_in *get_xattr_in; struct fuse_getxattr_out *get_xattr_out; struct mount *mp = vnode_mount(vp); struct thread *td = ap->a_td; struct ucred *cred = ap->a_cred; char *prefix; char *attr_str; size_t len; int err; if (fuse_isdeadfs(vp)) return (ENXIO); if (fsess_not_impl(mp, FUSE_GETXATTR)) return EOPNOTSUPP; err = fuse_extattr_check_cred(vp, ap->a_attrnamespace, cred, td, VREAD); if (err) return err; /* Default to looking for user attributes. */ if (ap->a_attrnamespace == EXTATTR_NAMESPACE_SYSTEM) prefix = EXTATTR_NAMESPACE_SYSTEM_STRING; else prefix = EXTATTR_NAMESPACE_USER_STRING; len = strlen(prefix) + sizeof(extattr_namespace_separator) + strlen(ap->a_name) + 1; fdisp_init(&fdi, len + sizeof(*get_xattr_in)); fdisp_make_vp(&fdi, FUSE_GETXATTR, vp, td, cred); get_xattr_in = fdi.indata; /* * Check to see whether we're querying the available size or * issuing the actual request. If we pass in 0, we get back struct * fuse_getxattr_out. If we pass in a non-zero size, we get back * that much data, without the struct fuse_getxattr_out header. 
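 * (This mirrors the Linux getxattr(2) convention, where a zero-size
 * request asks only for the attribute's length.)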
*/ if (uio == NULL) get_xattr_in->size = 0; else get_xattr_in->size = uio->uio_resid; attr_str = (char *)fdi.indata + sizeof(*get_xattr_in); snprintf(attr_str, len, "%s%c%s", prefix, extattr_namespace_separator, ap->a_name); err = fdisp_wait_answ(&fdi); if (err != 0) { if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_GETXATTR); err = EOPNOTSUPP; } goto out; } get_xattr_out = fdi.answ; if (ap->a_size != NULL) *ap->a_size = get_xattr_out->size; if (uio != NULL) err = uiomove(fdi.answ, fdi.iosize, uio); out: fdisp_destroy(&fdi); return (err); } /* struct vop_setextattr_args { struct vop_generic_args a_gen; struct vnode *a_vp; int a_attrnamespace; const char *a_name; struct uio *a_uio; struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_setextattr(struct vop_setextattr_args *ap) { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; struct fuse_dispatcher fdi; struct fuse_setxattr_in *set_xattr_in; struct mount *mp = vnode_mount(vp); struct thread *td = ap->a_td; struct ucred *cred = ap->a_cred; char *prefix; size_t len; char *attr_str; int err; if (fuse_isdeadfs(vp)) return (ENXIO); if (fsess_not_impl(mp, FUSE_SETXATTR)) return EOPNOTSUPP; if (vfs_isrdonly(mp)) return EROFS; /* Deleting xattrs must use VOP_DELETEEXTATTR instead */ if (ap->a_uio == NULL) { /* * If we got here as fallback from VOP_DELETEEXTATTR, then * return EOPNOTSUPP. */ if (fsess_not_impl(mp, FUSE_REMOVEXATTR)) return (EOPNOTSUPP); else return (EINVAL); } err = fuse_extattr_check_cred(vp, ap->a_attrnamespace, cred, td, VWRITE); if (err) return err; /* Default to looking for user attributes. */ if (ap->a_attrnamespace == EXTATTR_NAMESPACE_SYSTEM) prefix = EXTATTR_NAMESPACE_SYSTEM_STRING; else prefix = EXTATTR_NAMESPACE_USER_STRING; len = strlen(prefix) + sizeof(extattr_namespace_separator) + strlen(ap->a_name) + 1; fdisp_init(&fdi, len + sizeof(*set_xattr_in) + uio->uio_resid); fdisp_make_vp(&fdi, FUSE_SETXATTR, vp, td, cred); set_xattr_in = fdi.indata; set_xattr_in->size = uio->uio_resid; attr_str = (char *)fdi.indata + sizeof(*set_xattr_in); snprintf(attr_str, len, "%s%c%s", prefix, extattr_namespace_separator, ap->a_name); err = uiomove((char *)fdi.indata + sizeof(*set_xattr_in) + len, uio->uio_resid, uio); if (err != 0) { goto out; } err = fdisp_wait_answ(&fdi); if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_SETXATTR); err = EOPNOTSUPP; } if (err == ERESTART) { /* Can't restart after calling uiomove */ err = EINTR; } out: fdisp_destroy(&fdi); return (err); } /* * The Linux / FUSE extended attribute list is simply a collection of * NUL-terminated strings. The FreeBSD extended attribute list is a single * byte length followed by a non-NUL terminated string. So, this allows * conversion of the Linux / FUSE format to the FreeBSD format in place. * Linux attribute names are reported with the namespace as a prefix (e.g. * "user.attribute_name"), but in FreeBSD they are reported without the * namespace prefix (e.g. "attribute_name"). So, we're going from: * * user.attr_name1\0user.attr_name2\0 * * to: * * attr_name1attr_name2 * * Where "" is a single byte number of characters in the attribute name. 
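 * For example, "user.ab\0user.c\0" becomes "\x02ab\x01c": each name
 * loses its "user." prefix and gains a one-byte length prefix.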
* * Args: * prefix - exattr namespace prefix string * list, list_len - input list with namespace prefixes * bsd_list, bsd_list_len - output list compatible with bsd vfs */ static int fuse_xattrlist_convert(char *prefix, const char *list, int list_len, char *bsd_list, int *bsd_list_len) { int len, pos, dist_to_next, prefix_len; pos = 0; *bsd_list_len = 0; prefix_len = strlen(prefix); while (pos < list_len && list[pos] != '\0') { dist_to_next = strlen(&list[pos]) + 1; if (bcmp(&list[pos], prefix, prefix_len) == 0 && list[pos + prefix_len] == extattr_namespace_separator) { len = dist_to_next - (prefix_len + sizeof(extattr_namespace_separator)) - 1; if (len >= EXTATTR_MAXNAMELEN) return (ENAMETOOLONG); bsd_list[*bsd_list_len] = len; memcpy(&bsd_list[*bsd_list_len + 1], &list[pos + prefix_len + sizeof(extattr_namespace_separator)], len); *bsd_list_len += len + 1; } pos += dist_to_next; } return (0); } /* * List extended attributes * * The FUSE_LISTXATTR operation is based on Linux's listxattr(2) syscall, which * has a number of differences compared to its FreeBSD equivalent, * extattr_list_file: * * - FUSE_LISTXATTR returns all extended attributes across all namespaces, * whereas listxattr(2) only returns attributes for a single namespace * - FUSE_LISTXATTR prepends each attribute name with "namespace." * - If the provided buffer is not large enough to hold the result, * FUSE_LISTXATTR should return ERANGE, whereas listxattr is expected to * return as many results as will fit. */ /* struct vop_listextattr_args { struct vop_generic_args a_gen; struct vnode *a_vp; int a_attrnamespace; struct uio *a_uio; size_t *a_size; struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_listextattr(struct vop_listextattr_args *ap) { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; struct fuse_dispatcher fdi; struct fuse_listxattr_in *list_xattr_in; struct fuse_listxattr_out *list_xattr_out; struct mount *mp = vnode_mount(vp); struct thread *td = ap->a_td; struct ucred *cred = ap->a_cred; char *prefix; char *bsd_list = NULL; char *linux_list; int bsd_list_len; int linux_list_len; int err; if (fuse_isdeadfs(vp)) return (ENXIO); if (fsess_not_impl(mp, FUSE_LISTXATTR)) return EOPNOTSUPP; err = fuse_extattr_check_cred(vp, ap->a_attrnamespace, cred, td, VREAD); if (err) return err; /* * Add space for a NUL and the period separator if enabled. * Default to looking for user attributes. */ if (ap->a_attrnamespace == EXTATTR_NAMESPACE_SYSTEM) prefix = EXTATTR_NAMESPACE_SYSTEM_STRING; else prefix = EXTATTR_NAMESPACE_USER_STRING; fdisp_init(&fdi, sizeof(*list_xattr_in)); fdisp_make_vp(&fdi, FUSE_LISTXATTR, vp, td, cred); /* * Retrieve Linux / FUSE compatible list size. */ list_xattr_in = fdi.indata; list_xattr_in->size = 0; err = fdisp_wait_answ(&fdi); if (err != 0) { if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_LISTXATTR); err = EOPNOTSUPP; } goto out; } list_xattr_out = fdi.answ; linux_list_len = list_xattr_out->size; if (linux_list_len == 0) { if (ap->a_size != NULL) *ap->a_size = linux_list_len; goto out; } /* * Retrieve Linux / FUSE compatible list values. */ fdisp_refresh_vp(&fdi, FUSE_LISTXATTR, vp, td, cred); list_xattr_in = fdi.indata; list_xattr_in->size = linux_list_len; err = fdisp_wait_answ(&fdi); if (err == ERANGE) { /* * Race detected. The attribute list must've grown since the * first FUSE_LISTXATTR call. Start over. Go all the way back * to userland so we can process signals, if necessary, before * restarting. 
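 * (ERESTART never reaches the application; the syscall layer reissues
 * the whole operation once any pending signals have been delivered.)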
*/ err = ERESTART; goto out; } else if (err != 0) goto out; linux_list = fdi.answ; /* FUSE doesn't allow the server to return more data than requested */ if (fdi.iosize > linux_list_len) { struct fuse_data *data = fuse_get_mpdata(mp); fuse_warn(data, FSESS_WARN_LSEXTATTR_LONG, "server returned " "more extended attribute data than requested; " "should've returned ERANGE instead."); } else { /* But returning less data is fine */ linux_list_len = fdi.iosize; } /* * Retrieve the BSD compatible list values. * The Linux / FUSE attribute list format isn't the same * as FreeBSD's format. So we need to transform it into * FreeBSD's format before giving it to the user. */ bsd_list = malloc(linux_list_len, M_TEMP, M_WAITOK); err = fuse_xattrlist_convert(prefix, linux_list, linux_list_len, bsd_list, &bsd_list_len); if (err != 0) goto out; if (ap->a_size != NULL) *ap->a_size = bsd_list_len; if (uio != NULL) err = uiomove(bsd_list, bsd_list_len, uio); out: free(bsd_list, M_TEMP); fdisp_destroy(&fdi); return (err); } /* struct vop_deleteextattr_args { struct vop_generic_args a_gen; struct vnode *a_vp; int a_attrnamespace; const char *a_name; struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_deleteextattr(struct vop_deleteextattr_args *ap) { struct vnode *vp = ap->a_vp; struct fuse_dispatcher fdi; struct mount *mp = vnode_mount(vp); struct thread *td = ap->a_td; struct ucred *cred = ap->a_cred; char *prefix; size_t len; char *attr_str; int err; if (fuse_isdeadfs(vp)) return (ENXIO); if (fsess_not_impl(mp, FUSE_REMOVEXATTR)) return EOPNOTSUPP; if (vfs_isrdonly(mp)) return EROFS; err = fuse_extattr_check_cred(vp, ap->a_attrnamespace, cred, td, VWRITE); if (err) return err; /* Default to looking for user attributes. */ if (ap->a_attrnamespace == EXTATTR_NAMESPACE_SYSTEM) prefix = EXTATTR_NAMESPACE_SYSTEM_STRING; else prefix = EXTATTR_NAMESPACE_USER_STRING; len = strlen(prefix) + sizeof(extattr_namespace_separator) + strlen(ap->a_name) + 1; fdisp_init(&fdi, len); fdisp_make_vp(&fdi, FUSE_REMOVEXATTR, vp, td, cred); attr_str = fdi.indata; snprintf(attr_str, len, "%s%c%s", prefix, extattr_namespace_separator, ap->a_name); err = fdisp_wait_answ(&fdi); if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_REMOVEXATTR); err = EOPNOTSUPP; } fdisp_destroy(&fdi); return (err); } /* struct vnop_print_args { struct vnode *a_vp; }; */ static int fuse_vnop_print(struct vop_print_args *ap) { struct fuse_vnode_data *fvdat = VTOFUD(ap->a_vp); printf("nodeid: %ju, parent nodeid: %ju, nlookup: %ju, flag: %#x\n", (uintmax_t)VTOILLU(ap->a_vp), (uintmax_t)fvdat->parent_nid, (uintmax_t)fvdat->nlookup, fvdat->flag); return 0; } /* * Get an NFS filehandle for a FUSE file. * * This will only work for FUSE file systems that guarantee the uniqueness of * nodeid:generation, which most don't. 
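 * (NFS clients may cache file handles indefinitely, so the nodeid and
 * generation embedded here must stay unique for the life of the file
 * system, not just the life of the current daemon.)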
*/ /* vop_vptofh { IN struct vnode *a_vp; IN struct fid *a_fhp; }; */ static int fuse_vnop_vptofh(struct vop_vptofh_args *ap) { struct vnode *vp = ap->a_vp; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_fid *fhp = (struct fuse_fid *)(ap->a_fhp); _Static_assert(sizeof(struct fuse_fid) <= sizeof(struct fid), "FUSE fid type is too big"); struct mount *mp = vnode_mount(vp); struct fuse_data *data = fuse_get_mpdata(mp); struct vattr va; int err; if (!(data->dataflags & FSESS_EXPORT_SUPPORT)) return EOPNOTSUPP; err = fuse_internal_getattr(vp, &va, curthread->td_ucred, curthread); if (err) return err; /*ip = VTOI(ap->a_vp);*/ /*ufhp = (struct ufid *)ap->a_fhp;*/ fhp->len = sizeof(struct fuse_fid); fhp->nid = fvdat->nid; if (fvdat->generation <= UINT32_MAX) fhp->gen = fvdat->generation; else return EOVERFLOW; return (0); } diff --git a/tests/sys/fs/fusefs/cache.cc b/tests/sys/fs/fusefs/cache.cc index ac62147f15a8..4df262cecd0f 100644 --- a/tests/sys/fs/fusefs/cache.cc +++ b/tests/sys/fs/fusefs/cache.cc @@ -1,219 +1,220 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020 Alan Somers * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ extern "C" { #include #include } #include "mockfs.hh" #include "utils.hh" /* * Tests for thorny cache problems not specific to any one opcode */ using namespace testing; /* * Parameters * - reopen file - If true, close and reopen the file between reads * - cache lookups - If true, allow lookups to be cached * - cache attrs - If true, allow file attributes to be cached * - cache_mode - uncached, writeback, or writethrough * - initial size - File size before truncation * - truncated size - File size after truncation */ typedef tuple, cache_mode, ssize_t, ssize_t> CacheParam; class Cache: public FuseTest, public WithParamInterface { public: bool m_direct_io; Cache(): m_direct_io(false) {}; virtual void SetUp() { int cache_mode = get<1>(GetParam()); switch (cache_mode) { case Uncached: m_direct_io = true; break; case WritebackAsync: m_async = true; /* FALLTHROUGH */ case Writeback: m_init_flags |= FUSE_WRITEBACK_CACHE; /* FALLTHROUGH */ case Writethrough: break; default: FAIL() << "Unknown cache mode"; } + m_noatime = true; // To prevent SETATTR for atime on close FuseTest::SetUp(); if (IsSkipped()) return; } void expect_getattr(uint64_t ino, int times, uint64_t size, uint64_t attr_valid) { EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_GETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).Times(times) .WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr_valid = attr_valid; out.body.attr.attr.ino = ino; out.body.attr.attr.mode = S_IFREG | 0644; out.body.attr.attr.size = size; }))); } void expect_lookup(const char *relpath, uint64_t ino, uint64_t size, uint64_t entry_valid, uint64_t attr_valid) { EXPECT_LOOKUP(FUSE_ROOT_ID, relpath) .WillRepeatedly(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = ino; out.body.entry.attr.nlink = 1; out.body.entry.attr_valid = attr_valid; out.body.entry.attr.size = size; out.body.entry.entry_valid = entry_valid; }))); } void expect_open(uint64_t ino, int times) { FuseTest::expect_open(ino, m_direct_io ? 
FOPEN_DIRECT_IO: 0, times); } void expect_release(uint64_t ino, ProcessMockerT r) { EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_RELEASE && in.header.nodeid == ino); }, Eq(true)), _) ).WillRepeatedly(Invoke(r)); } }; // If the server truncates the file behind the kernel's back, the kernel should // invalidate cached pages beyond the new EOF TEST_P(Cache, truncate_by_surprise_invalidates_cache) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefghijklmnopqrstuvwxyz"; uint64_t ino = 42; uint64_t attr_valid, entry_valid; int fd; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; bool reopen = get<0>(get<0>(GetParam())); bool cache_lookups = get<1>(get<0>(GetParam())); bool cache_attrs = get<2>(get<0>(GetParam())); ssize_t osize = get<2>(GetParam()); ssize_t nsize = get<3>(GetParam()); ASSERT_LE(osize, bufsize); ASSERT_LE(nsize, bufsize); if (cache_attrs) attr_valid = UINT64_MAX; else attr_valid = 0; if (cache_lookups) entry_valid = UINT64_MAX; else entry_valid = 0; expect_lookup(RELPATH, ino, osize, entry_valid, attr_valid); expect_open(ino, 1); if (!cache_attrs) expect_getattr(ino, 2, osize, attr_valid); expect_read(ino, 0, osize, osize, CONTENTS); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(osize, read(fd, buf, bufsize)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, osize)); // Now truncate the file behind the kernel's back. The next read // should discard cache and fetch from disk again. if (reopen) { // Close and reopen the file expect_flush(ino, 1, ReturnErrno(ENOSYS)); expect_release(ino, ReturnErrno(0)); ASSERT_EQ(0, close(fd)); expect_lookup(RELPATH, ino, nsize, entry_valid, attr_valid); expect_open(ino, 1); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); } if (!cache_attrs) expect_getattr(ino, 1, nsize, attr_valid); expect_read(ino, 0, nsize, nsize, CONTENTS); ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)); ASSERT_EQ(nsize, read(fd, buf, bufsize)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, nsize)); leak(fd); } INSTANTIATE_TEST_CASE_P(Cache, Cache, Combine( /* Test every combination that: * - does not cache at least one of entries and attrs * - either doesn't cache attrs, or reopens the file * In the other combinations, the kernel will never learn that * the file's size has changed. */ Values( std::make_tuple(false, false, false), std::make_tuple(false, true, false), std::make_tuple(true, false, false), std::make_tuple(true, false, true), std::make_tuple(true, true, false) ), Values(Writethrough, Writeback), /* Test both reductions and extensions to file size */ Values(20), Values(10, 25) ) ); diff --git a/tests/sys/fs/fusefs/io.cc b/tests/sys/fs/fusefs/io.cc index 65ba1ea19bc3..1502bd263f51 100644 --- a/tests/sys/fs/fusefs/io.cc +++ b/tests/sys/fs/fusefs/io.cc @@ -1,520 +1,521 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * * This software was developed by BFF Storage Systems, LLC under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ extern "C" { #include #include #include #include #include #include } #include "mockfs.hh" #include "utils.hh" /* * For testing I/O like fsx does, but deterministically and without a real * underlying file system */ using namespace testing; const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; static void compare(const void *tbuf, const void *controlbuf, off_t baseofs, ssize_t size) { int i; for (i = 0; i < size; i++) { if (((const char*)tbuf)[i] != ((const char*)controlbuf)[i]) { off_t ofs = baseofs + i; FAIL() << "miscompare at offset " << std::hex << std::showbase << ofs << ". expected = " << std::setw(2) << (unsigned)((const uint8_t*)controlbuf)[i] << " got = " << (unsigned)((const uint8_t*)tbuf)[i]; } } } typedef tuple IoParam; class Io: public FuseTest, public WithParamInterface { public: int m_backing_fd, m_control_fd, m_test_fd; off_t m_filesize; bool m_direct_io; Io(): m_backing_fd(-1), m_control_fd(-1), m_test_fd(-1), m_filesize(0), m_direct_io(false) {}; void SetUp() { m_backing_fd = open("backing_file", O_RDWR | O_CREAT | O_TRUNC, 0644); if (m_backing_fd < 0) FAIL() << strerror(errno); m_control_fd = open("control", O_RDWR | O_CREAT | O_TRUNC, 0644); if (m_control_fd < 0) FAIL() << strerror(errno); srandom(22'9'1982); // Seed with my birthday if (get<0>(GetParam())) m_init_flags |= FUSE_ASYNC_READ; m_maxwrite = get<1>(GetParam()); switch (get<2>(GetParam())) { case Uncached: m_direct_io = true; break; case WritebackAsync: m_async = true; /* FALLTHROUGH */ case Writeback: m_init_flags |= FUSE_WRITEBACK_CACHE; /* FALLTHROUGH */ case Writethrough: break; default: FAIL() << "Unknown cache mode"; } + m_noatime = true; // To prevent SETATTR for atime on close FuseTest::SetUp(); if (IsSkipped()) return; if (verbosity > 0) { printf("Test Parameters: init_flags=%#x maxwrite=%#x " "%sasync cache=%s\n", m_init_flags, m_maxwrite, m_async? "" : "no", cache_mode_to_s(get<2>(GetParam()))); } expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1); expect_open(ino, m_direct_io ? 
FOPEN_DIRECT_IO : 0, 1); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_WRITE && in.header.nodeid == ino); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) { const char *buf = (const char*)in.body.bytes + sizeof(struct fuse_write_in); ssize_t isize = in.body.write.size; off_t iofs = in.body.write.offset; ASSERT_EQ(isize, pwrite(m_backing_fd, buf, isize, iofs)) << strerror(errno); SET_OUT_HEADER_LEN(out, write); out.body.write.size = isize; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ && in.header.nodeid == ino); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) { ssize_t isize = in.body.write.size; off_t iofs = in.body.write.offset; void *buf = out.body.bytes; ssize_t osize; osize = pread(m_backing_fd, buf, isize, iofs); ASSERT_LE(0, osize) << strerror(errno); out.header.len = sizeof(struct fuse_out_header) + osize; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && (in.body.setattr.valid & FATTR_SIZE)); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) { ASSERT_EQ(0, ftruncate(m_backing_fd, in.body.setattr.size)) << strerror(errno); SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; out.body.attr.attr.mode = S_IFREG | 0755; out.body.attr.attr.size = in.body.setattr.size; out.body.attr.attr_valid = UINT64_MAX; }))); /* Any test that close()s will send FUSE_FLUSH and FUSE_RELEASE */ EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_FLUSH && in.header.nodeid == ino); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnErrno(0))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_RELEASE && in.header.nodeid == ino); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnErrno(0))); m_test_fd = open(FULLPATH, O_RDWR ); EXPECT_LE(0, m_test_fd) << strerror(errno); } void TearDown() { if (m_test_fd >= 0) close(m_test_fd); if (m_backing_fd >= 0) close(m_backing_fd); if (m_control_fd >= 0) close(m_control_fd); FuseTest::TearDown(); leak(m_test_fd); } void do_closeopen() { ASSERT_EQ(0, close(m_test_fd)) << strerror(errno); m_test_fd = open("backing_file", O_RDWR); ASSERT_LE(0, m_test_fd) << strerror(errno); ASSERT_EQ(0, close(m_control_fd)) << strerror(errno); m_control_fd = open("control", O_RDWR); ASSERT_LE(0, m_control_fd) << strerror(errno); } void do_ftruncate(off_t offs) { ASSERT_EQ(0, ftruncate(m_test_fd, offs)) << strerror(errno); ASSERT_EQ(0, ftruncate(m_control_fd, offs)) << strerror(errno); m_filesize = offs; } void do_mapread(ssize_t size, off_t offs) { void *control_buf, *p; off_t pg_offset, page_mask; size_t map_size; page_mask = getpagesize() - 1; pg_offset = offs & page_mask; map_size = pg_offset + size; p = mmap(NULL, map_size, PROT_READ, MAP_FILE | MAP_SHARED, m_test_fd, offs - pg_offset); ASSERT_NE(p, MAP_FAILED) << strerror(errno); control_buf = malloc(size); ASSERT_NE(nullptr, control_buf) << strerror(errno); ASSERT_EQ(size, pread(m_control_fd, control_buf, size, offs)) << strerror(errno); compare((void*)((char*)p + pg_offset), control_buf, offs, size); ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno); free(control_buf); } void do_read(ssize_t size, off_t offs) { void *test_buf, *control_buf; ssize_t r; test_buf = malloc(size); ASSERT_NE(nullptr, test_buf) << strerror(errno); control_buf = malloc(size); ASSERT_NE(nullptr, control_buf) << 
strerror(errno); errno = 0; r = pread(m_test_fd, test_buf, size, offs); ASSERT_NE(-1, r) << strerror(errno); ASSERT_EQ(size, r) << "unexpected short read"; r = pread(m_control_fd, control_buf, size, offs); ASSERT_NE(-1, r) << strerror(errno); ASSERT_EQ(size, r) << "unexpected short read"; compare(test_buf, control_buf, offs, size); free(control_buf); free(test_buf); } void do_mapwrite(ssize_t size, off_t offs) { char *buf; void *p; off_t pg_offset, page_mask; size_t map_size; long i; page_mask = getpagesize() - 1; pg_offset = offs & page_mask; map_size = pg_offset + size; buf = (char*)malloc(size); ASSERT_NE(nullptr, buf) << strerror(errno); for (i=0; i < size; i++) buf[i] = random(); if (offs + size > m_filesize) { /* * Must manually extend. vm_mmap_vnode will not implicitly * extend a vnode */ do_ftruncate(offs + size); } p = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, m_test_fd, offs - pg_offset); ASSERT_NE(p, MAP_FAILED) << strerror(errno); bcopy(buf, (char*)p + pg_offset, size); ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs)) << strerror(errno); free(buf); ASSERT_EQ(0, munmap(p, map_size)) << strerror(errno); } void do_write(ssize_t size, off_t offs) { char *buf; long i; buf = (char*)malloc(size); ASSERT_NE(nullptr, buf) << strerror(errno); for (i=0; i < size; i++) buf[i] = random(); ASSERT_EQ(size, pwrite(m_test_fd, buf, size, offs)) << strerror(errno); ASSERT_EQ(size, pwrite(m_control_fd, buf, size, offs)) << strerror(errno); m_filesize = std::max(m_filesize, offs + size); free(buf); } }; class IoCacheable: public Io { public: virtual void SetUp() { Io::SetUp(); } }; /* * Extend a file with dirty data in the last page of the last block. * * fsx -WR -P /tmp -S8 -N3 fsx.bin */ TEST_P(Io, extend_from_dirty_page) { off_t wofs = 0x21a0; ssize_t wsize = 0xf0a8; off_t rofs = 0xb284; ssize_t rsize = 0x9b22; off_t truncsize = 0x28702; do_write(wsize, wofs); do_ftruncate(truncsize); do_read(rsize, rofs); } /* * mapwrite into a newly extended part of a file. * * fsx -c 100 -i 100 -l 524288 -o 131072 -N5 -P /tmp -S19 fsx.bin */ TEST_P(IoCacheable, extend_by_mapwrite) { do_mapwrite(0x849e, 0x29a3a); /* [0x29a3a, 0x31ed7] */ do_mapwrite(0x3994, 0x3c7d8); /* [0x3c7d8, 0x4016b] */ do_read(0xf556, 0x30c16); /* [0x30c16, 0x4016b] */ } /* * When writing the last page of a file, it must be written synchronously. * Otherwise the cached page can be invalidated by a subsequent extend * operation. * * fsx -WR -P /tmp -S642 -N3 fsx.bin */ TEST_P(Io, last_page) { do_write(0xcc77, 0x1134f); /* [0x1134f, 0x1dfc5] */ do_write(0xdfa7, 0x2096a); /* [0x2096a, 0x2e910] */ do_read(0xb5b7, 0x1a3aa); /* [0x1a3aa, 0x25960] */ } /* * Read a hole using mmap * * fsx -c 100 -i 100 -l 524288 -o 131072 -N11 -P /tmp -S14 fsx.bin */ TEST_P(IoCacheable, mapread_hole) { do_write(0x123b7, 0xf205); /* [0xf205, 0x215bb] */ do_mapread(0xeeea, 0x2f4c); /* [0x2f4c, 0x11e35] */ } /* * Read a hole from a block that contains some cached data. * * fsx -WR -P /tmp -S55 fsx.bin */ TEST_P(Io, read_hole_from_cached_block) { off_t wofs = 0x160c5; ssize_t wsize = 0xa996; off_t rofs = 0x472e; ssize_t rsize = 0xd8d5; do_write(wsize, wofs); do_read(rsize, rofs); } /* * Truncating a file into a dirty buffer should not cause anything untoward * to happen when that buffer is eventually flushed.
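 * (The second do_write() below leaves a delayed-write buffer that the
 * first do_ftruncate() then cuts into; the eventual flush must honor
 * the reduced size.)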
* * fsx -WR -P /tmp -S839 -d -N6 fsx.bin */ TEST_P(Io, truncate_into_dirty_buffer) { off_t wofs0 = 0x3bad7; ssize_t wsize0 = 0x4529; off_t wofs1 = 0xc30d; ssize_t wsize1 = 0x5f77; off_t truncsize0 = 0x10916; off_t rofs = 0xdf17; ssize_t rsize = 0x29ff; off_t truncsize1 = 0x152b4; do_write(wsize0, wofs0); do_write(wsize1, wofs1); do_ftruncate(truncsize0); do_read(rsize, rofs); do_ftruncate(truncsize1); close(m_test_fd); } /* * Truncating a file into a dirty buffer should not cause anything untoward * to happen when that buffer is eventually flushed, even when the buffer's * dirty_off is > 0. * * Based on this command with a few steps removed: * fsx -WR -P /tmp -S677 -d -N8 fsx.bin */ TEST_P(Io, truncate_into_dirty_buffer2) { off_t truncsize0 = 0x344f3; off_t wofs = 0x2790c; ssize_t wsize = 0xd86a; off_t truncsize1 = 0x2de38; off_t rofs2 = 0x1fd7a; ssize_t rsize2 = 0xc594; off_t truncsize2 = 0x31e71; /* Sets the file size to something larger than the next write */ do_ftruncate(truncsize0); /* * Creates a dirty buffer. The part in lbn 2 doesn't flush * synchronously. */ do_write(wsize, wofs); /* Truncates part of the dirty buffer created in step 2 */ do_ftruncate(truncsize1); /* XXX ?I don't know why this is necessary? */ do_read(rsize2, rofs2); /* Truncates the dirty buffer */ do_ftruncate(truncsize2); close(m_test_fd); } /* * Regression test for a bug introduced in r348931 * * Sequence of operations: * 1) The first write reads lbn so it can modify it * 2) The first write flushes lbn 3 immediately because it's the end of file * 3) The first write then flushes lbn 4 because it's the end of the file * 4) The second write modifies the cached versions of lbn 3 and 4 * 5) The third write's getblkx invalidates lbn 4's B_CACHE because it's * extending the buffer. Then it flushes lbn 4 because B_DELWRI was set but * B_CACHE was clear. * 6) fuse_write_biobackend erroneously called vfs_bio_clrbuf, putting the * buffer into a weird write-only state. All read operations would return * 0. Writes were apparently still processed, because the buffer's contents * were correct when examined in a core dump. * 7) The third write reads lbn 4 because cache is clear * 8) uiomove dutifully copies new data into the buffer * 9) The buffer's dirty data is flushed to lbn 4 * 10) The read returns all zeros because of step 6. * * Based on: * fsx -WR -l 524388 -o 131072 -P /tmp -S6456 -q fsx.bin */ TEST_P(Io, resize_a_valid_buffer_while_extending) { do_write(0x14530, 0x36ee6); /* [0x36ee6, 0x4b415] */ do_write(0x1507c, 0x33256); /* [0x33256, 0x482d1] */ do_write(0x175c, 0x4c03d); /* [0x4c03d, 0x4d798] */ do_read(0xe277, 0x3599c); /* [0x3599c, 0x43c12] */ close(m_test_fd); } INSTANTIATE_TEST_CASE_P(Io, Io, Combine(Bool(), /* async read */ Values(0x1000, 0x10000, 0x20000), /* m_maxwrite */ Values(Uncached, Writethrough, Writeback, WritebackAsync) ) ); INSTANTIATE_TEST_CASE_P(Io, IoCacheable, Combine(Bool(), /* async read */ Values(0x1000, 0x10000, 0x20000), /* m_maxwrite */ Values(Writethrough, Writeback, WritebackAsync) ) ); diff --git a/tests/sys/fs/fusefs/mockfs.cc b/tests/sys/fs/fusefs/mockfs.cc index 99f7ccc61273..8a2d1f910867 100644 --- a/tests/sys/fs/fusefs/mockfs.cc +++ b/tests/sys/fs/fusefs/mockfs.cc @@ -1,1003 +1,1007 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * * This software was developed by BFF Storage Systems, LLC under sponsorship * from the FreeBSD Foundation.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ extern "C" { #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mntopts.h" // for build_iovec } #include #include #include "mockfs.hh" using namespace testing; int verbosity = 0; const char* opcode2opname(uint32_t opcode) { const char* table[] = { "Unknown (opcode 0)", "LOOKUP", "FORGET", "GETATTR", "SETATTR", "READLINK", "SYMLINK", "Unknown (opcode 7)", "MKNOD", "MKDIR", "UNLINK", "RMDIR", "RENAME", "LINK", "OPEN", "READ", "WRITE", "STATFS", "RELEASE", "Unknown (opcode 19)", "FSYNC", "SETXATTR", "GETXATTR", "LISTXATTR", "REMOVEXATTR", "FLUSH", "INIT", "OPENDIR", "READDIR", "RELEASEDIR", "FSYNCDIR", "GETLK", "SETLK", "SETLKW", "ACCESS", "CREATE", "INTERRUPT", "BMAP", "DESTROY", "IOCTL", "POLL", "NOTIFY_REPLY", "BATCH_FORGET", "FALLOCATE", "READDIRPLUS", "RENAME2", "LSEEK", "COPY_FILE_RANGE", }; if (opcode >= nitems(table)) return ("Unknown (opcode > max)"); else return (table[opcode]); } ProcessMockerT ReturnErrno(int error) { return([=](auto in, auto &out) { std::unique_ptr out0(new mockfs_buf_out); out0->header.unique = in.header.unique; out0->header.error = -error; out0->header.len = sizeof(out0->header); out.push_back(std::move(out0)); }); } /* Helper function used for returning negative cache entries for LOOKUP */ ProcessMockerT ReturnNegativeCache(const struct timespec *entry_valid) { return([=](auto in, auto &out) { /* nodeid means ENOENT and cache it */ std::unique_ptr out0(new mockfs_buf_out); out0->body.entry.nodeid = 0; out0->header.unique = in.header.unique; out0->header.error = 0; out0->body.entry.entry_valid = entry_valid->tv_sec; out0->body.entry.entry_valid_nsec = entry_valid->tv_nsec; SET_OUT_HEADER_LEN(*out0, entry); out.push_back(std::move(out0)); }); } ProcessMockerT ReturnImmediate(std::function f) { return([=](auto& in, auto &out) { std::unique_ptr out0(new mockfs_buf_out); out0->header.unique = in.header.unique; f(in, *out0); out.push_back(std::move(out0)); }); } void sigint_handler(int __unused sig) { // Don't do anything except interrupt the daemon's read(2) call } void MockFS::debug_request(const mockfs_buf_in &in, ssize_t buflen) { printf("%-11s ino=%2" PRIu64, opcode2opname(in.header.opcode), in.header.nodeid); if (verbosity > 1) { 
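/* At higher verbosity, also dump the request's credentials and framing. */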
printf(" uid=%5u gid=%5u pid=%5u unique=%" PRIu64 " len=%u" " buflen=%zd", in.header.uid, in.header.gid, in.header.pid, in.header.unique, in.header.len, buflen); } switch (in.header.opcode) { const char *name, *value; case FUSE_ACCESS: printf(" mask=%#x", in.body.access.mask); break; case FUSE_BMAP: printf(" block=%" PRIx64 " blocksize=%#x", in.body.bmap.block, in.body.bmap.blocksize); break; case FUSE_COPY_FILE_RANGE: printf(" off_in=%" PRIu64 " ino_out=%" PRIu64 " off_out=%" PRIu64 " size=%" PRIu64, in.body.copy_file_range.off_in, in.body.copy_file_range.nodeid_out, in.body.copy_file_range.off_out, in.body.copy_file_range.len); if (verbosity > 1) printf(" fh_in=%" PRIu64 " fh_out=%" PRIu64 " flags=%" PRIx64, in.body.copy_file_range.fh_in, in.body.copy_file_range.fh_out, in.body.copy_file_range.flags); break; case FUSE_CREATE: if (m_kernel_minor_version >= 12) name = (const char*)in.body.bytes + sizeof(fuse_create_in); else name = (const char*)in.body.bytes + sizeof(fuse_open_in); printf(" flags=%#x name=%s", in.body.open.flags, name); break; case FUSE_FLUSH: printf(" fh=%#" PRIx64 " lock_owner=%" PRIu64, in.body.flush.fh, in.body.flush.lock_owner); break; case FUSE_FORGET: printf(" nlookup=%" PRIu64, in.body.forget.nlookup); break; case FUSE_FSYNC: printf(" flags=%#x", in.body.fsync.fsync_flags); break; case FUSE_FSYNCDIR: printf(" flags=%#x", in.body.fsyncdir.fsync_flags); break; case FUSE_INTERRUPT: printf(" unique=%" PRIu64, in.body.interrupt.unique); break; case FUSE_LINK: printf(" oldnodeid=%" PRIu64, in.body.link.oldnodeid); break; case FUSE_LISTXATTR: printf(" size=%" PRIu32, in.body.listxattr.size); break; case FUSE_LOOKUP: printf(" %s", in.body.lookup); break; case FUSE_LSEEK: switch (in.body.lseek.whence) { case SEEK_HOLE: printf(" SEEK_HOLE offset=%jd", in.body.lseek.offset); break; case SEEK_DATA: printf(" SEEK_DATA offset=%jd", in.body.lseek.offset); break; default: printf(" whence=%u offset=%jd", in.body.lseek.whence, in.body.lseek.offset); break; } break; case FUSE_MKDIR: name = (const char*)in.body.bytes + sizeof(fuse_mkdir_in); printf(" name=%s mode=%#o umask=%#o", name, in.body.mkdir.mode, in.body.mkdir.umask); break; case FUSE_MKNOD: if (m_kernel_minor_version >= 12) name = (const char*)in.body.bytes + sizeof(fuse_mknod_in); else name = (const char*)in.body.bytes + FUSE_COMPAT_MKNOD_IN_SIZE; printf(" mode=%#o rdev=%x umask=%#o name=%s", in.body.mknod.mode, in.body.mknod.rdev, in.body.mknod.umask, name); break; case FUSE_OPEN: printf(" flags=%#x", in.body.open.flags); break; case FUSE_OPENDIR: printf(" flags=%#x", in.body.opendir.flags); break; case FUSE_READ: printf(" offset=%" PRIu64 " size=%u", in.body.read.offset, in.body.read.size); if (verbosity > 1) printf(" flags=%#x", in.body.read.flags); break; case FUSE_READDIR: printf(" fh=%#" PRIx64 " offset=%" PRIu64 " size=%u", in.body.readdir.fh, in.body.readdir.offset, in.body.readdir.size); break; case FUSE_RELEASE: printf(" fh=%#" PRIx64 " flags=%#x lock_owner=%" PRIu64, in.body.release.fh, in.body.release.flags, in.body.release.lock_owner); break; case FUSE_SETATTR: if (verbosity <= 1) { printf(" valid=%#x", in.body.setattr.valid); break; } if (in.body.setattr.valid & FATTR_MODE) printf(" mode=%#o", in.body.setattr.mode); if (in.body.setattr.valid & FATTR_UID) printf(" uid=%u", in.body.setattr.uid); if (in.body.setattr.valid & FATTR_GID) printf(" gid=%u", in.body.setattr.gid); if (in.body.setattr.valid & FATTR_SIZE) printf(" size=%" PRIu64, in.body.setattr.size); if (in.body.setattr.valid & FATTR_ATIME) printf(" 
atime=%" PRIu64 ".%u", in.body.setattr.atime, in.body.setattr.atimensec); if (in.body.setattr.valid & FATTR_MTIME) printf(" mtime=%" PRIu64 ".%u", in.body.setattr.mtime, in.body.setattr.mtimensec); if (in.body.setattr.valid & FATTR_FH) printf(" fh=%" PRIu64 "", in.body.setattr.fh); break; case FUSE_SETLK: printf(" fh=%#" PRIx64 " owner=%" PRIu64 " type=%u pid=%u", in.body.setlk.fh, in.body.setlk.owner, in.body.setlk.lk.type, in.body.setlk.lk.pid); if (verbosity >= 2) { printf(" range=[%" PRIu64 "-%" PRIu64 "]", in.body.setlk.lk.start, in.body.setlk.lk.end); } break; case FUSE_SETXATTR: /* * In theory neither the xattr name and value need be * ASCII, but in this test suite they always are. */ name = (const char*)in.body.bytes + sizeof(fuse_setxattr_in); value = name + strlen(name) + 1; printf(" %s=%s", name, value); break; case FUSE_WRITE: printf(" fh=%#" PRIx64 " offset=%" PRIu64 " size=%u write_flags=%u", in.body.write.fh, in.body.write.offset, in.body.write.size, in.body.write.write_flags); if (verbosity > 1) printf(" flags=%#x", in.body.write.flags); break; default: break; } printf("\n"); } /* * Debug a FUSE response. * * This is mostly useful for asynchronous notifications, which don't correspond * to any request */ void MockFS::debug_response(const mockfs_buf_out &out) { const char *name; if (verbosity == 0) return; switch (out.header.error) { case FUSE_NOTIFY_INVAL_ENTRY: name = (const char*)out.body.bytes + sizeof(fuse_notify_inval_entry_out); printf("<- INVAL_ENTRY parent=%" PRIu64 " %s\n", out.body.inval_entry.parent, name); break; case FUSE_NOTIFY_INVAL_INODE: printf("<- INVAL_INODE ino=%" PRIu64 " off=%" PRIi64 " len=%" PRIi64 "\n", out.body.inval_inode.ino, out.body.inval_inode.off, out.body.inval_inode.len); break; case FUSE_NOTIFY_STORE: printf("<- STORE ino=%" PRIu64 " off=%" PRIu64 " size=%" PRIu32 "\n", out.body.store.nodeid, out.body.store.offset, out.body.store.size); break; default: break; } } MockFS::MockFS(int max_readahead, bool allow_other, bool default_permissions, bool push_symlinks_in, bool ro, enum poll_method pm, uint32_t flags, uint32_t kernel_minor_version, uint32_t max_write, bool async, - bool noclusterr, unsigned time_gran, bool nointr) + bool noclusterr, unsigned time_gran, bool nointr, bool noatime) { struct sigaction sa; struct iovec *iov = NULL; int iovlen = 0; char fdstr[15]; const bool trueval = true; m_daemon_id = NULL; m_kernel_minor_version = kernel_minor_version; m_maxreadahead = max_readahead; m_maxwrite = MIN(max_write, max_max_write); m_nready = -1; m_pm = pm; m_time_gran = time_gran; m_quit = false; m_last_unique = 0; if (m_pm == KQ) m_kq = kqueue(); else m_kq = -1; /* * Kyua sets pwd to a testcase-unique tempdir; no need to use * mkdtemp */ /* * googletest doesn't allow ASSERT_ in constructors, so we must throw * instead. 
*/ if (mkdir("mountpoint" , 0755) && errno != EEXIST) throw(std::system_error(errno, std::system_category(), "Couldn't make mountpoint directory")); switch (m_pm) { case BLOCKING: m_fuse_fd = open("/dev/fuse", O_CLOEXEC | O_RDWR); break; default: m_fuse_fd = open("/dev/fuse", O_CLOEXEC | O_RDWR | O_NONBLOCK); break; } if (m_fuse_fd < 0) throw(std::system_error(errno, std::system_category(), "Couldn't open /dev/fuse")); m_pid = getpid(); m_child_pid = -1; build_iovec(&iov, &iovlen, "fstype", __DECONST(void *, "fusefs"), -1); build_iovec(&iov, &iovlen, "fspath", __DECONST(void *, "mountpoint"), -1); build_iovec(&iov, &iovlen, "from", __DECONST(void *, "/dev/fuse"), -1); sprintf(fdstr, "%d", m_fuse_fd); build_iovec(&iov, &iovlen, "fd", fdstr, -1); if (allow_other) { build_iovec(&iov, &iovlen, "allow_other", __DECONST(void*, &trueval), sizeof(bool)); } if (default_permissions) { build_iovec(&iov, &iovlen, "default_permissions", __DECONST(void*, &trueval), sizeof(bool)); } if (push_symlinks_in) { build_iovec(&iov, &iovlen, "push_symlinks_in", __DECONST(void*, &trueval), sizeof(bool)); } if (ro) { build_iovec(&iov, &iovlen, "ro", __DECONST(void*, &trueval), sizeof(bool)); } if (async) { build_iovec(&iov, &iovlen, "async", __DECONST(void*, &trueval), sizeof(bool)); } + if (noatime) { + build_iovec(&iov, &iovlen, "noatime", + __DECONST(void*, &trueval), sizeof(bool)); + } if (noclusterr) { build_iovec(&iov, &iovlen, "noclusterr", __DECONST(void*, &trueval), sizeof(bool)); } if (nointr) { build_iovec(&iov, &iovlen, "nointr", __DECONST(void*, &trueval), sizeof(bool)); } else { build_iovec(&iov, &iovlen, "intr", __DECONST(void*, &trueval), sizeof(bool)); } if (nmount(iov, iovlen, 0)) throw(std::system_error(errno, std::system_category(), "Couldn't mount filesystem")); // Setup default handler ON_CALL(*this, process(_, _)) .WillByDefault(Invoke(this, &MockFS::process_default)); init(flags); bzero(&sa, sizeof(sa)); sa.sa_handler = sigint_handler; sa.sa_flags = 0; /* Don't set SA_RESTART! 
*/ if (0 != sigaction(SIGUSR1, &sa, NULL)) throw(std::system_error(errno, std::system_category(), "Couldn't handle SIGUSR1")); if (pthread_create(&m_daemon_id, NULL, service, (void*)this)) throw(std::system_error(errno, std::system_category(), "Couldn't start fuse thread")); } MockFS::~MockFS() { kill_daemon(); if (m_daemon_id != NULL) { pthread_join(m_daemon_id, NULL); m_daemon_id = NULL; } ::unmount("mountpoint", MNT_FORCE); rmdir("mountpoint"); if (m_kq >= 0) close(m_kq); } void MockFS::audit_request(const mockfs_buf_in &in, ssize_t buflen) { uint32_t inlen = in.header.len; size_t fih = sizeof(in.header); switch (in.header.opcode) { case FUSE_LOOKUP: case FUSE_RMDIR: case FUSE_SYMLINK: case FUSE_UNLINK: EXPECT_GT(inlen, fih) << "Missing request filename"; // No redundant information for checking buflen break; case FUSE_FORGET: EXPECT_EQ(inlen, fih + sizeof(in.body.forget)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_GETATTR: EXPECT_EQ(inlen, fih + sizeof(in.body.getattr)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_SETATTR: EXPECT_EQ(inlen, fih + sizeof(in.body.setattr)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_READLINK: EXPECT_EQ(inlen, fih) << "Unexpected request body"; EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_MKNOD: { size_t s; if (m_kernel_minor_version >= 12) s = sizeof(in.body.mknod); else s = FUSE_COMPAT_MKNOD_IN_SIZE; EXPECT_GE(inlen, fih + s) << "Missing request body"; EXPECT_GT(inlen, fih + s) << "Missing request filename"; // No redundant information for checking buflen break; } case FUSE_MKDIR: EXPECT_GE(inlen, fih + sizeof(in.body.mkdir)) << "Missing request body"; EXPECT_GT(inlen, fih + sizeof(in.body.mkdir)) << "Missing request filename"; // No redundant information for checking buflen break; case FUSE_RENAME: EXPECT_GE(inlen, fih + sizeof(in.body.rename)) << "Missing request body"; EXPECT_GT(inlen, fih + sizeof(in.body.rename)) << "Missing request filename"; // No redundant information for checking buflen break; case FUSE_LINK: EXPECT_GE(inlen, fih + sizeof(in.body.link)) << "Missing request body"; EXPECT_GT(inlen, fih + sizeof(in.body.link)) << "Missing request filename"; // No redundant information for checking buflen break; case FUSE_OPEN: EXPECT_EQ(inlen, fih + sizeof(in.body.open)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_READ: EXPECT_EQ(inlen, fih + sizeof(in.body.read)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_WRITE: { size_t s; if (m_kernel_minor_version >= 9) s = sizeof(in.body.write); else s = FUSE_COMPAT_WRITE_IN_SIZE; // I suppose a 0-byte write should be allowed EXPECT_GE(inlen, fih + s) << "Missing request body"; EXPECT_EQ((size_t)buflen, fih + s + in.body.write.size); break; } case FUSE_DESTROY: case FUSE_STATFS: EXPECT_EQ(inlen, fih); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_RELEASE: EXPECT_EQ(inlen, fih + sizeof(in.body.release)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_FSYNC: case FUSE_FSYNCDIR: EXPECT_EQ(inlen, fih + sizeof(in.body.fsync)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_SETXATTR: EXPECT_GE(inlen, fih + sizeof(in.body.setxattr)) << "Missing request body"; EXPECT_GT(inlen, fih + sizeof(in.body.setxattr)) << "Missing request attribute name"; // No redundant information for checking buflen break; case FUSE_GETXATTR: EXPECT_GE(inlen, fih + sizeof(in.body.getxattr)) << "Missing request body"; EXPECT_GT(inlen, fih + sizeof(in.body.getxattr)) << "Missing request attribute name"; // No redundant information for checking buflen break; case
FUSE_LISTXATTR: EXPECT_EQ(inlen, fih + sizeof(in.body.listxattr)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_REMOVEXATTR: EXPECT_GT(inlen, fih) << "Missing request attribute name"; // No redundant information for checking buflen break; case FUSE_FLUSH: EXPECT_EQ(inlen, fih + sizeof(in.body.flush)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_INIT: EXPECT_EQ(inlen, fih + sizeof(in.body.init)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_OPENDIR: EXPECT_EQ(inlen, fih + sizeof(in.body.opendir)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_READDIR: EXPECT_EQ(inlen, fih + sizeof(in.body.readdir)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_RELEASEDIR: EXPECT_EQ(inlen, fih + sizeof(in.body.releasedir)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_GETLK: EXPECT_EQ(inlen, fih + sizeof(in.body.getlk)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_SETLK: case FUSE_SETLKW: EXPECT_EQ(inlen, fih + sizeof(in.body.setlk)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_ACCESS: EXPECT_EQ(inlen, fih + sizeof(in.body.access)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_CREATE: EXPECT_GE(inlen, fih + sizeof(in.body.create)) << "Missing request body"; EXPECT_GT(inlen, fih + sizeof(in.body.create)) << "Missing request filename"; // No redundant information for checking buflen break; case FUSE_INTERRUPT: EXPECT_EQ(inlen, fih + sizeof(in.body.interrupt)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_BMAP: EXPECT_EQ(inlen, fih + sizeof(in.body.bmap)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_LSEEK: EXPECT_EQ(inlen, fih + sizeof(in.body.lseek)); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_COPY_FILE_RANGE: EXPECT_EQ(inlen, fih + sizeof(in.body.copy_file_range)); EXPECT_EQ(0ul, in.body.copy_file_range.flags); EXPECT_EQ((size_t)buflen, inlen); break; case FUSE_NOTIFY_REPLY: case FUSE_BATCH_FORGET: case FUSE_FALLOCATE: case FUSE_IOCTL: case FUSE_POLL: case FUSE_READDIRPLUS: FAIL() << "Unsupported opcode?"; default: FAIL() << "Unknown opcode " << in.header.opcode; } /* * Check that the ticket's unique value is sequential. Technically it * doesn't need to be sequential, merely unique. But the current * fusefs driver _does_ make it sequential, and that's easy to check * for. */ if (in.header.unique != ++m_last_unique) FAIL() << "Non-sequential unique value"; } void MockFS::init(uint32_t flags) { ssize_t buflen; std::unique_ptr<mockfs_buf_in> in(new mockfs_buf_in); std::unique_ptr<mockfs_buf_out> out(new mockfs_buf_out); read_request(*in, buflen); if (verbosity > 0) debug_request(*in, buflen); audit_request(*in, buflen); ASSERT_EQ(FUSE_INIT, in->header.opcode); out->header.unique = in->header.unique; out->header.error = 0; out->body.init.major = FUSE_KERNEL_VERSION; out->body.init.minor = m_kernel_minor_version; out->body.init.flags = in->body.init.flags & flags; out->body.init.max_write = m_maxwrite; out->body.init.max_readahead = m_maxreadahead; if (m_kernel_minor_version < 23) { SET_OUT_HEADER_LEN(*out, init_7_22); } else { out->body.init.time_gran = m_time_gran; SET_OUT_HEADER_LEN(*out, init); } write(m_fuse_fd, out.get(), out->header.len); } void MockFS::kill_daemon() { m_quit = true; if (m_daemon_id != NULL) pthread_kill(m_daemon_id, SIGUSR1); // Closing the /dev/fuse file descriptor first allows unmount to // succeed even if the daemon doesn't correctly respond to commands // during the unmount sequence.
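// (A note on the shutdown ordering, inferred from the surrounding code:
// m_quit is set before SIGUSR1 is delivered, and the signal handler is
// installed without SA_RESTART, so a read(2) blocked in read_request
// returns EINTR and the daemon thread observes m_quit.)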
close(m_fuse_fd); m_fuse_fd = -1; } void MockFS::loop() { std::vector<std::unique_ptr<mockfs_buf_out>> out; std::unique_ptr<mockfs_buf_in> in(new mockfs_buf_in); ASSERT_TRUE(in != NULL); while (!m_quit) { ssize_t buflen; bzero(in.get(), sizeof(*in)); read_request(*in, buflen); if (m_quit) break; if (verbosity > 0) debug_request(*in, buflen); audit_request(*in, buflen); if (pid_ok((pid_t)in->header.pid)) { process(*in, out); } else { /* * Reject any requests from unknown processes. Because * we actually do mount a filesystem, plenty of * unrelated system daemons may try to access it. */ if (verbosity > 1) printf("\tREJECTED (wrong pid %d)\n", in->header.pid); process_default(*in, out); } for (auto &it: out) write_response(*it); out.clear(); } } int MockFS::notify_inval_entry(ino_t parent, const char *name, size_t namelen) { std::unique_ptr<mockfs_buf_out> out(new mockfs_buf_out); out->header.unique = 0; /* 0 means asynchronous notification */ out->header.error = FUSE_NOTIFY_INVAL_ENTRY; out->body.inval_entry.parent = parent; out->body.inval_entry.namelen = namelen; strlcpy((char*)&out->body.bytes + sizeof(out->body.inval_entry), name, sizeof(out->body.bytes) - sizeof(out->body.inval_entry)); out->header.len = sizeof(out->header) + sizeof(out->body.inval_entry) + namelen; debug_response(*out); write_response(*out); return 0; } int MockFS::notify_inval_inode(ino_t ino, off_t off, ssize_t len) { std::unique_ptr<mockfs_buf_out> out(new mockfs_buf_out); out->header.unique = 0; /* 0 means asynchronous notification */ out->header.error = FUSE_NOTIFY_INVAL_INODE; out->body.inval_inode.ino = ino; out->body.inval_inode.off = off; out->body.inval_inode.len = len; out->header.len = sizeof(out->header) + sizeof(out->body.inval_inode); debug_response(*out); write_response(*out); return 0; } int MockFS::notify_store(ino_t ino, off_t off, const void* data, ssize_t size) { std::unique_ptr<mockfs_buf_out> out(new mockfs_buf_out); out->header.unique = 0; /* 0 means asynchronous notification */ out->header.error = FUSE_NOTIFY_STORE; out->body.store.nodeid = ino; out->body.store.offset = off; out->body.store.size = size; bcopy(data, (char*)&out->body.bytes + sizeof(out->body.store), size); out->header.len = sizeof(out->header) + sizeof(out->body.store) + size; debug_response(*out); write_response(*out); return 0; } bool MockFS::pid_ok(pid_t pid) { if (pid == m_pid) { return (true); } else if (pid == m_child_pid) { return (true); } else { struct kinfo_proc *ki; bool ok = false; ki = kinfo_getproc(pid); if (ki == NULL) return (false); /* * Allow access by the aio daemon processes so that our tests * can use aio functions */ if (0 == strncmp("aiod", ki->ki_comm, 4)) ok = true; free(ki); return (ok); } } void MockFS::process_default(const mockfs_buf_in& in, std::vector<std::unique_ptr<mockfs_buf_out>> &out) { std::unique_ptr<mockfs_buf_out> out0(new mockfs_buf_out); out0->header.unique = in.header.unique; out0->header.error = -EOPNOTSUPP; out0->header.len = sizeof(out0->header); out.push_back(std::move(out0)); } void MockFS::read_request(mockfs_buf_in &in, ssize_t &res) { int nready = 0; fd_set readfds; pollfd fds[1]; struct kevent changes[1]; struct kevent events[1]; struct timespec timeout_ts; struct timeval timeout_tv; const int timeout_ms = 999; int timeout_int, nfds; int fuse_fd; switch (m_pm) { case BLOCKING: break; case KQ: timeout_ts.tv_sec = 0; timeout_ts.tv_nsec = timeout_ms * 1'000'000; while (nready == 0) { EV_SET(&changes[0], m_fuse_fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0); nready = kevent(m_kq, &changes[0], 1, &events[0], 1, &timeout_ts); if (m_quit) return; } ASSERT_LE(0, nready) << strerror(errno); ASSERT_EQ(events[0].ident,
(uintptr_t)m_fuse_fd); if (events[0].flags & EV_ERROR) FAIL() << strerror(events[0].data); else if (events[0].flags & EV_EOF) FAIL() << strerror(events[0].fflags); m_nready = events[0].data; break; case POLL: timeout_int = timeout_ms; fds[0].fd = m_fuse_fd; fds[0].events = POLLIN; while (nready == 0) { nready = poll(fds, 1, timeout_int); if (m_quit) return; } ASSERT_LE(0, nready) << strerror(errno); ASSERT_TRUE(fds[0].revents & POLLIN); break; case SELECT: fuse_fd = m_fuse_fd; if (fuse_fd < 0) break; timeout_tv.tv_sec = 0; timeout_tv.tv_usec = timeout_ms * 1'000; nfds = fuse_fd + 1; while (nready == 0) { FD_ZERO(&readfds); FD_SET(fuse_fd, &readfds); nready = select(nfds, &readfds, NULL, NULL, &timeout_tv); if (m_quit) return; } ASSERT_LE(0, nready) << strerror(errno); ASSERT_TRUE(FD_ISSET(fuse_fd, &readfds)); break; default: FAIL() << "not yet implemented"; } res = read(m_fuse_fd, &in, sizeof(in)); if (res < 0 && !m_quit) { m_quit = true; FAIL() << "read: " << strerror(errno); } ASSERT_TRUE(res >= static_cast<ssize_t>(sizeof(in.header)) || m_quit); /* * Inconsistently, fuse_in_header.len is the size of the entire * request, including header, even though fuse_out_header.len excludes * the size of the header. */ ASSERT_TRUE(res == static_cast<ssize_t>(in.header.len) || m_quit); } void MockFS::write_response(const mockfs_buf_out &out) { fd_set writefds; pollfd fds[1]; struct kevent changes[1]; struct kevent events[1]; int nready, nfds; ssize_t r; switch (m_pm) { case BLOCKING: break; case KQ: EV_SET(&changes[0], m_fuse_fd, EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0, 0); nready = kevent(m_kq, &changes[0], 1, &events[0], 1, NULL); ASSERT_LE(0, nready) << strerror(errno); ASSERT_EQ(events[0].ident, (uintptr_t)m_fuse_fd); if (events[0].flags & EV_ERROR) FAIL() << strerror(events[0].data); else if (events[0].flags & EV_EOF) FAIL() << strerror(events[0].fflags); m_nready = events[0].data; break; case POLL: fds[0].fd = m_fuse_fd; fds[0].events = POLLOUT; nready = poll(fds, 1, INFTIM); ASSERT_LE(0, nready) << strerror(errno); ASSERT_EQ(1, nready) << "NULL timeout expired?"; ASSERT_TRUE(fds[0].revents & POLLOUT); break; case SELECT: FD_ZERO(&writefds); FD_SET(m_fuse_fd, &writefds); nfds = m_fuse_fd + 1; nready = select(nfds, NULL, &writefds, NULL, NULL); ASSERT_LE(0, nready) << strerror(errno); ASSERT_EQ(1, nready) << "NULL timeout expired?"; ASSERT_TRUE(FD_ISSET(m_fuse_fd, &writefds)); break; default: FAIL() << "not yet implemented"; } r = write(m_fuse_fd, &out, out.header.len); ASSERT_TRUE(r > 0 || errno == EAGAIN) << strerror(errno); } void* MockFS::service(void *pthr_data) { MockFS *mock_fs = (MockFS*)pthr_data; mock_fs->loop(); return (NULL); } void MockFS::unmount() { ::unmount("mountpoint", 0); } diff --git a/tests/sys/fs/fusefs/mockfs.hh index 600a4b4292c0..dd6d259ca5af 100644 --- a/tests/sys/fs/fusefs/mockfs.hh +++ b/tests/sys/fs/fusefs/mockfs.hh @@ -1,427 +1,428 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * * This software was developed by BFF Storage Systems, LLC under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ extern "C" { #include #include #include "fuse_kernel.h" } #include #define TIME_T_MAX (std::numeric_limits<time_t>::max()) /* * A pseudo-fuse errno used to indicate that a fuse operation should have no * response, at least not immediately */ #define FUSE_NORESPONSE 9999 #define SET_OUT_HEADER_LEN(out, variant) { \ (out).header.len = (sizeof((out).header) + \ sizeof((out).body.variant)); \ } /* * Create an expectation on FUSE_LOOKUP and return it so the caller can set * actions. * * This must be a macro instead of a method because EXPECT_CALL returns a type * with a deleted constructor. */ #define EXPECT_LOOKUP(parent, path) \ EXPECT_CALL(*m_mock, process( \ ResultOf([=](auto in) { \ return (in.header.opcode == FUSE_LOOKUP && \ in.header.nodeid == (parent) && \ strcmp(in.body.lookup, (path)) == 0); \ }, Eq(true)), \ _) \ )
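/*
 * A usage sketch (illustrative only; the filename is hypothetical): program
 * a negative lookup so that opening a nonexistent file fails cleanly:
 *
 *	EXPECT_LOOKUP(FUSE_ROOT_ID, "nonesuch.txt")
 *	    .WillOnce(Invoke(ReturnErrno(ENOENT)));
 */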
extern int verbosity; /* * The maximum that a test case can set max_write, limited by the buffer * supplied when reading from /dev/fuse. This limitation is imposed by * fusefs-libs, but not by the FUSE protocol. */ const uint32_t max_max_write = 0x20000; /* This struct isn't defined by fuse_kernel.h or libfuse, but it should be */ struct fuse_create_out { struct fuse_entry_out entry; struct fuse_open_out open; }; /* Protocol 7.8 version of struct fuse_attr */ struct fuse_attr_7_8 { uint64_t ino; uint64_t size; uint64_t blocks; uint64_t atime; uint64_t mtime; uint64_t ctime; uint32_t atimensec; uint32_t mtimensec; uint32_t ctimensec; uint32_t mode; uint32_t nlink; uint32_t uid; uint32_t gid; uint32_t rdev; }; /* Protocol 7.8 version of struct fuse_attr_out */ struct fuse_attr_out_7_8 { uint64_t attr_valid; uint32_t attr_valid_nsec; uint32_t dummy; struct fuse_attr_7_8 attr; }; /* Protocol 7.8 version of struct fuse_entry_out */ struct fuse_entry_out_7_8 { uint64_t nodeid; /* Inode ID */ uint64_t generation; /* Inode generation: nodeid:gen must be unique for the fs's lifetime */ uint64_t entry_valid; /* Cache timeout for the name */ uint64_t attr_valid; /* Cache timeout for the attributes */ uint32_t entry_valid_nsec; uint32_t attr_valid_nsec; struct fuse_attr_7_8 attr; }; /* Output struct for FUSE_CREATE for protocol 7.8 servers */ struct fuse_create_out_7_8 { struct fuse_entry_out_7_8 entry; struct fuse_open_out open; }; /* Output struct for FUSE_INIT for protocol 7.22 and earlier servers */ struct fuse_init_out_7_22 { uint32_t major; uint32_t minor; uint32_t max_readahead; uint32_t flags; uint16_t max_background; uint16_t congestion_threshold; uint32_t max_write; }; union fuse_payloads_in { fuse_access_in access; fuse_bmap_in bmap; /* * In fusefs-libs 3.4.2 and below the buffer size is fixed at 0x21000 * minus the header sizes. fusefs-libs 3.4.3 (and FUSE Protocol 7.29) * add a FUSE_MAX_PAGES option that allows it to be greater. * * See fuse_kern_chan.c in fusefs-libs 2.9.9 and below, or * FUSE_DEFAULT_MAX_PAGES_PER_REQ in fusefs-libs 3.4.3 and above. */ uint8_t bytes[ max_max_write + 0x1000 - sizeof(struct fuse_in_header) ]; fuse_copy_file_range_in copy_file_range; fuse_create_in create; fuse_flush_in flush; fuse_fsync_in fsync; fuse_fsync_in fsyncdir; fuse_forget_in forget; fuse_getattr_in getattr; fuse_interrupt_in interrupt; fuse_lk_in getlk; fuse_getxattr_in getxattr; fuse_init_in init; fuse_link_in link; fuse_listxattr_in listxattr; char lookup[0]; fuse_lseek_in lseek; fuse_mkdir_in mkdir; fuse_mknod_in mknod; fuse_open_in open; fuse_open_in opendir; fuse_read_in read; fuse_read_in readdir; fuse_release_in release; fuse_release_in releasedir; fuse_rename_in rename; char rmdir[0]; fuse_setattr_in setattr; fuse_setxattr_in setxattr; fuse_lk_in setlk; fuse_lk_in setlkw; char unlink[0]; fuse_write_in write; }; struct mockfs_buf_in { fuse_in_header header; union fuse_payloads_in body; }; union fuse_payloads_out { fuse_attr_out attr; fuse_attr_out_7_8 attr_7_8; fuse_bmap_out bmap; fuse_create_out create; fuse_create_out_7_8 create_7_8; /* * The protocol places no limits on the size of bytes. Choose * a size big enough for anything we'll test.
*/ uint8_t bytes[0x20000]; fuse_entry_out entry; fuse_entry_out_7_8 entry_7_8; fuse_lk_out getlk; fuse_getxattr_out getxattr; fuse_init_out init; fuse_init_out_7_22 init_7_22; fuse_lseek_out lseek; /* The inval_entry structure should be followed by the entry's name */ fuse_notify_inval_entry_out inval_entry; fuse_notify_inval_inode_out inval_inode; /* The store structure should be followed by the data to store */ fuse_notify_store_out store; fuse_listxattr_out listxattr; fuse_open_out open; fuse_statfs_out statfs; /* * The protocol places no limits on the length of the string. This is * merely convenient for testing. */ char str[80]; fuse_write_out write; }; struct mockfs_buf_out { fuse_out_header header; union fuse_payloads_out body; /* Default constructor: zero everything */ mockfs_buf_out() { memset(this, 0, sizeof(*this)); } }; /* A function that can be invoked in place of MockFS::process */ typedef std::function<void (const mockfs_buf_in& in, std::vector<std::unique_ptr<mockfs_buf_out>> &out)> ProcessMockerT; /* * Helper function used for setting an error expectation for any fuse operation. * The operation will return the supplied error */ ProcessMockerT ReturnErrno(int error); /* Helper function used for returning negative cache entries for LOOKUP */ ProcessMockerT ReturnNegativeCache(const struct timespec *entry_valid); /* Helper function used for returning a single immediate response */ ProcessMockerT ReturnImmediate( std::function<void (const mockfs_buf_in& in, struct mockfs_buf_out &out)> f); /* How the daemon should check /dev/fuse for readiness */ enum poll_method { BLOCKING, SELECT, POLL, KQ }; /* * Fake FUSE filesystem * * "Mounts" a filesystem to a temporary directory and services requests * according to the programmed expectations. * * Operates directly on the fusefs(4) kernel API, not the libfuse(3) user api. */ class MockFS { /* * thread id of the fuse daemon thread * * It must run in a separate thread so it doesn't deadlock with the * client test code. */ pthread_t m_daemon_id; /* file descriptor of /dev/fuse control device */ volatile int m_fuse_fd; /* The minor version of the kernel API that this mock daemon targets */ uint32_t m_kernel_minor_version; int m_kq; /* The max_readahead file system option */ uint32_t m_maxreadahead; /* pid of the test process */ pid_t m_pid; /* The unique value of the header of the last received operation */ uint64_t m_last_unique; /* Method the daemon should use for I/O to and from /dev/fuse */ enum poll_method m_pm; /* Timestamp granularity in nanoseconds */ unsigned m_time_gran; void audit_request(const mockfs_buf_in &in, ssize_t buflen); void debug_request(const mockfs_buf_in&, ssize_t buflen); void debug_response(const mockfs_buf_out&); /* Initialize a session after mounting */ void init(uint32_t flags); /* Is pid from a process that might be involved in the test? */ bool pid_ok(pid_t pid); /* Default request handler */ void process_default(const mockfs_buf_in&, std::vector<std::unique_ptr<mockfs_buf_out>>&); /* Entry point for the daemon thread */ static void* service(void*); /* * Read, but do not process, a single request from the kernel * * @param in Return storage for the FUSE request * @param res Return value of read(2). If positive, the amount of * data read from the fuse device. */ void read_request(mockfs_buf_in& in, ssize_t& res); /* Write a single response back to the kernel */ void write_response(const mockfs_buf_out &out); public: /* pid of child process, for two-process test cases */ pid_t m_child_pid; /* Maximum size of a FUSE_WRITE write */ uint32_t m_maxwrite; /* * Number of events that were available from /dev/fuse after the last * kevent call. Only valid when m_pm = KQ.
*/ int m_nready; /* Tell the daemon to shut down ASAP */ bool m_quit; /* Create a new mockfs and mount it to a tempdir */ MockFS(int max_readahead, bool allow_other, bool default_permissions, bool push_symlinks_in, bool ro, enum poll_method pm, uint32_t flags, uint32_t kernel_minor_version, uint32_t max_write, bool async, - bool no_clusterr, unsigned time_gran, bool nointr); + bool no_clusterr, unsigned time_gran, bool nointr, + bool noatime); virtual ~MockFS(); /* Kill the filesystem daemon without unmounting the filesystem */ void kill_daemon(); /* Process FUSE requests endlessly */ void loop(); /* * Send an asynchronous notification to invalidate a directory entry. * Similar to libfuse's fuse_lowlevel_notify_inval_entry * * This method will block until the client has responded, so it should * generally be run in a separate thread from request processing. * * @param parent Parent directory's inode number * @param name name of dirent to invalidate * @param namelen size of name, including the NUL */ int notify_inval_entry(ino_t parent, const char *name, size_t namelen); /* * Send an asynchronous notification to invalidate an inode's cached * data and/or attributes. Similar to libfuse's * fuse_lowlevel_notify_inval_inode. * * This method will block until the client has responded, so it should * generally be run in a separate thread from request processing. * * @param ino File's inode number * @param off offset at which to begin invalidation. A * negative offset means to invalidate attributes * only. * @param len Size of region of data to invalidate. 0 means * to invalidate all cached data. */ int notify_inval_inode(ino_t ino, off_t off, ssize_t len); /* * Send an asynchronous notification to store data directly into an * inode's cache. Similar to libfuse's fuse_lowlevel_notify_store. * * This method will block until the client has responded, so it should * generally be run in a separate thread from request processing. * * @param ino File's inode number * @param off Offset at which to store data * @param data Pointer to the data to cache * @param len Size of data */ int notify_store(ino_t ino, off_t off, const void* data, ssize_t size); /* * Request handler * * This method is expected to provide the responses to each FUSE * operation. For an immediate response, push one buffer into out. * For a delayed response, push nothing. For an immediate response * plus a delayed response to an earlier operation, push two bufs. * Test cases must define each response using Googlemock expectations */ MOCK_METHOD2(process, void(const mockfs_buf_in&, std::vector<std::unique_ptr<mockfs_buf_out>>&)); /* Gracefully unmount */ void unmount(); };
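/*
 * A usage sketch (illustrative; the real tests drive this through the
 * FuseTest fixture in utils.hh, where m_mock points at the MockFS):
 * program an expectation on process() and answer one operation immediately:
 *
 *	EXPECT_CALL(*m_mock, process(
 *		ResultOf([](auto in) {
 *			return (in.header.opcode == FUSE_STATFS);
 *		}, Eq(true)),
 *		_)
 *	).WillOnce(Invoke(ReturnImmediate([](auto in __unused, auto& out) {
 *		SET_OUT_HEADER_LEN(out, statfs);
 *	})));
 */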
diff --git a/tests/sys/fs/fusefs/read.cc index 3cba564affcf..0888fbc913e8 100644 --- a/tests/sys/fs/fusefs/read.cc +++ b/tests/sys/fs/fusefs/read.cc @@ -1,1070 +1,1366 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * * This software was developed by BFF Storage Systems, LLC under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ extern "C" { #include #include #include #include #include #include #include #include #include #include #include } #include "mockfs.hh" #include "utils.hh" using namespace testing; class Read: public FuseTest { public: void expect_lookup(const char *relpath, uint64_t ino, uint64_t size) { FuseTest::expect_lookup(relpath, ino, S_IFREG | 0644, size, 1); } }; class Read_7_8: public FuseTest { public: virtual void SetUp() { m_kernel_minor_version = 8; FuseTest::SetUp(); } void expect_lookup(const char *relpath, uint64_t ino, uint64_t size) { FuseTest::expect_lookup_7_8(relpath, ino, S_IFREG | 0644, size, 1); } }; class AioRead: public Read { public: virtual void SetUp() { if (!is_unsafe_aio_enabled()) GTEST_SKIP() << "vfs.aio.enable_unsafe must be set for this test"; FuseTest::SetUp(); } }; class AsyncRead: public AioRead { virtual void SetUp() { m_init_flags = FUSE_ASYNC_READ; AioRead::SetUp(); } }; class ReadAhead: public Read, public WithParamInterface<tuple<bool, int>> { virtual void SetUp() { int val; const char *node = "vfs.maxbcachebuf"; size_t size = sizeof(val); ASSERT_EQ(0, sysctlbyname(node, &val, &size, NULL, 0)) << strerror(errno); m_maxreadahead = val * get<1>(GetParam()); m_noclusterr = get<0>(GetParam()); Read::SetUp(); } }; +class ReadNoatime: public Read { + virtual void SetUp() { + m_noatime = true; + Read::SetUp(); + } +}; + class ReadSigbus: public Read { public: static jmp_buf s_jmpbuf; static void *s_si_addr; void TearDown() { struct sigaction sa; bzero(&sa, sizeof(sa)); sa.sa_handler = SIG_DFL; sigaction(SIGBUS, &sa, NULL); FuseTest::TearDown(); } }; static void handle_sigbus(int signo __unused, siginfo_t *info, void *uap __unused) { ReadSigbus::s_si_addr = info->si_addr; longjmp(ReadSigbus::s_jmpbuf, 1); } jmp_buf ReadSigbus::s_jmpbuf; void *ReadSigbus::s_si_addr; +class TimeGran: public Read, public WithParamInterface<unsigned> { +public: +virtual void SetUp() { + m_time_gran = 1 << GetParam(); + Read::SetUp(); +} +}; + /* AIO reads need to set the header's pid field correctly */ /* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236379 */ TEST_F(AioRead, aio_read) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; struct aiocb iocb, *piocb; expect_lookup(RELPATH, ino, bufsize); expect_open(ino, 0, 1); expect_read(ino, 0, bufsize, bufsize, CONTENTS); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); iocb.aio_nbytes = bufsize; iocb.aio_fildes = fd; iocb.aio_buf = buf; iocb.aio_offset = 0; iocb.aio_sigevent.sigev_notify = SIGEV_NONE; ASSERT_EQ(0, aio_read(&iocb)) << strerror(errno); ASSERT_EQ(bufsize, aio_waitcomplete(&piocb, NULL)) << strerror(errno);
ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize)); leak(fd); } /* * Without the FUSE_ASYNC_READ mount option, fuse(4) should ensure that there * is at most one outstanding read operation per file handle */ TEST_F(AioRead, async_read_disabled) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; uint64_t ino = 42; int fd; ssize_t bufsize = 50; char buf0[bufsize], buf1[bufsize]; off_t off0 = 0; off_t off1 = m_maxbcachebuf; struct aiocb iocb0, iocb1; volatile sig_atomic_t read_count = 0; expect_lookup(RELPATH, ino, 131072); expect_open(ino, 0, 1); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ && in.header.nodeid == ino && in.body.read.fh == FH && in.body.read.offset == (uint64_t)off0); }, Eq(true)), _) ).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) { read_count++; /* Filesystem is slow to respond */ })); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ && in.header.nodeid == ino && in.body.read.fh == FH && in.body.read.offset == (uint64_t)off1); }, Eq(true)), _) ).WillRepeatedly(Invoke([&](auto in __unused, auto &out __unused) { read_count++; /* Filesystem is slow to respond */ })); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); /* * Submit two AIO read requests, and respond to neither. If the * filesystem ever gets the second read request, then we failed to * limit outstanding reads. */ iocb0.aio_nbytes = bufsize; iocb0.aio_fildes = fd; iocb0.aio_buf = buf0; iocb0.aio_offset = off0; iocb0.aio_sigevent.sigev_notify = SIGEV_NONE; ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno); iocb1.aio_nbytes = bufsize; iocb1.aio_fildes = fd; iocb1.aio_buf = buf1; iocb1.aio_offset = off1; iocb1.aio_sigevent.sigev_notify = SIGEV_NONE; ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno); /* * Sleep for a while to make sure the kernel has had a chance to issue * the second read, even though the first has not yet returned */ nap(); EXPECT_EQ(read_count, 1); m_mock->kill_daemon(); /* Wait for AIO activity to complete, but ignore errors */ (void)aio_waitcomplete(NULL, NULL); leak(fd); } /* * With the FUSE_ASYNC_READ mount option, fuse(4) may issue multiple * simultaneous read requests on the same file handle. */ TEST_F(AsyncRead, async_read) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; uint64_t ino = 42; int fd; ssize_t bufsize = 50; char buf0[bufsize], buf1[bufsize]; off_t off0 = 0; off_t off1 = m_maxbcachebuf; off_t fsize = 2 * m_maxbcachebuf; struct aiocb iocb0, iocb1; sem_t sem; ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno); expect_lookup(RELPATH, ino, fsize); expect_open(ino, 0, 1); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ && in.header.nodeid == ino && in.body.read.fh == FH && in.body.read.offset == (uint64_t)off0); }, Eq(true)), _) ).WillOnce(Invoke([&](auto in __unused, auto &out __unused) { sem_post(&sem); /* Filesystem is slow to respond */ })); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ && in.header.nodeid == ino && in.body.read.fh == FH && in.body.read.offset == (uint64_t)off1); }, Eq(true)), _) ).WillOnce(Invoke([&](auto in __unused, auto &out __unused) { sem_post(&sem); /* Filesystem is slow to respond */ })); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); /* * Submit two AIO read requests, but respond to neither. Ensure that * we received both.
*/ iocb0.aio_nbytes = bufsize; iocb0.aio_fildes = fd; iocb0.aio_buf = buf0; iocb0.aio_offset = off0; iocb0.aio_sigevent.sigev_notify = SIGEV_NONE; ASSERT_EQ(0, aio_read(&iocb0)) << strerror(errno); iocb1.aio_nbytes = bufsize; iocb1.aio_fildes = fd; iocb1.aio_buf = buf1; iocb1.aio_offset = off1; iocb1.aio_sigevent.sigev_notify = SIGEV_NONE; ASSERT_EQ(0, aio_read(&iocb1)) << strerror(errno); /* Wait until both reads have reached the daemon */ ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno); ASSERT_EQ(0, sem_wait(&sem)) << strerror(errno); m_mock->kill_daemon(); /* Wait for AIO activity to complete, but ignore errors */ (void)aio_waitcomplete(NULL, NULL); leak(fd); } +/* The kernel should update the cached atime attribute during a read */ +TEST_F(Read, atime) +{ + const char FULLPATH[] = "mountpoint/some_file.txt"; + const char RELPATH[] = "some_file.txt"; + const char *CONTENTS = "abcdefgh"; + struct stat sb1, sb2; + uint64_t ino = 42; + int fd; + ssize_t bufsize = strlen(CONTENTS); + uint8_t buf[bufsize]; + + expect_lookup(RELPATH, ino, bufsize); + expect_open(ino, 0, 1); + expect_read(ino, 0, bufsize, bufsize, CONTENTS); + + fd = open(FULLPATH, O_RDONLY); + ASSERT_LE(0, fd) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb1)); + + /* Ensure atime will be different than it was during lookup */ + nap(); + + ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb2)); + + /* The kernel should automatically update atime during read */ + EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, <)); + EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==)); + EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==)); + + leak(fd); +} + +/* The kernel should update the cached atime attribute during a cached read */ +TEST_F(Read, atime_cached) +{ + const char FULLPATH[] = "mountpoint/some_file.txt"; + const char RELPATH[] = "some_file.txt"; + const char *CONTENTS = "abcdefgh"; + struct stat sb1, sb2; + uint64_t ino = 42; + int fd; + ssize_t bufsize = strlen(CONTENTS); + uint8_t buf[bufsize]; + + expect_lookup(RELPATH, ino, bufsize); + expect_open(ino, 0, 1); + expect_read(ino, 0, bufsize, bufsize, CONTENTS); + + fd = open(FULLPATH, O_RDONLY); + ASSERT_LE(0, fd) << strerror(errno); + + ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb1)); + + /* Ensure atime will be different than it was during the first read */ + nap(); + + ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb2)); + + /* The kernel should automatically update atime during read */ + EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, <)); + EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==)); + EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==)); + + leak(fd); +} + +/* dirty atime values should be flushed during close */ +TEST_F(Read, atime_during_close) +{ + const char FULLPATH[] = "mountpoint/some_file.txt"; + const char RELPATH[] = "some_file.txt"; + const char *CONTENTS = "abcdefgh"; + struct stat sb; + uint64_t ino = 42; + const mode_t newmode = 0755; + int fd; + ssize_t bufsize = strlen(CONTENTS); + uint8_t buf[bufsize]; + + expect_lookup(RELPATH, ino, bufsize); + expect_open(ino, 0, 1); + expect_read(ino, 0, bufsize, bufsize, CONTENTS); + EXPECT_CALL(*m_mock, process( + ResultOf([&](auto in) { + uint32_t valid = FATTR_ATIME; + return (in.header.opcode == FUSE_SETATTR && + in.header.nodeid == ino && + in.body.setattr.valid == valid && + (time_t)in.body.setattr.atime == + sb.st_atim.tv_sec && + 
(long)in.body.setattr.atimensec == + sb.st_atim.tv_nsec); + }, Eq(true)), + _) + ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { + SET_OUT_HEADER_LEN(out, attr); + out.body.attr.attr.ino = ino; + out.body.attr.attr.mode = S_IFREG | newmode; + }))); + expect_flush(ino, 1, ReturnErrno(0)); + expect_release(ino, FuseTest::FH); + + fd = open(FULLPATH, O_RDONLY); + ASSERT_LE(0, fd) << strerror(errno); + + /* Ensure atime will be different than during lookup */ + nap(); + + ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb)); + + close(fd); +} + +/* A cached atime should be flushed during FUSE_SETATTR */ +TEST_F(Read, atime_during_setattr) +{ + const char FULLPATH[] = "mountpoint/some_file.txt"; + const char RELPATH[] = "some_file.txt"; + const char *CONTENTS = "abcdefgh"; + struct stat sb; + uint64_t ino = 42; + const mode_t newmode = 0755; + int fd; + ssize_t bufsize = strlen(CONTENTS); + uint8_t buf[bufsize]; + + expect_lookup(RELPATH, ino, bufsize); + expect_open(ino, 0, 1); + expect_read(ino, 0, bufsize, bufsize, CONTENTS); + EXPECT_CALL(*m_mock, process( + ResultOf([&](auto in) { + uint32_t valid = FATTR_MODE | FATTR_ATIME; + return (in.header.opcode == FUSE_SETATTR && + in.header.nodeid == ino && + in.body.setattr.valid == valid && + (time_t)in.body.setattr.atime == + sb.st_atim.tv_sec && + (long)in.body.setattr.atimensec == + sb.st_atim.tv_nsec); + }, Eq(true)), + _) + ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { + SET_OUT_HEADER_LEN(out, attr); + out.body.attr.attr.ino = ino; + out.body.attr.attr.mode = S_IFREG | newmode; + }))); + + fd = open(FULLPATH, O_RDONLY); + ASSERT_LE(0, fd) << strerror(errno); + + /* Ensure atime will be different than during lookup */ + nap(); + + ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb)); + ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno); + + leak(fd); +} + /* 0-length reads shouldn't cause any confusion */ TEST_F(Read, direct_io_read_nothing) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; uint64_t ino = 42; int fd; uint64_t offset = 100; char buf[80]; expect_lookup(RELPATH, ino, offset + 1000); expect_open(ino, FOPEN_DIRECT_IO, 1); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(0, pread(fd, buf, 0, offset)) << strerror(errno); leak(fd); } /* * With direct_io, reads should not fill the cache. They should go straight to * the daemon */ TEST_F(Read, direct_io_pread) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd; uint64_t offset = 100; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; expect_lookup(RELPATH, ino, offset + bufsize); expect_open(ino, FOPEN_DIRECT_IO, 1); expect_read(ino, offset, bufsize, bufsize, CONTENTS); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize)); // With FOPEN_DIRECT_IO, the cache should be bypassed. The server will // get a 2nd read request. expect_read(ino, offset, bufsize, bufsize, CONTENTS); ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize)); leak(fd); } /* * With direct_io, filesystems are allowed to return less data than is * requested.
fuse(4) should return a short read to userland. */ TEST_F(Read, direct_io_short_read) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefghijklmnop"; uint64_t ino = 42; int fd; uint64_t offset = 100; ssize_t bufsize = strlen(CONTENTS); ssize_t halfbufsize = bufsize / 2; uint8_t buf[bufsize]; expect_lookup(RELPATH, ino, offset + bufsize); expect_open(ino, FOPEN_DIRECT_IO, 1); expect_read(ino, offset, bufsize, halfbufsize, CONTENTS); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(halfbufsize, pread(fd, buf, bufsize, offset)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, halfbufsize)); leak(fd); } TEST_F(Read, eio) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; expect_lookup(RELPATH, ino, bufsize); expect_open(ino, 0, 1); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ); }, Eq(true)), _) ).WillOnce(Invoke(ReturnErrno(EIO))); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(-1, read(fd, buf, bufsize)) << strerror(errno); ASSERT_EQ(EIO, errno); leak(fd); } /* * If the server returns a short read when direct io is not in use, that * indicates EOF, because of a server-side truncation. We should invalidate * all cached attributes. We may update the file size. */ TEST_F(Read, eof) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefghijklmnop"; uint64_t ino = 42; int fd; uint64_t offset = 100; ssize_t bufsize = strlen(CONTENTS); ssize_t partbufsize = 3 * bufsize / 4; ssize_t r; uint8_t buf[bufsize]; struct stat sb; expect_lookup(RELPATH, ino, offset + bufsize); expect_open(ino, 0, 1); expect_read(ino, 0, offset + bufsize, offset + partbufsize, CONTENTS); expect_getattr(ino, offset + partbufsize); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); r = pread(fd, buf, bufsize, offset); ASSERT_LE(0, r) << strerror(errno); EXPECT_EQ(partbufsize, r) << strerror(errno); ASSERT_EQ(0, fstat(fd, &sb)); EXPECT_EQ((off_t)(offset + partbufsize), sb.st_size); leak(fd); } /* Like Read.eof, but causes an entire buffer to be invalidated */ TEST_F(Read, eof_of_whole_buffer) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefghijklmnop"; uint64_t ino = 42; int fd; ssize_t bufsize = strlen(CONTENTS); off_t old_filesize = m_maxbcachebuf * 2 + bufsize; uint8_t buf[bufsize]; struct stat sb; expect_lookup(RELPATH, ino, old_filesize); expect_open(ino, 0, 1); expect_read(ino, 2 * m_maxbcachebuf, bufsize, bufsize, CONTENTS); expect_read(ino, m_maxbcachebuf, m_maxbcachebuf, 0, CONTENTS); expect_getattr(ino, m_maxbcachebuf); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); /* Cache the third block */ ASSERT_EQ(bufsize, pread(fd, buf, bufsize, m_maxbcachebuf * 2)) << strerror(errno); /* Try to read the 2nd block, but it's past EOF */ ASSERT_EQ(0, pread(fd, buf, bufsize, m_maxbcachebuf)) << strerror(errno); ASSERT_EQ(0, fstat(fd, &sb)); EXPECT_EQ((off_t)(m_maxbcachebuf), sb.st_size); leak(fd); } /* * With the keep_cache option, the kernel may keep its read cache across * multiple open(2)s.
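 * (The daemon opts in per open by setting FOPEN_KEEP_CACHE in
 * fuse_open_out.open_flags, which is what this suite's expect_open helper
 * arranges in the test below.)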
*/ TEST_F(Read, keep_cache) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd0, fd1; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2); expect_open(ino, FOPEN_KEEP_CACHE, 2); expect_read(ino, 0, bufsize, bufsize, CONTENTS); fd0 = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd0) << strerror(errno); ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno); fd1 = open(FULLPATH, O_RDWR); ASSERT_LE(0, fd1) << strerror(errno); /* * This read should be serviced by cache, even though it's on the other * file descriptor */ ASSERT_EQ(bufsize, read(fd1, buf, bufsize)) << strerror(errno); leak(fd0); leak(fd1); } /* * Without the keep_cache option, the kernel should drop its read caches on * every open */ TEST_F(Read, keep_cache_disabled) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd0, fd1; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, bufsize, 2); expect_open(ino, 0, 2); expect_read(ino, 0, bufsize, bufsize, CONTENTS); fd0 = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd0) << strerror(errno); ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno); fd1 = open(FULLPATH, O_RDWR); ASSERT_LE(0, fd1) << strerror(errno); /* * This read should not be serviced by cache, even though it's on the * original file descriptor */ expect_read(ino, 0, bufsize, bufsize, CONTENTS); ASSERT_EQ(0, lseek(fd0, 0, SEEK_SET)) << strerror(errno); ASSERT_EQ(bufsize, read(fd0, buf, bufsize)) << strerror(errno); leak(fd0); leak(fd1); } TEST_F(Read, mmap) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd; ssize_t len; size_t bufsize = strlen(CONTENTS); void *p; len = getpagesize(); expect_lookup(RELPATH, ino, bufsize); expect_open(ino, 0, 1); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ && in.header.nodeid == ino && in.body.read.fh == Read::FH && in.body.read.offset == 0 && in.body.read.size == bufsize); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { out.header.len = sizeof(struct fuse_out_header) + bufsize; memmove(out.body.bytes, CONTENTS, bufsize); }))); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); ASSERT_NE(MAP_FAILED, p) << strerror(errno); ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize)); ASSERT_EQ(0, munmap(p, len)) << strerror(errno); leak(fd); } +/* + * The kernel should not update the cached atime attribute during a read, if + * MNT_NOATIME is used. 
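+ * (ReadNoatime's SetUp sets m_noatime, which makes MockFS pass the "noatime"
+ * option to nmount(2), so the mount runs with MNT_NOATIME.)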
+ */ +TEST_F(ReadNoatime, atime) +{ + const char FULLPATH[] = "mountpoint/some_file.txt"; + const char RELPATH[] = "some_file.txt"; + const char *CONTENTS = "abcdefgh"; + struct stat sb1, sb2; + uint64_t ino = 42; + int fd; + ssize_t bufsize = strlen(CONTENTS); + uint8_t buf[bufsize]; + + expect_lookup(RELPATH, ino, bufsize); + expect_open(ino, 0, 1); + expect_read(ino, 0, bufsize, bufsize, CONTENTS); + + fd = open(FULLPATH, O_RDONLY); + ASSERT_LE(0, fd) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb1)); + + nap(); + + ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb2)); + + /* The kernel should not update atime during read */ + EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==)); + EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==)); + EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==)); + + leak(fd); +} + +/* + * The kernel should not update the cached atime attribute during a cached + * read, if MNT_NOATIME is used. + */ +TEST_F(ReadNoatime, atime_cached) +{ + const char FULLPATH[] = "mountpoint/some_file.txt"; + const char RELPATH[] = "some_file.txt"; + const char *CONTENTS = "abcdefgh"; + struct stat sb1, sb2; + uint64_t ino = 42; + int fd; + ssize_t bufsize = strlen(CONTENTS); + uint8_t buf[bufsize]; + + expect_lookup(RELPATH, ino, bufsize); + expect_open(ino, 0, 1); + expect_read(ino, 0, bufsize, bufsize, CONTENTS); + + fd = open(FULLPATH, O_RDONLY); + ASSERT_LE(0, fd) << strerror(errno); + + ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb1)); + + nap(); + + ASSERT_EQ(bufsize, pread(fd, buf, bufsize, 0)) << strerror(errno); + ASSERT_EQ(0, fstat(fd, &sb2)); + + /* The kernel should not update atime during a cached read */ + EXPECT_TRUE(timespeccmp(&sb1.st_atim, &sb2.st_atim, ==)); + EXPECT_TRUE(timespeccmp(&sb1.st_ctim, &sb2.st_ctim, ==)); + EXPECT_TRUE(timespeccmp(&sb1.st_mtim, &sb2.st_mtim, ==)); + + leak(fd); +} + /* Read of an mmap()ed file fails */ TEST_F(ReadSigbus, mmap_eio) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; struct sigaction sa; uint64_t ino = 42; int fd; ssize_t len; size_t bufsize = strlen(CONTENTS); void *p; len = getpagesize(); expect_lookup(RELPATH, ino, bufsize); expect_open(ino, 0, 1); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ && in.header.nodeid == ino && in.body.read.fh == Read::FH); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnErrno(EIO))); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); ASSERT_NE(MAP_FAILED, p) << strerror(errno); /* Accessing the mapped page should return SIGBUS. */ bzero(&sa, sizeof(sa)); sa.sa_handler = SIG_DFL; sa.sa_sigaction = handle_sigbus; sa.sa_flags = SA_RESETHAND | SA_SIGINFO; ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno); if (setjmp(ReadSigbus::s_jmpbuf) == 0) { atomic_signal_fence(std::memory_order::memory_order_seq_cst); volatile char x __unused = *(volatile char*)p; FAIL() << "shouldn't get here"; } ASSERT_EQ(p, ReadSigbus::s_si_addr); ASSERT_EQ(0, munmap(p, len)) << strerror(errno); leak(fd); } /* * A read via mmap comes up short, indicating that the file was truncated * server-side.
*/ TEST_F(Read, mmap_eof) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd; ssize_t len; size_t bufsize = strlen(CONTENTS); struct stat sb; void *p; len = getpagesize(); expect_lookup(RELPATH, ino, m_maxbcachebuf); expect_open(ino, 0, 1); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ && in.header.nodeid == ino && in.body.read.fh == Read::FH && in.body.read.offset == 0 && in.body.read.size == (uint32_t)m_maxbcachebuf); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { out.header.len = sizeof(struct fuse_out_header) + bufsize; memmove(out.body.bytes, CONTENTS, bufsize); }))); expect_getattr(ino, bufsize); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); ASSERT_NE(MAP_FAILED, p) << strerror(errno); /* The file size should be automatically truncated */ ASSERT_EQ(0, memcmp(p, CONTENTS, bufsize)); ASSERT_EQ(0, fstat(fd, &sb)) << strerror(errno); EXPECT_EQ((off_t)bufsize, sb.st_size); ASSERT_EQ(0, munmap(p, len)) << strerror(errno); leak(fd); } /* * During VOP_GETPAGES, the FUSE server fails a FUSE_GETATTR operation. This * almost certainly indicates a buggy FUSE server, and our goal should be not * to panic. Instead, generate SIGBUS. */ TEST_F(ReadSigbus, mmap_getblksz_fail) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; struct sigaction sa; Sequence seq; uint64_t ino = 42; int fd; ssize_t len; size_t bufsize = strlen(CONTENTS); mode_t mode = S_IFREG | 0644; void *p; len = getpagesize(); FuseTest::expect_lookup(RELPATH, ino, mode, bufsize, 1, 0); /* Expect two GETATTR calls that succeed, followed by one that fails. */ EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_GETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).Times(2) .InSequence(seq) .WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; out.body.attr.attr.mode = mode; out.body.attr.attr.size = bufsize; out.body.attr.attr_valid = 0; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_GETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).InSequence(seq) .WillRepeatedly(Invoke(ReturnErrno(EIO))); expect_open(ino, 0, 1); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ); }, Eq(true)), _) ).Times(0); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); ASSERT_NE(MAP_FAILED, p) << strerror(errno); /* Accessing the mapped page should return SIGBUS.
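 * handle_sigbus records si_addr and longjmps back to the test, which then
 * asserts that the faulting address was the mapped page.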
*/ bzero(&sa, sizeof(sa)); sa.sa_handler = SIG_DFL; sa.sa_sigaction = handle_sigbus; sa.sa_flags = SA_RESETHAND | SA_SIGINFO; ASSERT_EQ(0, sigaction(SIGBUS, &sa, NULL)) << strerror(errno); if (setjmp(ReadSigbus::s_jmpbuf) == 0) { atomic_signal_fence(std::memory_order::memory_order_seq_cst); volatile char x __unused = *(volatile char*)p; FAIL() << "shouldn't get here"; } ASSERT_EQ(p, ReadSigbus::s_si_addr); ASSERT_EQ(0, munmap(p, len)) << strerror(errno); leak(fd); } /* * Just as when FOPEN_DIRECT_IO is used, reads with O_DIRECT should bypass * cache and go straight to the daemon */ TEST_F(Read, o_direct) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; expect_lookup(RELPATH, ino, bufsize); expect_open(ino, 0, 1); expect_read(ino, 0, bufsize, bufsize, CONTENTS); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); // Fill the cache ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize)); // Reads with o_direct should bypass the cache expect_read(ino, 0, bufsize, bufsize, CONTENTS); ASSERT_EQ(0, fcntl(fd, F_SETFL, O_DIRECT)) << strerror(errno); ASSERT_EQ(0, lseek(fd, 0, SEEK_SET)) << strerror(errno); ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize)); leak(fd); } TEST_F(Read, pread) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd; /* * Set offset to a maxbcachebuf boundary so we'll be sure what offset * to read from. Without this, the read might start at a lower offset. */ uint64_t offset = m_maxbcachebuf; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; expect_lookup(RELPATH, ino, offset + bufsize); expect_open(ino, 0, 1); expect_read(ino, offset, bufsize, bufsize, CONTENTS); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(bufsize, pread(fd, buf, bufsize, offset)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize)); leak(fd); } TEST_F(Read, read) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; expect_lookup(RELPATH, ino, bufsize); expect_open(ino, 0, 1); expect_read(ino, 0, bufsize, bufsize, CONTENTS); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize)); leak(fd); } TEST_F(Read_7_8, read) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const char *CONTENTS = "abcdefgh"; uint64_t ino = 42; int fd; ssize_t bufsize = strlen(CONTENTS); uint8_t buf[bufsize]; expect_lookup(RELPATH, ino, bufsize); expect_open(ino, 0, 1); expect_read(ino, 0, bufsize, bufsize, CONTENTS); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno); ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize)); leak(fd); } /* * If caching is enabled, the kernel should try to read an entire cache block * at a time.
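 * For example, in the test below the first 8-byte read(2) triggers a single
 * FUSE_READ of m_maxbcachebuf bytes, and the second 8-byte read is serviced
 * entirely from the cached block.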
TEST_F(Read, cache_block)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS0 = "abcdefghijklmnop";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = 8;
	ssize_t filesize = m_maxbcachebuf * 2;
	char *contents;
	char buf[bufsize];
	const char *contents1 = CONTENTS0 + bufsize;

	contents = (char*)calloc(1, filesize);
	ASSERT_NE(nullptr, contents);
	memmove(contents, CONTENTS0, strlen(CONTENTS0));

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	expect_read(ino, 0, m_maxbcachebuf, m_maxbcachebuf, contents);

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS0, bufsize));

	/* A subsequent read should be serviced by cache */
	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(buf, contents1, bufsize));
	leak(fd);
	free(contents);
}

/* Reading with sendfile should work (though it obviously won't be 0-copy) */
TEST_F(Read, sendfile)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	size_t bufsize = strlen(CONTENTS);
	uint8_t buf[bufsize];
	int sp[2];
	off_t sbytes;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == Read::FH &&
				in.body.read.offset == 0 &&
				in.body.read.size == bufsize);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + bufsize;
		memmove(out.body.bytes, CONTENTS, bufsize);
	})));

	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
		<< strerror(errno);
	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_EQ(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0))
		<< strerror(errno);
	ASSERT_EQ(static_cast<ssize_t>(bufsize), read(sp[0], buf, bufsize))
		<< strerror(errno);
	ASSERT_EQ(0, memcmp(buf, CONTENTS, bufsize));

	close(sp[1]);
	close(sp[0]);
	leak(fd);
}

/* sendfile should fail gracefully if fuse declines the read */
/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236466 */
TEST_F(Read, sendfile_eio)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);
	int sp[2];
	off_t sbytes;

	expect_lookup(RELPATH, ino, bufsize);
	expect_open(ino, 0, 1);
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(EIO)));

	ASSERT_EQ(0, socketpair(PF_LOCAL, SOCK_STREAM, 0, sp))
		<< strerror(errno);
	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	ASSERT_NE(0, sendfile(fd, sp[1], 0, bufsize, NULL, &sbytes, 0));

	close(sp[1]);
	close(sp[0]);
	leak(fd);
}

/*
 * Sequential reads should use readahead.  And if allowed, large reads should
 * be clustered.
 */
TEST_P(ReadAhead, readahead) {
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	uint64_t ino = 42;
	int fd, maxcontig, clustersize;
	ssize_t bufsize = 4 * m_maxbcachebuf;
	ssize_t filesize = bufsize;
	uint64_t len;
	char *rbuf, *contents;
	off_t offs;

	contents = (char*)malloc(filesize);
	ASSERT_NE(nullptr, contents);
	memset(contents, 'X', filesize);
	rbuf = (char*)calloc(1, bufsize);

	expect_lookup(RELPATH, ino, filesize);
	expect_open(ino, 0, 1);
	maxcontig = m_noclusterr ? m_maxbcachebuf :
		m_maxbcachebuf + m_maxreadahead;
	clustersize = MIN(maxcontig, m_maxphys);
	for (offs = 0; offs < bufsize; offs += clustersize) {
		len = std::min((size_t)clustersize,
			(size_t)(filesize - offs));
		expect_read(ino, offs, len, len, contents + offs);
	}

	fd = open(FULLPATH, O_RDONLY);
	ASSERT_LE(0, fd) << strerror(errno);

	/* Set the internal readahead counter to a "large" value */
	ASSERT_EQ(0, fcntl(fd, F_READAHEAD, 1'000'000'000)) << strerror(errno);

	ASSERT_EQ(bufsize, read(fd, rbuf, bufsize)) << strerror(errno);
	ASSERT_EQ(0, memcmp(rbuf, contents, bufsize));

	leak(fd);
	free(rbuf);
	free(contents);
}

INSTANTIATE_TEST_CASE_P(RA, ReadAhead,
	Values(tuple(false, 0),
	       tuple(false, 1),
	       tuple(false, 2),
	       tuple(false, 3),
	       tuple(true, 0),
	       tuple(true, 1),
	       tuple(true, 2)));
+
+/* fuse_init_out.time_gran controls the granularity of timestamps */
+TEST_P(TimeGran, atime_during_setattr)
+{
+	const char FULLPATH[] = "mountpoint/some_file.txt";
+	const char RELPATH[] = "some_file.txt";
+	const char *CONTENTS = "abcdefgh";
+	ssize_t bufsize = strlen(CONTENTS);
+	uint8_t buf[bufsize];
+	uint64_t ino = 42;
+	const mode_t newmode = 0755;
+	int fd;
+
+	expect_lookup(RELPATH, ino, bufsize);
+	expect_open(ino, 0, 1);
+	expect_read(ino, 0, bufsize, bufsize, CONTENTS);
+	EXPECT_CALL(*m_mock, process(
+		ResultOf([=](auto in) {
+			uint32_t valid = FATTR_MODE | FATTR_ATIME;
+			return (in.header.opcode == FUSE_SETATTR &&
+				in.header.nodeid == ino &&
+				in.body.setattr.valid == valid &&
+				in.body.setattr.atimensec % m_time_gran == 0);
+		}, Eq(true)),
+		_)
+	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
+		SET_OUT_HEADER_LEN(out, attr);
+		out.body.attr.attr.ino = ino;
+		out.body.attr.attr.mode = S_IFREG | newmode;
+	})));
+
+	fd = open(FULLPATH, O_RDWR);
+	ASSERT_LE(0, fd) << strerror(errno);
+
+	ASSERT_EQ(bufsize, read(fd, buf, bufsize)) << strerror(errno);
+	ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno);
+
+	leak(fd);
+}
+
+INSTANTIATE_TEST_CASE_P(TG, TimeGran, Range(0u, 10u));
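/*
 * Illustrative sketch, not part of this change: a FUSE file server
 * advertises its timestamp granularity, in nanoseconds, through
 * fuse_init_out.time_gran in its reply to FUSE_INIT.  The kernel is then
 * expected to truncate the timestamps it sends in FUSE_SETATTR to a
 * multiple of that value, which is what the TimeGran test above checks.
 * The handler below is hypothetical; only the struct and field names come
 * from the FUSE protocol headers.
 */
static void
example_handle_init(struct fuse_init_out *out)
{
	out->major = FUSE_KERNEL_VERSION;
	out->minor = FUSE_KERNEL_MINOR_VERSION;
	/* Round all inode timestamps to whole microseconds */
	out->time_gran = 1000;
}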
diff --git a/tests/sys/fs/fusefs/utils.cc b/tests/sys/fs/fusefs/utils.cc
index 4b59f6e26e12..16dfc9c52939 100644
--- a/tests/sys/fs/fusefs/utils.cc
+++ b/tests/sys/fs/fusefs/utils.cc
@@ -1,650 +1,650 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

extern "C" {
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
}

#include

#include "mockfs.hh"
#include "utils.hh"

using namespace testing;

/*
 * The default max_write is set to this formula in libfuse, though
 * individual filesystems can lower it.  The "- 4096" was added in
 * commit 154ffe2, with the commit message "fix".
 */
const uint32_t libfuse_max_write = 32 * getpagesize() + 0x1000 - 4096;
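/*
 * For example (illustrative arithmetic, not part of this change): with the
 * common 4096-byte page size, the formula above evaluates to
 * 32 * 4096 + 4096 - 4096 = 131072 bytes, i.e. 128 KiB.
 */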
/* Check that fusefs(4) is accessible and the current user can mount(2) */
void check_environment()
{
	const char *devnode = "/dev/fuse";
	const char *bsdextended_node = "security.mac.bsdextended.enabled";
	int bsdextended_val = 0;
	size_t bsdextended_size = sizeof(bsdextended_val);
	int bsdextended_found;
	const char *usermount_node = "vfs.usermount";
	int usermount_val = 0;
	size_t usermount_size = sizeof(usermount_val);

	if (eaccess(devnode, R_OK | W_OK)) {
		if (errno == ENOENT) {
			GTEST_SKIP() << devnode << " does not exist";
		} else if (errno == EACCES) {
			GTEST_SKIP() << devnode <<
			    " is not accessible by the current user";
		} else {
			GTEST_SKIP() << strerror(errno);
		}
	}
	// mac_bsdextended(4), when enabled, generates many more GETATTR
	// operations.  The fusefs tests' expectations don't account for
	// those, and adding extra code to handle them obfuscates the real
	// purpose of the tests.  Better just to skip the fusefs tests if
	// mac_bsdextended is enabled.
	bsdextended_found = sysctlbyname(bsdextended_node,
	    &bsdextended_val, &bsdextended_size, NULL, 0);
	if (bsdextended_found == 0 && bsdextended_val != 0)
		GTEST_SKIP() <<
		    "The fusefs tests are incompatible with mac_bsdextended.";
	ASSERT_EQ(sysctlbyname(usermount_node, &usermount_val,
	    &usermount_size, NULL, 0), 0);
	if (geteuid() != 0 && !usermount_val)
		GTEST_SKIP() << "current user is not allowed to mount";
}

const char *cache_mode_to_s(enum cache_mode cm)
{
	switch (cm) {
	case Uncached:
		return "Uncached";
	case Writethrough:
		return "Writethrough";
	case Writeback:
		return "Writeback";
	case WritebackAsync:
		return "WritebackAsync";
	default:
		return "Unknown";
	}
}

bool is_unsafe_aio_enabled(void)
{
	const char *node = "vfs.aio.enable_unsafe";
	int val = 0;
	size_t size = sizeof(val);

	if (sysctlbyname(node, &val, &size, NULL, 0)) {
		perror("sysctlbyname");
		return (false);
	}
	return (val != 0);
}

class FuseEnv: public Environment {
	virtual void SetUp() {
	}
};

void FuseTest::SetUp() {
	const char *maxbcachebuf_node = "vfs.maxbcachebuf";
	const char *maxphys_node = "kern.maxphys";
	int val = 0;
	size_t size = sizeof(val);

	/*
	 * XXX check_environment should be called from FuseEnv::SetUp, but
	 * can't due to https://github.com/google/googletest/issues/2189
	 */
	check_environment();
	if (IsSkipped())
		return;

	ASSERT_EQ(0, sysctlbyname(maxbcachebuf_node, &val, &size, NULL, 0))
		<< strerror(errno);
	m_maxbcachebuf = val;
	ASSERT_EQ(0, sysctlbyname(maxphys_node, &val, &size, NULL, 0))
		<< strerror(errno);
	m_maxphys = val;
	/*
	 * Set the default max_write to a distinct value from MAXPHYS to catch
	 * bugs that confuse the two.
	 */
	if (m_maxwrite == 0)
		m_maxwrite = MIN(libfuse_max_write, (uint32_t)m_maxphys / 2);

	try {
		m_mock = new MockFS(m_maxreadahead, m_allow_other,
			m_default_permissions, m_push_symlinks_in, m_ro,
			m_pm, m_init_flags, m_kernel_minor_version,
			m_maxwrite, m_async, m_noclusterr, m_time_gran,
-			m_nointr);
+			m_nointr, m_noatime);
		/*
		 * FUSE_ACCESS is called almost universally.  Expecting it in
		 * each test case would be super-annoying.  Instead, set a
		 * default expectation for FUSE_ACCESS and return ENOSYS.
		 *
		 * Individual test cases can override this expectation since
		 * googlemock evaluates expectations in LIFO order.
		 */
		EXPECT_CALL(*m_mock, process(
			ResultOf([=](auto in) {
				return (in.header.opcode == FUSE_ACCESS);
			}, Eq(true)),
			_)
		).Times(AnyNumber())
		.WillRepeatedly(Invoke(ReturnErrno(ENOSYS)));
		/*
		 * FUSE_BMAP is called for most test cases that read data.
		 * Set a default expectation and return ENOSYS.
		 *
		 * Individual test cases can override this expectation since
		 * googlemock evaluates expectations in LIFO order.
		 */
		EXPECT_CALL(*m_mock, process(
			ResultOf([=](auto in) {
				return (in.header.opcode == FUSE_BMAP);
			}, Eq(true)),
			_)
		).Times(AnyNumber())
		.WillRepeatedly(Invoke(ReturnErrno(ENOSYS)));
	} catch (std::system_error err) {
		FAIL() << err.what();
	}
}
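/*
 * Illustrative note, not part of this change: because googlemock evaluates
 * expectations in LIFO order, a test case can shadow the ENOSYS defaults
 * installed in SetUp simply by registering its own, later expectation.  For
 * example, a test that wants FUSE_ACCESS to succeed for inode ino can call
 *
 *	expect_access(ino, F_OK, 0);
 *
 * in its body, and that expectation will be matched before the default.
 */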
void FuseTest::expect_access(uint64_t ino, mode_t access_mode, int error)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_ACCESS &&
				in.header.nodeid == ino &&
				in.body.access.mask == access_mode);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(error)));
}

void FuseTest::expect_destroy(int error)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_DESTROY);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([&](auto in, auto& out) {
		m_mock->m_quit = true;
		out.header.len = sizeof(out.header);
		out.header.unique = in.header.unique;
		out.header.error = -error;
	})));
}

void FuseTest::expect_flush(uint64_t ino, int times, ProcessMockerT r)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FLUSH &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).Times(times)
	.WillRepeatedly(Invoke(r));
}

void FuseTest::expect_forget(uint64_t ino, uint64_t nlookup, sem_t *sem)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FORGET &&
				in.header.nodeid == ino &&
				in.body.forget.nlookup == nlookup);
		}, Eq(true)),
		_)
	).WillOnce(Invoke([=](auto in __unused, auto &out __unused) {
		if (sem != NULL)
			sem_post(sem);
		/* FUSE_FORGET has no response! */
	}));
}

void FuseTest::expect_getattr(uint64_t ino, uint64_t size)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_GETATTR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, attr);
		out.body.attr.attr.ino = ino;	// Must match nodeid
		out.body.attr.attr.mode = S_IFREG | 0644;
		out.body.attr.attr.size = size;
		out.body.attr.attr_valid = UINT64_MAX;
	})));
}

void FuseTest::expect_getxattr(uint64_t ino, const char *attr,
	ProcessMockerT r)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			const char *a = (const char*)in.body.bytes +
				sizeof(fuse_getxattr_in);
			return (in.header.opcode == FUSE_GETXATTR &&
				in.header.nodeid == ino &&
				0 == strcmp(attr, a));
		}, Eq(true)),
		_)
	).WillOnce(Invoke(r));
}

void FuseTest::expect_lookup(const char *relpath, uint64_t ino, mode_t mode,
	uint64_t size, int times, uint64_t attr_valid, uid_t uid, gid_t gid)
{
	EXPECT_LOOKUP(FUSE_ROOT_ID, relpath)
	.Times(times)
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry);
		out.body.entry.attr.mode = mode;
		out.body.entry.nodeid = ino;
		out.body.entry.attr.nlink = 1;
		out.body.entry.attr_valid = attr_valid;
		out.body.entry.attr.size = size;
		out.body.entry.attr.uid = uid;
		out.body.entry.attr.gid = gid;
	})));
}

void FuseTest::expect_lookup_7_8(const char *relpath, uint64_t ino,
	mode_t mode, uint64_t size, int times, uint64_t attr_valid, uid_t uid,
	gid_t gid)
{
	EXPECT_LOOKUP(FUSE_ROOT_ID, relpath)
	.Times(times)
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, entry_7_8);
		out.body.entry.attr.mode = mode;
		out.body.entry.nodeid = ino;
		out.body.entry.attr.nlink = 1;
		out.body.entry.attr_valid = attr_valid;
		out.body.entry.attr.size = size;
		out.body.entry.attr.uid = uid;
		out.body.entry.attr.gid = gid;
	})));
}
void FuseTest::expect_open(uint64_t ino, uint32_t flags, int times)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_OPEN &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).Times(times)
	.WillRepeatedly(Invoke(
		ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(out.header);
		SET_OUT_HEADER_LEN(out, open);
		out.body.open.fh = FH;
		out.body.open.open_flags = flags;
	})));
}

void FuseTest::expect_opendir(uint64_t ino)
{
	/* opendir(3) calls fstatfs */
	EXPECT_CALL(*m_mock, process(
		ResultOf([](auto in) {
			return (in.header.opcode == FUSE_STATFS);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(
		ReturnImmediate([=](auto i __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, statfs);
	})));

	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_OPENDIR &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(out.header);
		SET_OUT_HEADER_LEN(out, open);
		out.body.open.fh = FH;
	})));
}

void FuseTest::expect_read(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, const void *contents, int flags)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READ &&
				in.header.nodeid == ino &&
				in.body.read.fh == FH &&
				in.body.read.offset == offset &&
				in.body.read.size == isize &&
				(flags == -1 ?
					(in.body.read.flags == O_RDONLY ||
					 in.body.read.flags == O_RDWR) :
					in.body.read.flags == (uint32_t)flags));
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		out.header.len = sizeof(struct fuse_out_header) + osize;
		memmove(out.body.bytes, contents, osize);
	}))).RetiresOnSaturation();
}

void FuseTest::expect_readdir(uint64_t ino, uint64_t off,
	std::vector<struct dirent> &ents)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_READDIR &&
				in.header.nodeid == ino &&
				in.body.readdir.fh == FH &&
				in.body.readdir.offset == off);
		}, Eq(true)),
		_)
	).WillRepeatedly(Invoke(ReturnImmediate([=](auto in, auto& out) {
		struct fuse_dirent *fde = (struct fuse_dirent*)&(out.body);
		int i = 0;

		out.header.error = 0;
		out.header.len = 0;

		for (const auto& it: ents) {
			size_t entlen, entsize;

			fde->ino = it.d_fileno;
			fde->off = it.d_off;
			fde->type = it.d_type;
			fde->namelen = it.d_namlen;
			strncpy(fde->name, it.d_name, it.d_namlen);
			entlen = FUSE_NAME_OFFSET + fde->namelen;
			entsize = FUSE_DIRENT_SIZE(fde);
			/*
			 * The FUSE protocol does not require zeroing out the
			 * unused portion of the name.  But it's a good
			 * practice to prevent information disclosure to the
			 * FUSE client, even though the client is usually the
			 * kernel.
			 */
			memset(fde->name + fde->namelen, 0,
				entsize - entlen);
			if (out.header.len + entsize > in.body.read.size) {
				printf("Overflow in readdir expectation: "
				    "i=%d\n", i);
				break;
			}
			out.header.len += entsize;
			fde = (struct fuse_dirent*)
				((intmax_t*)fde + entsize / sizeof(intmax_t));
			i++;
		}
		out.header.len += sizeof(out.header);
	})));
}

void FuseTest::expect_release(uint64_t ino, uint64_t fh)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASE &&
				in.header.nodeid == ino &&
				in.body.release.fh == fh);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(0)));
}

void FuseTest::expect_releasedir(uint64_t ino, ProcessMockerT r)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_RELEASEDIR &&
				in.header.nodeid == ino &&
				in.body.release.fh == FH);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(r));
}

void FuseTest::expect_unlink(uint64_t parent, const char *path, int error)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_UNLINK &&
				0 == strcmp(path, in.body.unlink) &&
				in.header.nodeid == parent);
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnErrno(error)));
}

void FuseTest::expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, uint32_t flags_set, uint32_t flags_unset,
	const void *contents)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			const char *buf = (const char*)in.body.bytes +
				sizeof(struct fuse_write_in);
			bool pid_ok;
			uint32_t wf = in.body.write.write_flags;

			if (wf & FUSE_WRITE_CACHE)
				pid_ok = true;
			else
				pid_ok = (pid_t)in.header.pid == getpid();

			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino &&
				in.body.write.fh == FH &&
				in.body.write.offset == offset &&
				in.body.write.size == isize &&
				pid_ok &&
				(wf & flags_set) == flags_set &&
				(wf & flags_unset) == 0 &&
				(in.body.write.flags == O_WRONLY ||
				 in.body.write.flags == O_RDWR) &&
				0 == bcmp(buf, contents, isize));
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = osize;
	})));
}

void FuseTest::expect_write_7_8(uint64_t ino, uint64_t offset, uint64_t isize,
	uint64_t osize, const void *contents)
{
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			const char *buf = (const char*)in.body.bytes +
				FUSE_COMPAT_WRITE_IN_SIZE;
			bool pid_ok = (pid_t)in.header.pid == getpid();
			return (in.header.opcode == FUSE_WRITE &&
				in.header.nodeid == ino &&
				in.body.write.fh == FH &&
				in.body.write.offset == offset &&
				in.body.write.size == isize &&
				pid_ok &&
				0 == bcmp(buf, contents, isize));
		}, Eq(true)),
		_)
	).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) {
		SET_OUT_HEADER_LEN(out, write);
		out.body.write.size = osize;
	})));
}
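/*
 * Illustrative usage, not part of this change: a test that expects a write
 * issued by the writeback cache, i.e. one with FUSE_WRITE_CACHE set while
 * placing no constraint on the other write flags, could register
 *
 *	expect_write(ino, 0, bufsize, bufsize, FUSE_WRITE_CACHE, 0, CONTENTS);
 *
 * whereas passing flags_unset = FUSE_WRITE_CACHE would instead require that
 * the flag be clear.
 */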
void get_unprivileged_id(uid_t *uid, gid_t *gid)
{
	struct passwd *pw;
	struct group *gr;

	/*
	 * First try "tests", Kyua's default unprivileged user.  XXX after
	 * GoogleTest gains a proper Kyua wrapper, get this with the Kyua API
	 */
	pw = getpwnam("tests");
	if (pw == NULL) {
		/* Fall back to "nobody" */
		pw = getpwnam("nobody");
	}
	if (pw == NULL)
		GTEST_SKIP() << "Test requires an unprivileged user";
	/* Use group "nobody", which is Kyua's default unprivileged group */
	gr = getgrnam("nobody");
	if (gr == NULL)
		GTEST_SKIP() << "Test requires an unprivileged group";
	*uid = pw->pw_uid;
	*gid = gr->gr_gid;
}

void FuseTest::fork(bool drop_privs, int *child_status,
	std::function<void()> parent_func,
	std::function<int()> child_func)
{
	sem_t *sem;
	int mprot = PROT_READ | PROT_WRITE;
	int mflags = MAP_ANON | MAP_SHARED;
	pid_t child;
	uid_t uid;
	gid_t gid;

	if (drop_privs) {
		get_unprivileged_id(&uid, &gid);
		if (IsSkipped())
			return;
	}

	sem = (sem_t*)mmap(NULL, sizeof(*sem), mprot, mflags, -1, 0);
	ASSERT_NE(MAP_FAILED, sem) << strerror(errno);
	ASSERT_EQ(0, sem_init(sem, 1, 0)) << strerror(errno);

	if ((child = ::fork()) == 0) {
		/* In child */
		int err = 0;

		if (sem_wait(sem)) {
			perror("sem_wait");
			err = 1;
			goto out;
		}

		if (drop_privs && 0 != setegid(gid)) {
			perror("setegid");
			err = 1;
			goto out;
		}
		if (drop_privs && 0 != setreuid(-1, uid)) {
			perror("setreuid");
			err = 1;
			goto out;
		}
		err = child_func();

out:
		sem_destroy(sem);
		_exit(err);
	} else if (child > 0) {
		/*
		 * In parent.  Cleanup must happen here, because it's still
		 * privileged.
		 */
		m_mock->m_child_pid = child;
		ASSERT_NO_FATAL_FAILURE(parent_func());

		/* Signal the child process to go */
		ASSERT_EQ(0, sem_post(sem)) << strerror(errno);

		ASSERT_LE(0, wait(child_status)) << strerror(errno);
	} else {
		FAIL() << strerror(errno);
	}
	munmap(sem, sizeof(*sem));
	return;
}

static void usage(char* progname) {
	fprintf(stderr, "Usage: %s [-v]\n\t-v increase verbosity\n", progname);
	exit(2);
}

int main(int argc, char **argv) {
	int ch;
	FuseEnv *fuse_env = new FuseEnv;

	InitGoogleTest(&argc, argv);
	AddGlobalTestEnvironment(fuse_env);

	while ((ch = getopt(argc, argv, "v")) != -1) {
		switch (ch) {
			case 'v':
				verbosity++;
				break;
			default:
				usage(argv[0]);
				break;
		}
	}

	return (RUN_ALL_TESTS());
}
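/*
 * Illustrative usage of FuseTest::fork, a sketch that is not part of this
 * change: the parent registers expectations while still privileged, then the
 * child drops privileges and performs the access under test.  FULLPATH,
 * RELPATH, and ino are hypothetical names.
 *
 *	int status;
 *
 *	fork(true, &status, [&] {
 *		expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1);
 *		expect_open(ino, 0, 1);
 *	}, [&]() {
 *		int fd = open(FULLPATH, O_RDONLY);
 *		return (fd < 0 ? 1 : 0);
 *	});
 *	ASSERT_EQ(0, WEXITSTATUS(status));
 */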
diff --git a/tests/sys/fs/fusefs/utils.hh b/tests/sys/fs/fusefs/utils.hh
index 5d5c5290a60d..a6f1d63ada6b 100644
--- a/tests/sys/fs/fusefs/utils.hh
+++ b/tests/sys/fs/fusefs/utils.hh
@@ -1,256 +1,258 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * This software was developed by BFF Storage Systems, LLC under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

struct _sem;
typedef struct _sem sem_t;
struct _dirdesc;
typedef struct _dirdesc DIR;

/* Nanoseconds to sleep, for tests that must */
#define NAP_NS	(100'000'000)

void get_unprivileged_id(uid_t *uid, gid_t *gid);

inline void nap()
{
	usleep(NAP_NS / 1000);
}

enum cache_mode {
	Uncached,
	Writethrough,
	Writeback,
	WritebackAsync
};

const char *cache_mode_to_s(enum cache_mode cm);
bool is_unsafe_aio_enabled(void);

extern const uint32_t libfuse_max_write;

class FuseTest : public ::testing::Test {
	protected:
	uint32_t m_maxreadahead;
	uint32_t m_maxwrite;
	uint32_t m_init_flags;
	bool m_allow_other;
	bool m_default_permissions;
	uint32_t m_kernel_minor_version;
	enum poll_method m_pm;
+	bool m_noatime;
	bool m_push_symlinks_in;
	bool m_ro;
	bool m_async;
	bool m_noclusterr;
	bool m_nointr;
	unsigned m_time_gran;
	MockFS *m_mock = NULL;
	const static uint64_t FH = 0xdeadbeef1a7ebabe;

	public:
	int m_maxbcachebuf;
	int m_maxphys;

	FuseTest():
		m_maxreadahead(0),
		m_maxwrite(0),
		m_init_flags(0),
		m_allow_other(false),
		m_default_permissions(false),
		m_kernel_minor_version(FUSE_KERNEL_MINOR_VERSION),
		m_pm(BLOCKING),
+		m_noatime(false),
		m_push_symlinks_in(false),
		m_ro(false),
		m_async(false),
		m_noclusterr(false),
		m_nointr(false),
		m_time_gran(1),
		m_maxbcachebuf(0),
		m_maxphys(0)
	{}

	virtual void SetUp();

	virtual void TearDown() {
		if (m_mock)
			delete m_mock;
	}

	/*
	 * Create an expectation that FUSE_ACCESS will be called once for the
	 * given inode with the given access_mode, returning the given errno
	 */
	void expect_access(uint64_t ino, mode_t access_mode, int error);

	/* Expect FUSE_DESTROY and shutdown the daemon */
	void expect_destroy(int error);

	/*
	 * Create an expectation that FUSE_FLUSH will be called times times
	 * for the given inode
	 */
	void expect_flush(uint64_t ino, int times, ProcessMockerT r);

	/*
	 * Create an expectation that FUSE_FORGET will be called for the given
	 * inode.  There will be no response.  If sem is provided, it will be
	 * posted after the operation is received by the daemon.
	 */
	void expect_forget(uint64_t ino, uint64_t nlookup, sem_t *sem = NULL);

	/*
	 * Create an expectation that FUSE_GETATTR will be called for the
	 * given inode any number of times.  It will respond with a few basic
	 * attributes, like the given size and the mode S_IFREG | 0644
	 */
	void expect_getattr(uint64_t ino, uint64_t size);

	/*
	 * Create an expectation that FUSE_GETXATTR will be called once for
	 * the given inode.
	 */
	void expect_getxattr(uint64_t ino, const char *attr, ProcessMockerT r);
	/*
	 * Create an expectation that FUSE_LOOKUP will be called exactly
	 * times times for the given path, with the given cache validity
	 * period.  It will respond with inode ino, mode mode, and filesize
	 * size.
	 */
	void expect_lookup(const char *relpath, uint64_t ino, mode_t mode,
		uint64_t size, int times, uint64_t attr_valid = UINT64_MAX,
		uid_t uid = 0, gid_t gid = 0);

	/* The protocol 7.8 version of expect_lookup */
	void expect_lookup_7_8(const char *relpath, uint64_t ino, mode_t mode,
		uint64_t size, int times, uint64_t attr_valid = UINT64_MAX,
		uid_t uid = 0, gid_t gid = 0);

	/*
	 * Create an expectation that FUSE_OPEN will be called for the given
	 * inode exactly times times.  It will return with open_flags flags
	 * and file handle FH.
	 */
	void expect_open(uint64_t ino, uint32_t flags, int times);

	/*
	 * Create an expectation that FUSE_OPENDIR will be called exactly once
	 * for inode ino.
	 */
	void expect_opendir(uint64_t ino);

	/*
	 * Create an expectation that FUSE_READ will be called exactly once
	 * for the given inode, at offset offset and with size isize.  It will
	 * return the first osize bytes from contents.
	 *
	 * Protocol 7.8 tests can use this same expectation method because
	 * nothing currently validates the size of the fuse_read_in struct.
	 */
	void expect_read(uint64_t ino, uint64_t offset, uint64_t isize,
		uint64_t osize, const void *contents, int flags = -1);

	/*
	 * Create an expectation that FUSE_READDIR will be called any number
	 * of times on the given ino with the given offset, returning (by
	 * copy) the provided entries
	 */
	void expect_readdir(uint64_t ino, uint64_t off,
		std::vector<struct dirent> &ents);

	/*
	 * Create an expectation that FUSE_RELEASE will be called exactly once
	 * for the given inode and filehandle, returning success
	 */
	void expect_release(uint64_t ino, uint64_t fh);

	/*
	 * Create an expectation that FUSE_RELEASEDIR will be called exactly
	 * once for the given inode
	 */
	void expect_releasedir(uint64_t ino, ProcessMockerT r);

	/*
	 * Create an expectation that FUSE_UNLINK will be called exactly once
	 * for the given path, returning an errno
	 */
	void expect_unlink(uint64_t parent, const char *path, int error);

	/*
	 * Create an expectation that FUSE_WRITE will be called exactly once
	 * for the given inode, at offset offset, with size isize and buffer
	 * contents.  Any flags present in flags_set must be set, and any
	 * present in flags_unset must not be set.  Other flags are don't
	 * care.  It will return osize.
	 */
	void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
		uint64_t osize, uint32_t flags_set, uint32_t flags_unset,
		const void *contents);

	/* Protocol 7.8 version of expect_write */
	void expect_write_7_8(uint64_t ino, uint64_t offset, uint64_t isize,
		uint64_t osize, const void *contents);

	/*
	 * Helper that runs code in a child process.
	 *
	 * First, parent_func runs in the parent process.
	 * Then, child_func runs in the child process, dropping privileges if
	 * desired.
	 * Finally, fusetest_fork returns.
	 *
	 * # Returns
	 *
	 * fusetest_fork may SKIP the test, which the caller should detect
	 * with the IsSkipped() method.  If not, then the child's exit status
	 * will be returned in status.
	 */
	void fork(bool drop_privs, int *status,
		std::function<void()> parent_func,
		std::function<int()> child_func);

	/*
	 * Deliberately leak a file descriptor.
	 *
	 * Closing a file descriptor on fusefs would cause the server to
	 * receive FUSE_CLOSE and possibly FUSE_INACTIVE.  Handling those
	 * operations would needlessly complicate most tests.  So most tests
	 * deliberately leak the file descriptors instead.  This method serves
	 * to document the leakage, and provide a single point of suppression
	 * for static analyzers.
	 */
	/* coverity[+close: arg-0] */
	static void leak(int fd __unused) {}

	/*
	 * Deliberately leak a DIR* pointer
	 *
	 * See comments for FuseTest::leak
	 */
	static void leakdir(DIR* dirp __unused) {}
};
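/*
 * Illustrative sketch, not part of this change: a minimal test built on
 * FuseTest.  The expect_* helpers above queue expectations on the mock
 * server before the syscall under test runs, and leak() documents the
 * deliberately unclosed descriptor.  The test name, path, and constants
 * below are hypothetical.
 *
 *	TEST_F(FuseTest, example_read)
 *	{
 *		const uint64_t ino = 42;
 *		char buf[8];
 *		int fd;
 *
 *		expect_lookup("f", ino, S_IFREG | 0644, sizeof(buf), 1);
 *		expect_open(ino, 0, 1);
 *		expect_read(ino, 0, sizeof(buf), sizeof(buf), "abcdefgh");
 *
 *		fd = open("mountpoint/f", O_RDONLY);
 *		ASSERT_LE(0, fd) << strerror(errno);
 *		ASSERT_EQ((ssize_t)sizeof(buf), read(fd, buf, sizeof(buf)));
 *		leak(fd);
 *	}
 */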