diff --git a/sys/fs/fuse/fuse_internal.c b/sys/fs/fuse/fuse_internal.c index d57de19038dd..4a09bc9394cf 100644 --- a/sys/fs/fuse/fuse_internal.c +++ b/sys/fs/fuse/fuse_internal.c @@ -1,1345 +1,1348 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. and Amit Singh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Copyright (c) 2019 The FreeBSD Foundation * * Portions of this software were developed by BFF Storage Systems, LLC under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fuse.h" #include "fuse_file.h" #include "fuse_internal.h" #include "fuse_io.h" #include "fuse_ipc.h" #include "fuse_node.h" #include "fuse_file.h" SDT_PROVIDER_DECLARE(fusefs); /* * Fuse trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(fusefs, , internal, trace, "int", "char*"); #ifdef ZERO_PAD_INCOMPLETE_BUFS static int isbzero(void *buf, size_t len); #endif counter_u64_t fuse_lookup_cache_hits; counter_u64_t fuse_lookup_cache_misses; SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_hits, CTLFLAG_RD, &fuse_lookup_cache_hits, "number of positive cache hits in lookup"); SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_misses, CTLFLAG_RD, &fuse_lookup_cache_misses, "number of cache misses in lookup"); int fuse_internal_get_cached_vnode(struct mount* mp, ino_t ino, int flags, struct vnode **vpp) { struct bintime now; struct thread *td = curthread; uint64_t nodeid = ino; int error; *vpp = NULL; error = vfs_hash_get(mp, fuse_vnode_hash(nodeid), flags, td, vpp, fuse_vnode_cmp, &nodeid); if (error) return error; /* * Check the entry cache timeout. We have to do this within fusefs * instead of by using cache_enter_time/cache_lookup because those * routines are only intended to work with pathnames, not inodes */ if (*vpp != NULL) { getbinuptime(&now); if (bintime_cmp(&(VTOFUD(*vpp)->entry_cache_timeout), &now, >)){ counter_u64_add(fuse_lookup_cache_hits, 1); return 0; } else { /* Entry cache timeout */ counter_u64_add(fuse_lookup_cache_misses, 1); cache_purge(*vpp); vput(*vpp); *vpp = NULL; } } return 0; } SDT_PROBE_DEFINE0(fusefs, , internal, access_vadmin); /* Synchronously send a FUSE_ACCESS operation */ int fuse_internal_access(struct vnode *vp, accmode_t mode, struct thread *td, struct ucred *cred) { int err = 0; uint32_t mask = F_OK; int dataflags; int vtype; struct mount *mp; struct fuse_dispatcher fdi; struct fuse_access_in *fai; struct fuse_data *data; mp = vnode_mount(vp); vtype = vnode_vtype(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; if (mode == 0) return 0; if (mode & VMODIFY_PERMS && vfs_isrdonly(mp)) { switch (vp->v_type) { case VDIR: /* FALLTHROUGH */ case VLNK: /* FALLTHROUGH */ case VREG: return EROFS; default: break; } } /* Unless explicitly permitted, deny everyone except the fs owner. */ if (!(dataflags & FSESS_DAEMON_CAN_SPY)) { if (fuse_match_cred(data->daemoncred, cred)) return EPERM; } if (dataflags & FSESS_DEFAULT_PERMISSIONS) { struct vattr va; fuse_internal_getattr(vp, &va, cred, td); return vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid, mode, cred); } if (mode & VADMIN) { /* * The FUSE protocol doesn't have an equivalent of VADMIN, so * it's a bug if we ever reach this point with that bit set. 
*/ SDT_PROBE0(fusefs, , internal, access_vadmin); } if (fsess_not_impl(mp, FUSE_ACCESS)) return 0; if ((mode & (VWRITE | VAPPEND)) != 0) mask |= W_OK; if ((mode & VREAD) != 0) mask |= R_OK; if ((mode & VEXEC) != 0) mask |= X_OK; fdisp_init(&fdi, sizeof(*fai)); fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred); fai = fdi.indata; fai->mask = mask; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_ACCESS); err = 0; } return err; } /* * Cache FUSE attributes from attr, in attribute cache associated with vnode * 'vp'. Optionally, if argument 'vap' is not NULL, store a copy of the * converted attributes there as well. * * If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do * return the result to the caller). */ void fuse_internal_cache_attrs(struct vnode *vp, struct fuse_attr *attr, uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap, bool from_server) { struct mount *mp; struct fuse_vnode_data *fvdat; struct fuse_data *data; struct vattr *vp_cache_at; mp = vnode_mount(vp); fvdat = VTOFUD(vp); data = fuse_get_mpdata(mp); ASSERT_VOP_ELOCKED(vp, "fuse_internal_cache_attrs"); fuse_validity_2_bintime(attr_valid, attr_valid_nsec, &fvdat->attr_cache_timeout); if (vnode_isreg(vp) && fvdat->cached_attrs.va_size != VNOVAL && attr->size != fvdat->cached_attrs.va_size) { if ( data->cache_mode == FUSE_CACHE_WB && fvdat->flag & FN_SIZECHANGE) { const char *msg; /* * The server changed the file's size even though we're * using writeback caching and we have outstanding * dirty writes! That's a server bug. */ if (fuse_libabi_geq(data, 7, 23)) { msg = "writeback cache incoherent! " "To prevent data corruption, disable " "the writeback cache according to your " "FUSE server's documentation."; } else { msg = "writeback cache incoherent! " "To prevent data corruption, disable " "the writeback cache by setting " "vfs.fusefs.data_cache_mode to 0 or 1."; } fuse_warn(data, FSESS_WARN_WB_CACHE_INCOHERENT, msg); } if (fuse_vnode_attr_cache_valid(vp) && data->cache_mode != FUSE_CACHE_UC) { /* * The server changed the file's size even though we * have it cached and our cache has not yet expired. * That's a bug. */ fuse_warn(data, FSESS_WARN_CACHE_INCOHERENT, "cache incoherent!
" "To prevent " "data corruption, disable the data cache " "by mounting with -o direct_io, or as " "directed otherwise by your FUSE server's " "documentation."); } } /* Fix our buffers if the filesize changed without us knowing */ if (vnode_isreg(vp) && attr->size != fvdat->cached_attrs.va_size) { (void)fuse_vnode_setsize(vp, attr->size, from_server); fvdat->cached_attrs.va_size = attr->size; } if (attr_valid > 0 || attr_valid_nsec > 0) vp_cache_at = &(fvdat->cached_attrs); else if (vap != NULL) vp_cache_at = vap; else return; vattr_null(vp_cache_at); vp_cache_at->va_fsid = mp->mnt_stat.f_fsid.val[0]; vp_cache_at->va_fileid = attr->ino; vp_cache_at->va_mode = attr->mode & ~S_IFMT; vp_cache_at->va_nlink = attr->nlink; vp_cache_at->va_uid = attr->uid; vp_cache_at->va_gid = attr->gid; vp_cache_at->va_rdev = attr->rdev; vp_cache_at->va_size = attr->size; /* XXX on i386, seconds are truncated to 32 bits */ vp_cache_at->va_atime.tv_sec = attr->atime; vp_cache_at->va_atime.tv_nsec = attr->atimensec; vp_cache_at->va_mtime.tv_sec = attr->mtime; vp_cache_at->va_mtime.tv_nsec = attr->mtimensec; vp_cache_at->va_ctime.tv_sec = attr->ctime; vp_cache_at->va_ctime.tv_nsec = attr->ctimensec; if (fuse_libabi_geq(data, 7, 9) && attr->blksize > 0) vp_cache_at->va_blocksize = attr->blksize; else vp_cache_at->va_blocksize = PAGE_SIZE; vp_cache_at->va_type = IFTOVT(attr->mode); vp_cache_at->va_bytes = attr->blocks * S_BLKSIZE; vp_cache_at->va_flags = 0; if (vap != vp_cache_at && vap != NULL) memcpy(vap, vp_cache_at, sizeof(*vap)); } /* fsync */ int fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio) { if (tick->tk_aw_ohead.error == ENOSYS) { fsess_set_notimpl(tick->tk_data->mp, fticket_opcode(tick)); } return 0; } int fuse_internal_fsync(struct vnode *vp, struct thread *td, int waitfor, bool datasync) { struct fuse_fsync_in *ffsi = NULL; struct fuse_dispatcher fdi; struct fuse_filehandle *fufh; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct mount *mp = vnode_mount(vp); int op = FUSE_FSYNC; int err = 0; if (fsess_not_impl(vnode_mount(vp), (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) { return 0; } if (vnode_isdir(vp)) op = FUSE_FSYNCDIR; if (fsess_not_impl(mp, op)) return 0; fdisp_init(&fdi, sizeof(*ffsi)); /* * fsync every open file handle for this file, because we can't be sure * which file handle the caller is really referring to. 
*/ LIST_FOREACH(fufh, &fvdat->handles, next) { fdi.iosize = sizeof(*ffsi); if (ffsi == NULL) fdisp_make_vp(&fdi, op, vp, td, NULL); else fdisp_refresh_vp(&fdi, op, vp, td, NULL); ffsi = fdi.indata; ffsi->fh = fufh->fh_id; ffsi->fsync_flags = 0; if (datasync) ffsi->fsync_flags = FUSE_FSYNC_FDATASYNC; if (waitfor == MNT_WAIT) { err = fdisp_wait_answ(&fdi); } else { fuse_insert_callback(fdi.tick, fuse_internal_fsync_callback); fuse_insert_message(fdi.tick, false); } if (err == ENOSYS) { /* ENOSYS means "success, and don't call again" */ fsess_set_notimpl(mp, op); err = 0; break; } } fdisp_destroy(&fdi); return err; } /* Asynchronous invalidation */ SDT_PROBE_DEFINE3(fusefs, , internal, invalidate_entry, "struct vnode*", "struct fuse_notify_inval_entry_out*", "char*"); int fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio) { struct fuse_notify_inval_entry_out fnieo; struct componentname cn; struct vnode *dvp, *vp; char name[PATH_MAX]; int err; if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0) return (err); if (fnieo.namelen >= sizeof(name)) return (EINVAL); if ((err = uiomove(name, fnieo.namelen, uio)) != 0) return (err); name[fnieo.namelen] = '\0'; /* fusefs does not cache "." or ".." entries */ if (strncmp(name, ".", sizeof(".")) == 0 || strncmp(name, "..", sizeof("..")) == 0) return (0); if (fnieo.parent == FUSE_ROOT_ID) err = VFS_ROOT(mp, LK_SHARED, &dvp); else err = fuse_internal_get_cached_vnode( mp, fnieo.parent, LK_SHARED, &dvp); SDT_PROBE3(fusefs, , internal, invalidate_entry, dvp, &fnieo, name); /* * If dvp is not in the cache, then it must've been reclaimed. And * since fuse_vnop_reclaim does a cache_purge, name's entry must've * been invalidated already. So we can safely return if dvp == NULL */ if (err != 0 || dvp == NULL) return (err); /* * XXX we can't check dvp's generation because the FUSE invalidate * entry message doesn't include it. Worse case is that we invalidate * an entry that didn't need to be invalidated. */ cn.cn_nameiop = LOOKUP; cn.cn_flags = 0; /* !MAKEENTRY means free cached entry */ cn.cn_cred = curthread->td_ucred; cn.cn_lkflags = LK_SHARED; cn.cn_pnbuf = NULL; cn.cn_nameptr = name; cn.cn_namelen = fnieo.namelen; err = cache_lookup(dvp, &vp, &cn, NULL, NULL); MPASS(err == 0); fuse_vnode_clear_attr_cache(dvp); vput(dvp); return (0); } SDT_PROBE_DEFINE2(fusefs, , internal, invalidate_inode, "struct vnode*", "struct fuse_notify_inval_inode_out *"); int fuse_internal_invalidate_inode(struct mount *mp, struct uio *uio) { struct fuse_notify_inval_inode_out fniio; struct vnode *vp; int err; if ((err = uiomove(&fniio, sizeof(fniio), uio)) != 0) return (err); if (fniio.ino == FUSE_ROOT_ID) err = VFS_ROOT(mp, LK_EXCLUSIVE, &vp); else err = fuse_internal_get_cached_vnode(mp, fniio.ino, LK_SHARED, &vp); SDT_PROBE2(fusefs, , internal, invalidate_inode, vp, &fniio); if (err != 0 || vp == NULL) return (err); /* * XXX we can't check vp's generation because the FUSE invalidate * entry message doesn't include it. Worse case is that we invalidate * an inode that didn't need to be invalidated. */ /* * Flush and invalidate buffers if off >= 0. Technically we only need * to flush and invalidate the range of offsets [off, off + len), but * for simplicity's sake we do everything. 
*/ if (fniio.off >= 0) fuse_io_invalbuf(vp, curthread); fuse_vnode_clear_attr_cache(vp); vput(vp); return (0); } /* mknod */ int fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap) { struct fuse_data *data; struct fuse_mknod_in fmni; size_t insize; data = fuse_get_mpdata(dvp->v_mount); fmni.mode = MAKEIMODE(vap->va_type, vap->va_mode); fmni.rdev = vap->va_rdev; if (fuse_libabi_geq(data, 7, 12)) { insize = sizeof(fmni); fmni.umask = curthread->td_proc->p_pd->pd_cmask; } else { insize = FUSE_COMPAT_MKNOD_IN_SIZE; } return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKNOD, &fmni, insize, vap->va_type)); } /* readdir */ int fuse_internal_readdir(struct vnode *vp, struct uio *uio, off_t startoff, struct fuse_filehandle *fufh, struct fuse_iov *cookediov, int *ncookies, u_long *cookies) { int err = 0; struct fuse_dispatcher fdi; struct fuse_read_in *fri = NULL; int fnd_start; if (uio_resid(uio) == 0) return 0; fdisp_init(&fdi, 0); /* * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p * I/O). */ /* * fnd_start is set non-zero once the offset in the directory gets * to the startoff. This is done because directories must be read * from the beginning (offset == 0) when fuse_vnop_readdir() needs * to do an open of the directory. * If it is not set non-zero here, it will be set non-zero in * fuse_internal_readdir_processdata() when uio_offset == startoff. */ fnd_start = 0; if (uio->uio_offset == startoff) fnd_start = 1; while (uio_resid(uio) > 0) { fdi.iosize = sizeof(*fri); if (fri == NULL) fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL); else fdisp_refresh_vp(&fdi, FUSE_READDIR, vp, NULL, NULL); fri = fdi.indata; fri->fh = fufh->fh_id; fri->offset = uio_offset(uio); fri->size = MIN(uio->uio_resid, fuse_get_mpdata(vp->v_mount)->max_read); if ((err = fdisp_wait_answ(&fdi))) break; if ((err = fuse_internal_readdir_processdata(uio, startoff, &fnd_start, fri->size, fdi.answ, fdi.iosize, cookediov, ncookies, &cookies))) break; } fdisp_destroy(&fdi); return ((err == -1) ? 0 : err); } /* * Return -1 to indicate that this readdir is finished, 0 if it copied * all the directory data read in and it may be possible to read more * and greater than 0 for a failure. */ int fuse_internal_readdir_processdata(struct uio *uio, off_t startoff, int *fnd_start, size_t reqsize, void *buf, size_t bufsize, struct fuse_iov *cookediov, int *ncookies, u_long **cookiesp) { int err = 0; int oreclen; size_t freclen; struct dirent *de; struct fuse_dirent *fudge; u_long *cookies; cookies = *cookiesp; if (bufsize < FUSE_NAME_OFFSET) return -1; for (;;) { if (bufsize < FUSE_NAME_OFFSET) { err = -1; break; } fudge = (struct fuse_dirent *)buf; freclen = FUSE_DIRENT_SIZE(fudge); if (bufsize < freclen) { /* * This indicates a partial directory entry at the * end of the directory data. */ err = -1; break; } #ifdef ZERO_PAD_INCOMPLETE_BUFS if (isbzero(buf, FUSE_NAME_OFFSET)) { err = -1; break; } #endif if (!fudge->namelen || fudge->namelen > MAXNAMLEN) { err = EINVAL; break; } oreclen = GENERIC_DIRSIZ((struct pseudo_dirent *) &fudge->namelen); if (oreclen > uio_resid(uio)) { /* Out of space for the dir so we are done. */ err = -1; break; } /* * Don't start to copy the directory entries out until * the requested offset in the directory is found. 
*/ if (*fnd_start != 0) { fiov_adjust(cookediov, oreclen); bzero(cookediov->base, oreclen); de = (struct dirent *)cookediov->base; de->d_fileno = fudge->ino; de->d_off = fudge->off; de->d_reclen = oreclen; de->d_type = fudge->type; de->d_namlen = fudge->namelen; memcpy((char *)cookediov->base + sizeof(struct dirent) - MAXNAMLEN - 1, (char *)buf + FUSE_NAME_OFFSET, fudge->namelen); dirent_terminate(de); err = uiomove(cookediov->base, cookediov->len, uio); if (err) break; if (cookies != NULL) { if (*ncookies == 0) { err = -1; break; } *cookies = fudge->off; cookies++; (*ncookies)--; } } else if (startoff == fudge->off) *fnd_start = 1; buf = (char *)buf + freclen; bufsize -= freclen; uio_setoffset(uio, fudge->off); } *cookiesp = cookies; return err; } /* remove */ int fuse_internal_remove(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, enum fuse_opcode op) { struct fuse_dispatcher fdi; nlink_t nlink; int err = 0; fdisp_init(&fdi, cnp->cn_namelen + 1); fdisp_make_vp(&fdi, op, dvp, curthread, cnp->cn_cred); memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdi.indata)[cnp->cn_namelen] = '\0'; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); if (err) return (err); /* * Access the cached nlink even if the attr cached has expired. If * it's inaccurate, the worst that will happen is: * 1) We'll recycle the vnode even though the file has another link we * don't know about, costing a bit of cpu time, or * 2) We won't recycle the vnode even though all of its links are gone. * It will linger around until vnlru reclaims it, costing a bit of * temporary memory. */ nlink = VTOFUD(vp)->cached_attrs.va_nlink--; /* * Purge the parent's attribute cache because the daemon * should've updated its mtime and ctime. */ fuse_vnode_clear_attr_cache(dvp); /* NB: nlink could be zero if it was never cached */ if (nlink <= 1 || vnode_vtype(vp) == VDIR) { fuse_internal_vnode_disappear(vp); } else { cache_purge(vp); fuse_vnode_update(vp, FN_CTIMECHANGE); } return err; } /* rename */ int fuse_internal_rename(struct vnode *fdvp, struct componentname *fcnp, struct vnode *tdvp, struct componentname *tcnp) { struct fuse_dispatcher fdi; struct fuse_rename_in *fri; int err = 0; fdisp_init(&fdi, sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 2); fdisp_make_vp(&fdi, FUSE_RENAME, fdvp, curthread, tcnp->cn_cred); fri = fdi.indata; fri->newdir = VTOI(tdvp); memcpy((char *)fdi.indata + sizeof(*fri), fcnp->cn_nameptr, fcnp->cn_namelen); ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen] = '\0'; memcpy((char *)fdi.indata + sizeof(*fri) + fcnp->cn_namelen + 1, tcnp->cn_nameptr, tcnp->cn_namelen); ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 1] = '\0'; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); return err; } /* strategy */ /* entity creation */ void fuse_internal_newentry_makerequest(struct mount *mp, uint64_t dnid, struct componentname *cnp, enum fuse_opcode op, void *buf, size_t bufsize, struct fuse_dispatcher *fdip) { fdip->iosize = bufsize + cnp->cn_namelen + 1; fdisp_make(fdip, op, mp, dnid, curthread, cnp->cn_cred); memcpy(fdip->indata, buf, bufsize); memcpy((char *)fdip->indata + bufsize, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdip->indata)[bufsize + cnp->cn_namelen] = '\0'; } int fuse_internal_newentry_core(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum vtype vtyp, struct fuse_dispatcher *fdip) { int err = 0; struct fuse_entry_out *feo; struct mount *mp = vnode_mount(dvp); if ((err = fdisp_wait_answ(fdip))) { return err; } feo = 
fdip->answ; if ((err = fuse_internal_checkentry(feo, vtyp))) { return err; } err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp); if (err) { fuse_internal_forget_send(mp, curthread, cnp->cn_cred, feo->nodeid, 1); return err; } /* * Purge the parent's attribute cache because the daemon should've * updated its mtime and ctime */ fuse_vnode_clear_attr_cache(dvp); fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid, feo->attr_valid_nsec, NULL, true); return err; } int fuse_internal_newentry(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum fuse_opcode op, void *buf, size_t bufsize, enum vtype vtype) { int err; struct fuse_dispatcher fdi; struct mount *mp = vnode_mount(dvp); fdisp_init(&fdi, 0); fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf, bufsize, &fdi); err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi); fdisp_destroy(&fdi); return err; } /* entity destruction */ int fuse_internal_forget_callback(struct fuse_ticket *ftick, struct uio *uio) { fuse_internal_forget_send(ftick->tk_data->mp, curthread, NULL, ((struct fuse_in_header *)ftick->tk_ms_fiov.base)->nodeid, 1); return 0; } void fuse_internal_forget_send(struct mount *mp, struct thread *td, struct ucred *cred, uint64_t nodeid, uint64_t nlookup) { struct fuse_dispatcher fdi; struct fuse_forget_in *ffi; /* * KASSERT(nlookup > 0, ("zero-times forget for vp #%llu", * (long long unsigned) nodeid)); */ fdisp_init(&fdi, sizeof(*ffi)); fdisp_make(&fdi, FUSE_FORGET, mp, nodeid, td, cred); ffi = fdi.indata; ffi->nlookup = nlookup; fuse_insert_message(fdi.tick, false); fdisp_destroy(&fdi); } /* Fetch the vnode's attributes from the daemon*/ int fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td) { struct fuse_dispatcher fdi; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_getattr_in *fgai; struct fuse_attr_out *fao; off_t old_filesize = fvdat->cached_attrs.va_size; struct timespec old_atime = fvdat->cached_attrs.va_atime; struct timespec old_ctime = fvdat->cached_attrs.va_ctime; struct timespec old_mtime = fvdat->cached_attrs.va_mtime; enum vtype vtyp; int err; fdisp_init(&fdi, sizeof(*fgai)); fdisp_make_vp(&fdi, FUSE_GETATTR, vp, td, cred); fgai = fdi.indata; /* * We could look up a file handle and set it in fgai->fh, but that * involves extra runtime work and I'm unaware of any file systems that * care. 
*/ fgai->getattr_flags = 0; if ((err = fdisp_wait_answ(&fdi))) { if (err == ENOENT) fuse_internal_vnode_disappear(vp); goto out; } fao = (struct fuse_attr_out *)fdi.answ; vtyp = IFTOVT(fao->attr.mode); if (fvdat->flag & FN_SIZECHANGE) fao->attr.size = old_filesize; if (fvdat->flag & FN_ATIMECHANGE) { fao->attr.atime = old_atime.tv_sec; fao->attr.atimensec = old_atime.tv_nsec; } if (fvdat->flag & FN_CTIMECHANGE) { fao->attr.ctime = old_ctime.tv_sec; fao->attr.ctimensec = old_ctime.tv_nsec; } if (fvdat->flag & FN_MTIMECHANGE) { fao->attr.mtime = old_mtime.tv_sec; fao->attr.mtimensec = old_mtime.tv_nsec; } fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid, fao->attr_valid_nsec, vap, true); if (vtyp != vnode_vtype(vp)) { fuse_internal_vnode_disappear(vp); err = ENOENT; } out: fdisp_destroy(&fdi); return err; } /* Read a vnode's attributes from cache or fetch them from the fuse daemon */ int fuse_internal_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td) { struct vattr *attrs; if ((attrs = VTOVA(vp)) != NULL) { *vap = *attrs; /* struct copy */ return 0; } return fuse_internal_do_getattr(vp, vap, cred, td); } void fuse_internal_vnode_disappear(struct vnode *vp) { struct fuse_vnode_data *fvdat = VTOFUD(vp); ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear"); fvdat->flag |= FN_REVOKED; cache_purge(vp); } /* fuse start/stop */ SDT_PROBE_DEFINE2(fusefs, , internal, init_done, "struct fuse_data*", "struct fuse_init_out*"); int fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio) { int err = 0; struct fuse_data *data = tick->tk_data; struct fuse_init_out *fiio; if ((err = tick->tk_aw_ohead.error)) { goto out; } if ((err = fticket_pull(tick, uio))) { goto out; } fiio = fticket_resp(tick)->base; data->fuse_libabi_major = fiio->major; data->fuse_libabi_minor = fiio->minor; if (!fuse_libabi_geq(data, 7, 4)) { /* * With a little work we could support servers as old as 7.1. * But there would be little payoff. 
*/ SDT_PROBE2(fusefs, , internal, trace, 1, "userspace version too low"); err = EPROTONOSUPPORT; goto out; } if (fuse_libabi_geq(data, 7, 5)) { if (fticket_resp(tick)->len == sizeof(struct fuse_init_out) || fticket_resp(tick)->len == FUSE_COMPAT_22_INIT_OUT_SIZE) { data->max_write = fiio->max_write; if (fiio->flags & FUSE_ASYNC_READ) data->dataflags |= FSESS_ASYNC_READ; if (fiio->flags & FUSE_POSIX_LOCKS) data->dataflags |= FSESS_POSIX_LOCKS; if (fiio->flags & FUSE_EXPORT_SUPPORT) data->dataflags |= FSESS_EXPORT_SUPPORT; if (fiio->flags & FUSE_NO_OPEN_SUPPORT) data->dataflags |= FSESS_NO_OPEN_SUPPORT; if (fiio->flags & FUSE_NO_OPENDIR_SUPPORT) data->dataflags |= FSESS_NO_OPENDIR_SUPPORT; /* * Don't bother to check FUSE_BIG_WRITES, because it's * redundant with max_write */ /* * max_background and congestion_threshold are not * implemented */ } else { err = EINVAL; } } else { /* Old fixed values */ data->max_write = 4096; } if (fuse_libabi_geq(data, 7, 6)) data->max_readahead_blocks = fiio->max_readahead / maxbcachebuf; if (!fuse_libabi_geq(data, 7, 7)) fsess_set_notimpl(data->mp, FUSE_INTERRUPT); if (!fuse_libabi_geq(data, 7, 8)) { fsess_set_notimpl(data->mp, FUSE_BMAP); fsess_set_notimpl(data->mp, FUSE_DESTROY); } if (fuse_libabi_geq(data, 7, 23) && fiio->time_gran >= 1 && fiio->time_gran <= 1000000000) data->time_gran = fiio->time_gran; else data->time_gran = 1; if (!fuse_libabi_geq(data, 7, 23)) data->cache_mode = fuse_data_cache_mode; else if (fiio->flags & FUSE_WRITEBACK_CACHE) data->cache_mode = FUSE_CACHE_WB; else data->cache_mode = FUSE_CACHE_WT; if (!fuse_libabi_geq(data, 7, 24)) fsess_set_notimpl(data->mp, FUSE_LSEEK); if (!fuse_libabi_geq(data, 7, 28)) fsess_set_notimpl(data->mp, FUSE_COPY_FILE_RANGE); out: if (err) { fdata_set_dead(data); } FUSE_LOCK(); data->dataflags |= FSESS_INITED; SDT_PROBE2(fusefs, , internal, init_done, data, fiio); wakeup(&data->ticketer); FUSE_UNLOCK(); return 0; } void fuse_internal_send_init(struct fuse_data *data, struct thread *td) { struct fuse_init_in *fiii; struct fuse_dispatcher fdi; fdisp_init(&fdi, sizeof(*fiii)); fdisp_make(&fdi, FUSE_INIT, data->mp, 0, td, NULL); fiii = fdi.indata; fiii->major = FUSE_KERNEL_VERSION; fiii->minor = FUSE_KERNEL_MINOR_VERSION; /* * fusefs currently reads ahead no more than one cache block at a time. * See fuse_read_biobackend */ fiii->max_readahead = maxbcachebuf; /* * Unsupported features: * FUSE_FILE_OPS: No known FUSE server or client supports it * FUSE_ATOMIC_O_TRUNC: our VFS cannot support it * FUSE_DONT_MASK: unlike Linux, FreeBSD always applies the umask, even * when default ACLs are in use. * FUSE_SPLICE_WRITE, FUSE_SPLICE_MOVE, FUSE_SPLICE_READ: FreeBSD * doesn't have splice(2).
* FUSE_FLOCK_LOCKS: not yet implemented * FUSE_HAS_IOCTL_DIR: not yet implemented * FUSE_AUTO_INVAL_DATA: not yet implemented * FUSE_DO_READDIRPLUS: not yet implemented * FUSE_READDIRPLUS_AUTO: not yet implemented * FUSE_ASYNC_DIO: not yet implemented * FUSE_PARALLEL_DIROPS: not yet implemented * FUSE_HANDLE_KILLPRIV: not yet implemented * FUSE_POSIX_ACL: not yet implemented * FUSE_ABORT_ERROR: not yet implemented * FUSE_CACHE_SYMLINKS: not yet implemented * FUSE_MAX_PAGES: not yet implemented */ fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT | FUSE_NO_OPENDIR_SUPPORT; fuse_insert_callback(fdi.tick, fuse_internal_init_callback); fuse_insert_message(fdi.tick, false); fdisp_destroy(&fdi); } /* * Send a FUSE_SETATTR operation with no permissions checks. If cred is NULL, * send the request with root credentials */ int fuse_internal_setattr(struct vnode *vp, struct vattr *vap, struct thread *td, struct ucred *cred) { struct fuse_vnode_data *fvdat; struct fuse_dispatcher fdi; struct fuse_setattr_in *fsai; struct mount *mp; pid_t pid = td->td_proc->p_pid; struct fuse_data *data; int dataflags; int err = 0; enum vtype vtyp; int sizechanged = -1; uint64_t newsize = 0; mp = vnode_mount(vp); fvdat = VTOFUD(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; fdisp_init(&fdi, sizeof(*fsai)); fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred); if (!cred) { fdi.finh->uid = 0; fdi.finh->gid = 0; } fsai = fdi.indata; fsai->valid = 0; if (vap->va_uid != (uid_t)VNOVAL) { fsai->uid = vap->va_uid; fsai->valid |= FATTR_UID; } if (vap->va_gid != (gid_t)VNOVAL) { fsai->gid = vap->va_gid; fsai->valid |= FATTR_GID; } if (vap->va_size != VNOVAL) { struct fuse_filehandle *fufh = NULL; /*Truncate to a new value. */ fsai->size = vap->va_size; sizechanged = 1; newsize = vap->va_size; fsai->valid |= FATTR_SIZE; fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid); if (fufh) { fsai->fh = fufh->fh_id; fsai->valid |= FATTR_FH; } VTOFUD(vp)->flag &= ~FN_SIZECHANGE; } if (vap->va_atime.tv_sec != VNOVAL) { fsai->atime = vap->va_atime.tv_sec; fsai->atimensec = vap->va_atime.tv_nsec; fsai->valid |= FATTR_ATIME; if (vap->va_vaflags & VA_UTIMES_NULL) fsai->valid |= FATTR_ATIME_NOW; } else if (fvdat->flag & FN_ATIMECHANGE) { fsai->atime = fvdat->cached_attrs.va_atime.tv_sec; fsai->atimensec = fvdat->cached_attrs.va_atime.tv_nsec; fsai->valid |= FATTR_ATIME; } if (vap->va_mtime.tv_sec != VNOVAL) { fsai->mtime = vap->va_mtime.tv_sec; fsai->mtimensec = vap->va_mtime.tv_nsec; fsai->valid |= FATTR_MTIME; if (vap->va_vaflags & VA_UTIMES_NULL) fsai->valid |= FATTR_MTIME_NOW; } else if (fvdat->flag & FN_MTIMECHANGE) { fsai->mtime = fvdat->cached_attrs.va_mtime.tv_sec; fsai->mtimensec = fvdat->cached_attrs.va_mtime.tv_nsec; fsai->valid |= FATTR_MTIME; } if (fuse_libabi_geq(data, 7, 23) && fvdat->flag & FN_CTIMECHANGE) { fsai->ctime = fvdat->cached_attrs.va_ctime.tv_sec; fsai->ctimensec = fvdat->cached_attrs.va_ctime.tv_nsec; fsai->valid |= FATTR_CTIME; } if (vap->va_mode != (mode_t)VNOVAL) { fsai->mode = vap->va_mode & ALLPERMS; fsai->valid |= FATTR_MODE; } if (!fsai->valid) { goto out; } if ((err = fdisp_wait_answ(&fdi))) goto out; vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode); if (vnode_vtype(vp) != vtyp) { if (vnode_vtype(vp) == VNON && vtyp != VNON) { SDT_PROBE2(fusefs, , internal, trace, 1, "FUSE: Dang! 
" "vnode_vtype is VNON and vtype isn't."); } else { /* * STALE vnode, ditch * * The vnode has changed its type "behind our back". + * This probably means that the file got deleted and + * recreated on the server, with the same inode. * There's nothing really we can do, so let us just - * force an internal revocation and tell the caller to - * try again, if interested. + * return ENOENT. After all, the entry must not have + * existed in the recent past. If the user tries + * again, it will work. */ fuse_internal_vnode_disappear(vp); - err = EAGAIN; + err = ENOENT; } } if (err == 0) { struct fuse_attr_out *fao = (struct fuse_attr_out*)fdi.answ; fuse_vnode_undirty_cached_timestamps(vp, true); fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid, fao->attr_valid_nsec, NULL, false); } out: fdisp_destroy(&fdi); return err; } /* * FreeBSD clears the SUID and SGID bits on any write by a non-root user. */ void fuse_internal_clear_suid_on_write(struct vnode *vp, struct ucred *cred, struct thread *td) { struct fuse_data *data; struct mount *mp; struct vattr va; int dataflags; mp = vnode_mount(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; ASSERT_VOP_LOCKED(vp, __func__); if (dataflags & FSESS_DEFAULT_PERMISSIONS) { if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) { fuse_internal_getattr(vp, &va, cred, td); if (va.va_mode & (S_ISUID | S_ISGID)) { mode_t mode = va.va_mode & ~(S_ISUID | S_ISGID); /* Clear all vattr fields except mode */ vattr_null(&va); va.va_mode = mode; /* * Ignore fuse_internal_setattr's return value, * because at this point the write operation has * already succeeded and we don't want to return * failing status for that. */ (void)fuse_internal_setattr(vp, &va, td, NULL); } } } } #ifdef ZERO_PAD_INCOMPLETE_BUFS static int isbzero(void *buf, size_t len) { int i; for (i = 0; i < len; i++) { if (((char *)buf)[i]) return (0); } return (1); } #endif void fuse_internal_init(void) { fuse_lookup_cache_misses = counter_u64_alloc(M_WAITOK); fuse_lookup_cache_hits = counter_u64_alloc(M_WAITOK); } void fuse_internal_destroy(void) { counter_u64_free(fuse_lookup_cache_hits); counter_u64_free(fuse_lookup_cache_misses); } diff --git a/sys/fs/fuse/fuse_node.c b/sys/fs/fuse/fuse_node.c index 1d485b6a0b46..6d8f826cef85 100644 --- a/sys/fs/fuse/fuse_node.c +++ b/sys/fs/fuse/fuse_node.c @@ -1,524 +1,527 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. and Amit Singh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Copyright (c) 2019 The FreeBSD Foundation * * Portions of this software were developed by BFF Storage Systems, LLC under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fuse.h" #include "fuse_node.h" #include "fuse_internal.h" #include "fuse_io.h" #include "fuse_ipc.h" SDT_PROVIDER_DECLARE(fusefs); /* * Fuse trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(fusefs, , node, trace, "int", "char*"); MALLOC_DEFINE(M_FUSEVN, "fuse_vnode", "fuse vnode private data"); static int sysctl_fuse_cache_mode(SYSCTL_HANDLER_ARGS); static counter_u64_t fuse_node_count; SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, node_count, CTLFLAG_RD, &fuse_node_count, "Count of FUSE vnodes"); int fuse_data_cache_mode = FUSE_CACHE_WT; /* * DEPRECATED * This sysctl is no longer needed as of fuse protocol 7.23. Individual * servers can select the cache behavior they need for each mountpoint: * - writethrough: the default * - writeback: set FUSE_WRITEBACK_CACHE in fuse_init_out.flags * - uncached: set FOPEN_DIRECT_IO for every file * The sysctl is retained primarily for use by jails supporting older FUSE * protocols. It may be removed entirely once FreeBSD 11.3 and 12.0 are EOL. 
*/ SYSCTL_PROC(_vfs_fusefs, OID_AUTO, data_cache_mode, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &fuse_data_cache_mode, 0, sysctl_fuse_cache_mode, "I", "Zero: disable caching of FUSE file data; One: write-through caching " "(default); Two: write-back caching (generally unsafe)"); static int sysctl_fuse_cache_mode(SYSCTL_HANDLER_ARGS) { int val, error; val = *(int *)arg1; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return (error); switch (val) { case FUSE_CACHE_UC: case FUSE_CACHE_WT: case FUSE_CACHE_WB: *(int *)arg1 = val; break; default: return (EDOM); } return (0); } static void fuse_vnode_init(struct vnode *vp, struct fuse_vnode_data *fvdat, uint64_t nodeid, enum vtype vtyp) { fvdat->nid = nodeid; LIST_INIT(&fvdat->handles); vattr_null(&fvdat->cached_attrs); if (nodeid == FUSE_ROOT_ID) { vp->v_vflag |= VV_ROOT; } vp->v_type = vtyp; vp->v_data = fvdat; cluster_init_vn(&fvdat->clusterw); counter_u64_add(fuse_node_count, 1); } void fuse_vnode_destroy(struct vnode *vp) { struct fuse_vnode_data *fvdat = vp->v_data; vp->v_data = NULL; KASSERT(LIST_EMPTY(&fvdat->handles), ("Destroying fuse vnode with open files!")); free(fvdat, M_FUSEVN); counter_u64_add(fuse_node_count, -1); } int fuse_vnode_cmp(struct vnode *vp, void *nidp) { return (VTOI(vp) != *((uint64_t *)nidp)); } SDT_PROBE_DEFINE3(fusefs, , node, stale_vnode, "struct vnode*", "enum vtype", "uint64_t"); static int fuse_vnode_alloc(struct mount *mp, struct thread *td, uint64_t nodeid, enum vtype vtyp, struct vnode **vpp) { struct fuse_data *data; struct fuse_vnode_data *fvdat; struct vnode *vp2; int err = 0; data = fuse_get_mpdata(mp); if (vtyp == VNON) { return EINVAL; } *vpp = NULL; err = vfs_hash_get(mp, fuse_vnode_hash(nodeid), LK_EXCLUSIVE, td, vpp, fuse_vnode_cmp, &nodeid); if (err) return (err); if (*vpp) { - if ((*vpp)->v_type != vtyp) { + if ((*vpp)->v_type == vtyp) { + /* Reuse a vnode that hasn't yet been reclaimed */ + MPASS((*vpp)->v_data != NULL); + MPASS(VTOFUD(*vpp)->nid == nodeid); + SDT_PROBE2(fusefs, , node, trace, 1, + "vnode taken from hash"); + return (0); + } else { /* - * STALE vnode! This probably indicates a buggy - * server, but it could also be the result of a race - * between FUSE_LOOKUP and another client's - * FUSE_UNLINK/FUSE_CREATE + * The inode changed types! If we get here, we can't + * tell whether the inode's entry cache had expired + * yet. So this could be the result of a buggy server, + * but more likely the server just reused an inode + * number following an entry cache expiration. */ SDT_PROBE3(fusefs, , node, stale_vnode, *vpp, vtyp, nodeid); fuse_internal_vnode_disappear(*vpp); + vgone(*vpp); lockmgr((*vpp)->v_vnlock, LK_RELEASE, NULL); - *vpp = NULL; - return (EAGAIN); } - MPASS((*vpp)->v_data != NULL); - MPASS(VTOFUD(*vpp)->nid == nodeid); - SDT_PROBE2(fusefs, , node, trace, 1, "vnode taken from hash"); - return (0); } fvdat = malloc(sizeof(*fvdat), M_FUSEVN, M_WAITOK | M_ZERO); switch (vtyp) { case VFIFO: err = getnewvnode("fuse", mp, &fuse_fifoops, vpp); break; default: err = getnewvnode("fuse", mp, &fuse_vnops, vpp); break; } if (err) { free(fvdat, M_FUSEVN); return (err); } lockmgr((*vpp)->v_vnlock, LK_EXCLUSIVE, NULL); fuse_vnode_init(*vpp, fvdat, nodeid, vtyp); err = insmntque(*vpp, mp); ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc"); if (err) { lockmgr((*vpp)->v_vnlock, LK_RELEASE, NULL); free(fvdat, M_FUSEVN); *vpp = NULL; return (err); } /* Disallow async reads for fifos because UFS does. 
I don't know why */ if (data->dataflags & FSESS_ASYNC_READ && vtyp != VFIFO) VN_LOCK_ASHARE(*vpp); err = vfs_hash_insert(*vpp, fuse_vnode_hash(nodeid), LK_EXCLUSIVE, td, &vp2, fuse_vnode_cmp, &nodeid); if (err) { lockmgr((*vpp)->v_vnlock, LK_RELEASE, NULL); free(fvdat, M_FUSEVN); *vpp = NULL; return (err); } if (vp2 != NULL) { *vpp = vp2; return (0); } ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc"); return (0); } int fuse_vnode_get(struct mount *mp, struct fuse_entry_out *feo, uint64_t nodeid, struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum vtype vtyp) { struct thread *td = curthread; /* * feo should only be NULL for the root directory, which (when libfuse * is used) always has generation 0 */ uint64_t generation = feo ? feo->generation : 0; int err = 0; err = fuse_vnode_alloc(mp, td, nodeid, vtyp, vpp); if (err) { return err; } if (dvp != NULL) { MPASS(cnp && (cnp->cn_flags & ISDOTDOT) == 0); MPASS(cnp && !(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.')); fuse_vnode_setparent(*vpp, dvp); } if (dvp != NULL && cnp != NULL && (cnp->cn_flags & MAKEENTRY) != 0 && feo != NULL && (feo->entry_valid != 0 || feo->entry_valid_nsec != 0)) { struct timespec timeout; ASSERT_VOP_LOCKED(*vpp, "fuse_vnode_get"); ASSERT_VOP_LOCKED(dvp, "fuse_vnode_get"); fuse_validity_2_timespec(feo, &timeout); cache_enter_time(dvp, *vpp, cnp, &timeout, NULL); } VTOFUD(*vpp)->generation = generation; /* * In userland, libfuse uses cached lookups for dot and dotdot entries, * thus it does not really bump the nlookup counter for forget. * Follow the same semantic and avoid the bump in order to keep * nlookup counters consistent. */ if (cnp == NULL || ((cnp->cn_flags & ISDOTDOT) == 0 && (cnp->cn_namelen != 1 || cnp->cn_nameptr[0] != '.'))) VTOFUD(*vpp)->nlookup++; return 0; } /* * Called for every fusefs vnode open to initialize the vnode (not * fuse_filehandle) for use */ void fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td) { if (vnode_vtype(vp) == VREG) vnode_create_vobject(vp, 0, td); } int fuse_vnode_savesize(struct vnode *vp, struct ucred *cred, pid_t pid) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct thread *td = curthread; struct fuse_filehandle *fufh = NULL; struct fuse_dispatcher fdi; struct fuse_setattr_in *fsai; int err = 0; ASSERT_VOP_ELOCKED(vp, "fuse_io_extend"); if (fuse_isdeadfs(vp)) { return EBADF; } if (vnode_vtype(vp) == VDIR) { return EISDIR; } if (vfs_isrdonly(vnode_mount(vp))) { return EROFS; } if (cred == NULL) { cred = td->td_ucred; } fdisp_init(&fdi, sizeof(*fsai)); fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred); fsai = fdi.indata; fsai->valid = 0; /* Truncate to a new value. */ MPASS((fvdat->flag & FN_SIZECHANGE) != 0); fsai->size = fvdat->cached_attrs.va_size; fsai->valid |= FATTR_SIZE; fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid); if (fufh) { fsai->fh = fufh->fh_id; fsai->valid |= FATTR_FH; } err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); if (err == 0) fvdat->flag &= ~FN_SIZECHANGE; return err; } /* * Adjust the vnode's size to a new value. * * If the new value came from the server, such as from a FUSE_GETATTR * operation, set `from_server` true. But if it came from a local operation, * such as write(2) or truncate(2), set `from_server` false. 
*/ int fuse_vnode_setsize(struct vnode *vp, off_t newsize, bool from_server) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct vattr *attrs; off_t oldsize; size_t iosize; struct buf *bp = NULL; int err = 0; ASSERT_VOP_ELOCKED(vp, "fuse_vnode_setsize"); iosize = fuse_iosize(vp); oldsize = fvdat->cached_attrs.va_size; fvdat->cached_attrs.va_size = newsize; if ((attrs = VTOVA(vp)) != NULL) attrs->va_size = newsize; if (newsize < oldsize) { daddr_t lbn; err = vtruncbuf(vp, newsize, fuse_iosize(vp)); if (err) goto out; if (newsize % iosize == 0) goto out; /* * Zero the contents of the last partial block. * Sure seems like vtruncbuf should do this for us. */ lbn = newsize / iosize; bp = getblk(vp, lbn, iosize, PCATCH, 0, 0); if (!bp) { err = EINTR; goto out; } if (!(bp->b_flags & B_CACHE)) goto out; /* Nothing to do */ MPASS(bp->b_flags & B_VMIO); vfs_bio_clrbuf(bp); bp->b_dirtyend = MIN(bp->b_dirtyend, newsize - lbn * iosize); } else if (from_server && newsize > oldsize && oldsize != VNOVAL) { /* * The FUSE server changed the file size behind our back. We * should invalidate the entire cache. */ daddr_t left_lbn, end_lbn; left_lbn = oldsize / iosize; end_lbn = howmany(newsize, iosize); v_inval_buf_range(vp, 0, end_lbn, iosize); } out: if (bp) brelse(bp); vnode_pager_setsize(vp, newsize); return err; } /* Get the current, possibly dirty, size of the file */ int fuse_vnode_size(struct vnode *vp, off_t *filesize, struct ucred *cred, struct thread *td) { struct fuse_vnode_data *fvdat = VTOFUD(vp); int error = 0; if (!(fvdat->flag & FN_SIZECHANGE) && (!fuse_vnode_attr_cache_valid(vp) || fvdat->cached_attrs.va_size == VNOVAL)) error = fuse_internal_do_getattr(vp, NULL, cred, td); if (!error) *filesize = fvdat->cached_attrs.va_size; return error; } void fuse_vnode_undirty_cached_timestamps(struct vnode *vp, bool atime) { struct fuse_vnode_data *fvdat = VTOFUD(vp); fvdat->flag &= ~(FN_MTIMECHANGE | FN_CTIMECHANGE); if (atime) fvdat->flag &= ~FN_ATIMECHANGE; } /* Update a fuse file's cached timestamps */ void fuse_vnode_update(struct vnode *vp, int flags) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct mount *mp = vnode_mount(vp); struct fuse_data *data = fuse_get_mpdata(mp); struct timespec ts; vfs_timestamp(&ts); if (data->time_gran > 1) ts.tv_nsec = rounddown(ts.tv_nsec, data->time_gran); if (mp->mnt_flag & MNT_NOATIME) flags &= ~FN_ATIMECHANGE; if (flags & FN_ATIMECHANGE) fvdat->cached_attrs.va_atime = ts; if (flags & FN_MTIMECHANGE) fvdat->cached_attrs.va_mtime = ts; if (flags & FN_CTIMECHANGE) fvdat->cached_attrs.va_ctime = ts; fvdat->flag |= flags; } void fuse_node_init(void) { fuse_node_count = counter_u64_alloc(M_WAITOK); } void fuse_node_destroy(void) { counter_u64_free(fuse_node_count); } diff --git a/tests/sys/fs/fusefs/getattr.cc b/tests/sys/fs/fusefs/getattr.cc index fb91f8c049d0..6bca7e0af7c7 100644 --- a/tests/sys/fs/fusefs/getattr.cc +++ b/tests/sys/fs/fusefs/getattr.cc @@ -1,316 +1,366 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * * This software was developed by BFF Storage Systems, LLC under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ extern "C" { #include #include } #include "mockfs.hh" #include "utils.hh" using namespace testing; class Getattr : public FuseTest { public: void expect_lookup(const char *relpath, uint64_t ino, mode_t mode, uint64_t size, int times, uint64_t attr_valid, uint32_t attr_valid_nsec) { EXPECT_LOOKUP(FUSE_ROOT_ID, relpath) .Times(times) .WillRepeatedly(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = mode; out.body.entry.nodeid = ino; out.body.entry.attr.nlink = 1; out.body.entry.attr_valid = attr_valid; out.body.entry.attr_valid_nsec = attr_valid_nsec; out.body.entry.attr.size = size; out.body.entry.entry_valid = UINT64_MAX; }))); } }; class Getattr_7_8: public FuseTest { public: virtual void SetUp() { m_kernel_minor_version = 8; FuseTest::SetUp(); } }; /* * If getattr returns a non-zero cache timeout, then subsequent VOP_GETATTRs * should use the cached attributes, rather than query the daemon */ TEST_F(Getattr, attr_cache) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; struct stat sb; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = ino; out.body.entry.entry_valid = UINT64_MAX; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { return (in.header.opcode == FUSE_GETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto i __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr_valid = UINT64_MAX; out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0644; }))); EXPECT_EQ(0, stat(FULLPATH, &sb)); /* The second stat(2) should use cached attributes */ EXPECT_EQ(0, stat(FULLPATH, &sb)); } /* * If getattr returns a finite but non-zero cache timeout, then we should * discard the cached attributes and requery the daemon after the timeout * period passes. 
*/ TEST_F(Getattr, attr_cache_timeout) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; struct stat sb; expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1, 0, 0); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { return (in.header.opcode == FUSE_GETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).Times(2) .WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr_valid_nsec = NAP_NS / 2; out.body.attr.attr_valid = 0; out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0644; }))); EXPECT_EQ(0, stat(FULLPATH, &sb)); nap(); /* Timeout has expired. stat(2) should requery the daemon */ EXPECT_EQ(0, stat(FULLPATH, &sb)); } /* * If attr.blksize is zero, then the kernel should use a default value for * st_blksize */ TEST_F(Getattr, blksize_zero) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; struct stat sb; expect_lookup(RELPATH, ino, S_IFREG | 0644, 1, 1, 0, 0); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { return (in.header.opcode == FUSE_GETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto i __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.mode = S_IFREG | 0644; out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.blksize = 0; out.body.attr.attr.size = 1; }))); ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); EXPECT_EQ((blksize_t)PAGE_SIZE, sb.st_blksize); } TEST_F(Getattr, enoent) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; struct stat sb; const uint64_t ino = 42; sem_t sem; ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno); expect_lookup(RELPATH, ino, S_IFREG | 0644, 0, 1, 0, 0); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { return (in.header.opcode == FUSE_GETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).WillOnce(Invoke(ReturnErrno(ENOENT))); // Since FUSE_GETATTR returns ENOENT, the kernel will reclaim the vnode // and send a FUSE_FORGET expect_forget(ino, 1, &sem); EXPECT_NE(0, stat(FULLPATH, &sb)); EXPECT_EQ(ENOENT, errno); sem_wait(&sem); sem_destroy(&sem); } TEST_F(Getattr, ok) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; struct stat sb; expect_lookup(RELPATH, ino, S_IFREG | 0644, 1, 1, 0, 0); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { return (in.header.opcode == FUSE_GETATTR && in.body.getattr.getattr_flags == 0 && in.header.nodeid == ino); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto i __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0644; out.body.attr.attr.size = 1; out.body.attr.attr.blocks = 2; out.body.attr.attr.atime = 3; out.body.attr.attr.mtime = 4; out.body.attr.attr.ctime = 5; out.body.attr.attr.atimensec = 6; out.body.attr.attr.mtimensec = 7; out.body.attr.attr.ctimensec = 8; out.body.attr.attr.nlink = 9; out.body.attr.attr.uid = 10; out.body.attr.attr.gid = 11; out.body.attr.attr.rdev = 12; out.body.attr.attr.blksize = 12345; }))); ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); EXPECT_EQ(1, sb.st_size); EXPECT_EQ(2, sb.st_blocks); EXPECT_EQ(3, sb.st_atim.tv_sec); EXPECT_EQ(6, sb.st_atim.tv_nsec); EXPECT_EQ(4, sb.st_mtim.tv_sec); EXPECT_EQ(7, sb.st_mtim.tv_nsec); EXPECT_EQ(5, 
sb.st_ctim.tv_sec); EXPECT_EQ(8, sb.st_ctim.tv_nsec); EXPECT_EQ(9ull, sb.st_nlink); EXPECT_EQ(10ul, sb.st_uid); EXPECT_EQ(11ul, sb.st_gid); EXPECT_EQ(12ul, sb.st_rdev); EXPECT_EQ((blksize_t)12345, sb.st_blksize); EXPECT_EQ(ino, sb.st_ino); EXPECT_EQ(S_IFREG | 0644, sb.st_mode); //st_birthtim and st_flags are not supported by protocol 7.8. They're //only supported as OS-specific extensions to OSX. //EXPECT_EQ(, sb.st_birthtim); //EXPECT_EQ(, sb.st_flags); //FUSE can't set st_blksize until protocol 7.9 } +/* + * FUSE_GETATTR returns a different file type, even though the entry cache + * hasn't expired. This is a server bug! It probably means that the server + * removed the file and recreated it with the same inode but a different vtyp. + * The best thing fusefs can do is return ENOENT to the caller. After all, the + * entry must not have existed recently. + */ +TEST_F(Getattr, vtyp_conflict) +{ + const char FULLPATH[] = "mountpoint/some_file.txt"; + const char RELPATH[] = "some_file.txt"; + const uint64_t ino = 42; + struct stat sb; + sem_t sem; + + ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno); + + EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) + .WillOnce(Invoke( + ReturnImmediate([=](auto in __unused, auto& out) { + SET_OUT_HEADER_LEN(out, entry); + out.body.entry.attr.mode = S_IFREG | 0644; + out.body.entry.nodeid = ino; + out.body.entry.attr.nlink = 1; + out.body.entry.attr_valid = 0; + out.body.entry.entry_valid = UINT64_MAX; + }))); + EXPECT_CALL(*m_mock, process( + ResultOf([](auto in) { + return (in.header.opcode == FUSE_GETATTR && + in.body.getattr.getattr_flags == 0 && + in.header.nodeid == ino); + }, Eq(true)), + _) + ).WillOnce(Invoke(ReturnImmediate([](auto i __unused, auto& out) { + SET_OUT_HEADER_LEN(out, attr); + out.body.attr.attr.ino = ino; // Must match nodeid + out.body.attr.attr.mode = S_IFDIR | 0755; // Changed! 
+ out.body.attr.attr.nlink = 2; + }))); + // We should reclaim stale vnodes + expect_forget(ino, 1, &sem); + + ASSERT_NE(0, stat(FULLPATH, &sb)); + EXPECT_EQ(errno, ENOENT); + + sem_wait(&sem); + sem_destroy(&sem); +} + TEST_F(Getattr_7_8, ok) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; struct stat sb; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry_7_8); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = ino; out.body.entry.attr.nlink = 1; out.body.entry.attr.size = 1; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { return (in.header.opcode == FUSE_GETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto i __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr_7_8); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0644; out.body.attr.attr.size = 1; out.body.attr.attr.blocks = 2; out.body.attr.attr.atime = 3; out.body.attr.attr.mtime = 4; out.body.attr.attr.ctime = 5; out.body.attr.attr.atimensec = 6; out.body.attr.attr.mtimensec = 7; out.body.attr.attr.ctimensec = 8; out.body.attr.attr.nlink = 9; out.body.attr.attr.uid = 10; out.body.attr.attr.gid = 11; out.body.attr.attr.rdev = 12; }))); ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); EXPECT_EQ(1, sb.st_size); EXPECT_EQ(2, sb.st_blocks); EXPECT_EQ(3, sb.st_atim.tv_sec); EXPECT_EQ(6, sb.st_atim.tv_nsec); EXPECT_EQ(4, sb.st_mtim.tv_sec); EXPECT_EQ(7, sb.st_mtim.tv_nsec); EXPECT_EQ(5, sb.st_ctim.tv_sec); EXPECT_EQ(8, sb.st_ctim.tv_nsec); EXPECT_EQ(9ull, sb.st_nlink); EXPECT_EQ(10ul, sb.st_uid); EXPECT_EQ(11ul, sb.st_gid); EXPECT_EQ(12ul, sb.st_rdev); EXPECT_EQ(ino, sb.st_ino); EXPECT_EQ(S_IFREG | 0644, sb.st_mode); //st_birthtim and st_flags are not supported by protocol 7.8. They're //only supported as OS-specific extensions to OSX. } diff --git a/tests/sys/fs/fusefs/lookup.cc b/tests/sys/fs/fusefs/lookup.cc index cb9d0bb27527..d301990c2048 100644 --- a/tests/sys/fs/fusefs/lookup.cc +++ b/tests/sys/fs/fusefs/lookup.cc @@ -1,383 +1,401 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * * This software was developed by BFF Storage Systems, LLC under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ extern "C" { #include } #include "mockfs.hh" #include "utils.hh" using namespace testing; class Lookup: public FuseTest {}; class Lookup_7_8: public Lookup { public: virtual void SetUp() { m_kernel_minor_version = 8; Lookup::SetUp(); } }; /* * If lookup returns a non-zero cache timeout, then subsequent VOP_GETATTRs * should use the cached attributes, rather than query the daemon */ TEST_F(Lookup, attr_cache) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; const uint64_t generation = 13; struct stat sb; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.nodeid = ino; out.body.entry.attr_valid = UINT64_MAX; out.body.entry.attr.ino = ino; // Must match nodeid out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.attr.size = 1; out.body.entry.attr.blocks = 2; out.body.entry.attr.atime = 3; out.body.entry.attr.mtime = 4; out.body.entry.attr.ctime = 5; out.body.entry.attr.atimensec = 6; out.body.entry.attr.mtimensec = 7; out.body.entry.attr.ctimensec = 8; out.body.entry.attr.nlink = 9; out.body.entry.attr.uid = 10; out.body.entry.attr.gid = 11; out.body.entry.attr.rdev = 12; out.body.entry.generation = generation; }))); /* stat(2) issues a VOP_LOOKUP followed by a VOP_GETATTR */ ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); EXPECT_EQ(1, sb.st_size); EXPECT_EQ(2, sb.st_blocks); EXPECT_EQ(3, sb.st_atim.tv_sec); EXPECT_EQ(6, sb.st_atim.tv_nsec); EXPECT_EQ(4, sb.st_mtim.tv_sec); EXPECT_EQ(7, sb.st_mtim.tv_nsec); EXPECT_EQ(5, sb.st_ctim.tv_sec); EXPECT_EQ(8, sb.st_ctim.tv_nsec); EXPECT_EQ(9ull, sb.st_nlink); EXPECT_EQ(10ul, sb.st_uid); EXPECT_EQ(11ul, sb.st_gid); EXPECT_EQ(12ul, sb.st_rdev); EXPECT_EQ(ino, sb.st_ino); EXPECT_EQ(S_IFREG | 0644, sb.st_mode); // fuse(4) does not _yet_ support inode generations //EXPECT_EQ(generation, sb.st_gen); //st_birthtim and st_flags are not supported by protocol 7.8. They're //only supported as OS-specific extensions to OSX. //EXPECT_EQ(, sb.st_birthtim); //EXPECT_EQ(, sb.st_flags); //FUSE can't set st_blksize until protocol 7.9 } /* * If lookup returns a finite but non-zero cache timeout, then we should discard * the cached attributes and requery the daemon. 
*/ TEST_F(Lookup, attr_cache_timeout) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; struct stat sb; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .Times(2) .WillRepeatedly(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.nodeid = ino; out.body.entry.attr_valid_nsec = NAP_NS / 2; out.body.entry.attr.ino = ino; // Must match nodeid out.body.entry.attr.mode = S_IFREG | 0644; }))); /* access(2) will issue a VOP_LOOKUP and fill the attr cache */ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); /* Next access(2) will use the cached attributes */ nap(); /* The cache has timed out; VOP_GETATTR should query the daemon*/ ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); } TEST_F(Lookup, dot) { const char FULLPATH[] = "mountpoint/some_dir/."; const char RELDIRPATH[] = "some_dir"; uint64_t ino = 42; EXPECT_LOOKUP(FUSE_ROOT_ID, RELDIRPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFDIR | 0755; out.body.entry.nodeid = ino; out.body.entry.attr_valid = UINT64_MAX; out.body.entry.entry_valid = UINT64_MAX; }))); /* * access(2) is one of the few syscalls that will not (always) follow * up a successful VOP_LOOKUP with another VOP. */ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); } TEST_F(Lookup, dotdot) { const char FULLPATH[] = "mountpoint/some_dir/.."; const char RELDIRPATH[] = "some_dir"; EXPECT_LOOKUP(FUSE_ROOT_ID, RELDIRPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFDIR | 0755; out.body.entry.nodeid = 14; out.body.entry.attr_valid = UINT64_MAX; out.body.entry.entry_valid = UINT64_MAX; }))); /* * access(2) is one of the few syscalls that will not (always) follow * up a successful VOP_LOOKUP with another VOP. 
*/ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); } TEST_F(Lookup, enoent) { const char FULLPATH[] = "mountpoint/does_not_exist"; const char RELPATH[] = "does_not_exist"; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnErrno(ENOENT))); EXPECT_NE(0, access(FULLPATH, F_OK)); EXPECT_EQ(ENOENT, errno); } TEST_F(Lookup, enotdir) { const char FULLPATH[] = "mountpoint/not_a_dir/some_file.txt"; const char RELPATH[] = "not_a_dir"; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.entry_valid = UINT64_MAX; out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = 42; }))); ASSERT_EQ(-1, access(FULLPATH, F_OK)); ASSERT_EQ(ENOTDIR, errno); } /* * If lookup returns a non-zero entry timeout, then subsequent VOP_LOOKUPs * should use the cached inode rather than requery the daemon */ TEST_F(Lookup, entry_cache) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.entry_valid = UINT64_MAX; out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = 14; }))); ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); /* The second access(2) should use the cache */ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); } /* * If the daemon returns an error of 0 and an inode of 0, that's a flag for * "ENOENT and cache it" with the given entry_timeout */ TEST_F(Lookup, entry_cache_negative) { struct timespec entry_valid = {.tv_sec = TIME_T_MAX, .tv_nsec = 0}; EXPECT_LOOKUP(FUSE_ROOT_ID, "does_not_exist") .Times(1) .WillOnce(Invoke(ReturnNegativeCache(&entry_valid))); EXPECT_NE(0, access("mountpoint/does_not_exist", F_OK)); EXPECT_EQ(ENOENT, errno); EXPECT_NE(0, access("mountpoint/does_not_exist", F_OK)); EXPECT_EQ(ENOENT, errno); } /* Negative entry caches should timeout, too */ TEST_F(Lookup, entry_cache_negative_timeout) { const char *RELPATH = "does_not_exist"; const char *FULLPATH = "mountpoint/does_not_exist"; struct timespec entry_valid = {.tv_sec = 0, .tv_nsec = NAP_NS / 2}; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .Times(2) .WillRepeatedly(Invoke(ReturnNegativeCache(&entry_valid))); EXPECT_NE(0, access(FULLPATH, F_OK)); EXPECT_EQ(ENOENT, errno); nap(); /* The cache has timed out; VOP_LOOKUP should requery the daemon*/ EXPECT_NE(0, access(FULLPATH, F_OK)); EXPECT_EQ(ENOENT, errno); } /* * If lookup returns a finite but non-zero entry cache timeout, then we should * discard the cached inode and requery the daemon */ TEST_F(Lookup, entry_cache_timeout) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .Times(2) .WillRepeatedly(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.entry_valid_nsec = NAP_NS / 2; out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = 14; }))); /* access(2) will issue a VOP_LOOKUP and fill the entry cache */ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); /* Next access(2) will use the cached entry */ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); nap(); /* The cache has timed out; VOP_LOOKUP should requery the daemon*/ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); } TEST_F(Lookup, ok) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; 
EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = 14; }))); /* * access(2) is one of the few syscalls that will not (always) follow * up a successful VOP_LOOKUP with another VOP. */ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); } // Lookup in a subdirectory of the fuse mount TEST_F(Lookup, subdir) { const char FULLPATH[] = "mountpoint/some_dir/some_file.txt"; const char DIRPATH[] = "some_dir"; const char RELPATH[] = "some_file.txt"; uint64_t dir_ino = 2; uint64_t file_ino = 3; EXPECT_LOOKUP(FUSE_ROOT_ID, DIRPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFDIR | 0755; out.body.entry.nodeid = dir_ino; }))); EXPECT_LOOKUP(dir_ino, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = file_ino; }))); /* * access(2) is one of the few syscalls that will not (always) follow * up a successful VOP_LOOKUP with another VOP. */ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); } -/* - * The server returns two different vtypes for the same nodeid. This is a bad - * server! But we shouldn't crash. +/* + * The server returns two different vtypes for the same nodeid. This is + * technically allowed if the entry's cache has already expired. + * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=258022 */ TEST_F(Lookup, vtype_conflict) { const char FIRSTFULLPATH[] = "mountpoint/foo"; const char SECONDFULLPATH[] = "mountpoint/bar"; const char FIRSTRELPATH[] = "foo"; const char SECONDRELPATH[] = "bar"; uint64_t ino = 42; - expect_lookup(FIRSTRELPATH, ino, S_IFREG | 0644, 0, 1, UINT64_MAX); - expect_lookup(SECONDRELPATH, ino, S_IFDIR | 0755, 0, 1, UINT64_MAX); + EXPECT_LOOKUP(FUSE_ROOT_ID, FIRSTRELPATH) + .WillOnce(Invoke( + ReturnImmediate([=](auto in __unused, auto& out) { + SET_OUT_HEADER_LEN(out, entry); + out.body.entry.attr.mode = S_IFDIR | 0644; + out.body.entry.nodeid = ino; + out.body.entry.attr.nlink = 1; + }))); + expect_lookup(SECONDRELPATH, ino, S_IFREG | 0755, 0, 1, UINT64_MAX); + // VOP_FORGET happens asynchronously, so it may or may not arrive + // before the test completes. + EXPECT_CALL(*m_mock, process( + ResultOf([=](auto in) { + return (in.header.opcode == FUSE_FORGET && + in.header.nodeid == ino && + in.body.forget.nlookup == 1); + }, Eq(true)), + _) + ).Times(AtMost(1)) + .WillOnce(Invoke([=](auto in __unused, auto &out __unused) { })); ASSERT_EQ(0, access(FIRSTFULLPATH, F_OK)) << strerror(errno); - ASSERT_EQ(-1, access(SECONDFULLPATH, F_OK)); - ASSERT_EQ(EAGAIN, errno); + EXPECT_EQ(0, access(SECONDFULLPATH, F_OK)) << strerror(errno); } TEST_F(Lookup_7_8, ok) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry_7_8); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = 14; }))); /* * access(2) is one of the few syscalls that will not (always) follow * up a successful VOP_LOOKUP with another VOP. 
*/ ASSERT_EQ(0, access(FULLPATH, F_OK)) << strerror(errno); } diff --git a/tests/sys/fs/fusefs/setattr.cc b/tests/sys/fs/fusefs/setattr.cc index 48aa8385517f..e4458db9f8ee 100644 --- a/tests/sys/fs/fusefs/setattr.cc +++ b/tests/sys/fs/fusefs/setattr.cc @@ -1,778 +1,825 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * * This software was developed by BFF Storage Systems, LLC under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ extern "C" { #include #include } #include "mockfs.hh" #include "utils.hh" using namespace testing; class Setattr : public FuseTest {}; class RofsSetattr: public Setattr { public: virtual void SetUp() { m_ro = true; Setattr::SetUp(); } }; class Setattr_7_8: public Setattr { public: virtual void SetUp() { m_kernel_minor_version = 8; Setattr::SetUp(); } }; /* * If setattr returns a non-zero cache timeout, then subsequent VOP_GETATTRs * should use the cached attributes, rather than query the daemon */ TEST_F(Setattr, attr_cache) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; struct stat sb; const mode_t newmode = 0644; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillRepeatedly(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = ino; out.body.entry.entry_valid = UINT64_MAX; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | newmode; out.body.attr.attr_valid = UINT64_MAX; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { return (in.header.opcode == FUSE_GETATTR); }, Eq(true)), _) ).Times(0); /* Set an attribute with SETATTR */ ASSERT_EQ(0, chmod(FULLPATH, newmode)) << strerror(errno); /* The stat(2) should use cached attributes */ ASSERT_EQ(0, stat(FULLPATH, &sb)); EXPECT_EQ(S_IFREG | newmode, sb.st_mode); } /* Change the mode of a file */ TEST_F(Setattr, chmod) { const char FULLPATH[] = "mountpoint/some_file.txt"; const 
char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; const mode_t oldmode = 0755; const mode_t newmode = 0644; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | oldmode; out.body.entry.nodeid = ino; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { uint32_t valid = FATTR_MODE; return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && in.body.setattr.valid == valid && in.body.setattr.mode == newmode); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | newmode; }))); EXPECT_EQ(0, chmod(FULLPATH, newmode)) << strerror(errno); } /* * Chmod a multiply-linked file with cached attributes. Check that both files' * attributes have changed. */ TEST_F(Setattr, chmod_multiply_linked) { const char FULLPATH0[] = "mountpoint/some_file.txt"; const char RELPATH0[] = "some_file.txt"; const char FULLPATH1[] = "mountpoint/other_file.txt"; const char RELPATH1[] = "other_file.txt"; struct stat sb; const uint64_t ino = 42; const mode_t oldmode = 0777; const mode_t newmode = 0666; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH0) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | oldmode; out.body.entry.nodeid = ino; out.body.entry.attr.nlink = 2; out.body.entry.attr_valid = UINT64_MAX; out.body.entry.entry_valid = UINT64_MAX; }))); EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH1) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | oldmode; out.body.entry.nodeid = ino; out.body.entry.attr.nlink = 2; out.body.entry.attr_valid = UINT64_MAX; out.body.entry.entry_valid = UINT64_MAX; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { uint32_t valid = FATTR_MODE; return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && in.body.setattr.valid == valid && in.body.setattr.mode == newmode); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; out.body.attr.attr.mode = S_IFREG | newmode; out.body.attr.attr.nlink = 2; out.body.attr.attr_valid = UINT64_MAX; }))); /* For a lookup of the 2nd file to get it into the cache*/ ASSERT_EQ(0, stat(FULLPATH1, &sb)) << strerror(errno); EXPECT_EQ(S_IFREG | oldmode, sb.st_mode); ASSERT_EQ(0, chmod(FULLPATH0, newmode)) << strerror(errno); ASSERT_EQ(0, stat(FULLPATH0, &sb)) << strerror(errno); EXPECT_EQ(S_IFREG | newmode, sb.st_mode); ASSERT_EQ(0, stat(FULLPATH1, &sb)) << strerror(errno); EXPECT_EQ(S_IFREG | newmode, sb.st_mode); } /* Change the owner and group of a file */ TEST_F(Setattr, chown) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; const gid_t oldgroup = 66; const gid_t newgroup = 99; const uid_t olduser = 33; const uid_t newuser = 44; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = ino; out.body.entry.attr.gid = oldgroup; out.body.entry.attr.uid = olduser; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { uint32_t valid = FATTR_GID | FATTR_UID; return (in.header.opcode == FUSE_SETATTR && 
in.header.nodeid == ino && in.body.setattr.valid == valid && in.body.setattr.uid == newuser && in.body.setattr.gid == newgroup); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0644; out.body.attr.attr.uid = newuser; out.body.attr.attr.gid = newgroup; }))); EXPECT_EQ(0, chown(FULLPATH, newuser, newgroup)) << strerror(errno); } /* * FUSE daemons are allowed to check permissions however they like. If the * daemon returns EPERM, even if the file permissions "should" grant access, * then fuse(4) should return EPERM too. */ TEST_F(Setattr, eperm) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0777; out.body.entry.nodeid = ino; out.body.entry.attr.uid = in.header.uid; out.body.entry.attr.gid = in.header.gid; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).WillOnce(Invoke(ReturnErrno(EPERM))); EXPECT_NE(0, truncate(FULLPATH, 10)); EXPECT_EQ(EPERM, errno); } /* Change the mode of an open file, by its file descriptor */ TEST_F(Setattr, fchmod) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; uint64_t ino = 42; int fd; const mode_t oldmode = 0755; const mode_t newmode = 0644; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | oldmode; out.body.entry.nodeid = ino; out.body.entry.attr_valid = UINT64_MAX; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_OPEN && in.header.nodeid == ino); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { out.header.len = sizeof(out.header); SET_OUT_HEADER_LEN(out, open); }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { uint32_t valid = FATTR_MODE; return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && in.body.setattr.valid == valid && in.body.setattr.mode == newmode); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | newmode; }))); fd = open(FULLPATH, O_RDONLY); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(0, fchmod(fd, newmode)) << strerror(errno); leak(fd); } /* Change the size of an open file, by its file descriptor */ TEST_F(Setattr, ftruncate) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; uint64_t ino = 42; int fd; uint64_t fh = 0xdeadbeef1a7ebabe; const off_t oldsize = 99; const off_t newsize = 12345; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0755; out.body.entry.nodeid = ino; out.body.entry.attr_valid = UINT64_MAX; out.body.entry.attr.size = oldsize; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_OPEN && in.header.nodeid == ino); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { out.header.len = 
sizeof(out.header); SET_OUT_HEADER_LEN(out, open); out.body.open.fh = fh; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { uint32_t valid = FATTR_SIZE | FATTR_FH; return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && in.body.setattr.valid == valid && in.body.setattr.fh == fh); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0755; out.body.attr.attr.size = newsize; }))); fd = open(FULLPATH, O_RDWR); ASSERT_LE(0, fd) << strerror(errno); ASSERT_EQ(0, ftruncate(fd, newsize)) << strerror(errno); leak(fd); } /* Change the size of the file */ TEST_F(Setattr, truncate) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; const uint64_t oldsize = 100'000'000; const uint64_t newsize = 20'000'000; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = ino; out.body.entry.attr.size = oldsize; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { uint32_t valid = FATTR_SIZE; return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && in.body.setattr.valid == valid && in.body.setattr.size == newsize); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0644; out.body.attr.attr.size = newsize; }))); EXPECT_EQ(0, truncate(FULLPATH, newsize)) << strerror(errno); } /* * Truncating a file should discard cached data past the truncation point. * This is a regression test for bug 233783. * * There are two distinct failure modes. The first one is a failure to zero * the portion of the file's final buffer past EOF. It can be reproduced by * fsx -WR -P /tmp -S10 fsx.bin * * The second is a failure to drop buffers beyond that. 
It can be reproduced by * fsx -WR -P /tmp -S18 -n fsx.bin * Also reproducible in sh with: * $> /path/to/libfuse/build/example/passthrough -d /tmp/mnt * $> cd /tmp/mnt/tmp * $> dd if=/dev/random of=randfile bs=1k count=192 * $> truncate -s 1k randfile && truncate -s 192k randfile * $> xxd randfile | less # xxd will wrongly show random data at offset 0x8000 */ TEST_F(Setattr, truncate_discards_cached_data) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; void *w0buf, *r0buf, *r1buf, *expected; off_t w0_offset = 0; size_t w0_size = 0x30000; off_t r0_offset = 0; off_t r0_size = w0_size; size_t trunc0_size = 0x400; size_t trunc1_size = w0_size; off_t r1_offset = trunc0_size; off_t r1_size = w0_size - trunc0_size; size_t cur_size = 0; const uint64_t ino = 42; mode_t mode = S_IFREG | 0644; int fd, r; bool should_have_data = false; w0buf = malloc(w0_size); ASSERT_NE(nullptr, w0buf) << strerror(errno); memset(w0buf, 'X', w0_size); r0buf = malloc(r0_size); ASSERT_NE(nullptr, r0buf) << strerror(errno); r1buf = malloc(r1_size); ASSERT_NE(nullptr, r1buf) << strerror(errno); expected = malloc(r1_size); ASSERT_NE(nullptr, expected) << strerror(errno); memset(expected, 0, r1_size); expect_lookup(RELPATH, ino, mode, 0, 1); expect_open(ino, O_RDWR, 1); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_GETATTR && in.header.nodeid == ino); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnImmediate([&](auto i __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; out.body.attr.attr.mode = mode; out.body.attr.attr.size = cur_size; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_WRITE); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnImmediate([&](auto in, auto& out) { SET_OUT_HEADER_LEN(out, write); out.body.attr.attr.ino = ino; out.body.write.size = in.body.write.size; cur_size = std::max(static_cast<uint64_t>(cur_size), in.body.write.size + in.body.write.offset); }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && (in.body.setattr.valid & FATTR_SIZE)); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnImmediate([&](auto in, auto& out) { auto trunc_size = in.body.setattr.size; SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; out.body.attr.attr.mode = mode; out.body.attr.attr.size = trunc_size; cur_size = trunc_size; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { return (in.header.opcode == FUSE_READ); }, Eq(true)), _) ).WillRepeatedly(Invoke(ReturnImmediate([&](auto in, auto& out) { auto osize = std::min( static_cast<uint64_t>(cur_size) - in.body.read.offset, static_cast<uint64_t>(in.body.read.size)); out.header.len = sizeof(struct fuse_out_header) + osize; if (should_have_data) memset(out.body.bytes, 'X', osize); else bzero(out.body.bytes, osize); }))); fd = open(FULLPATH, O_RDWR, 0644); ASSERT_LE(0, fd) << strerror(errno); /* Fill the file with Xs */ ASSERT_EQ(static_cast<ssize_t>(w0_size), pwrite(fd, w0buf, w0_size, w0_offset)); should_have_data = true; /* Fill the cache */ ASSERT_EQ(static_cast<ssize_t>(r0_size), pread(fd, r0buf, r0_size, r0_offset)); /* 1st truncate should discard cached data */ EXPECT_EQ(0, ftruncate(fd, trunc0_size)) << strerror(errno); should_have_data = false; /* 2nd truncate extends file into previously cached data */ EXPECT_EQ(0, ftruncate(fd, trunc1_size)) << strerror(errno); /* Read should return all zeros */ ASSERT_EQ(static_cast<ssize_t>(r1_size), pread(fd, r1buf, r1_size, r1_offset));
r = memcmp(expected, r1buf, r1_size); ASSERT_EQ(0, r); free(expected); free(r1buf); free(r0buf); free(w0buf); leak(fd); } /* Change a file's timestamps */ TEST_F(Setattr, utimensat) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; const timespec oldtimes[2] = { {.tv_sec = 1, .tv_nsec = 2}, {.tv_sec = 3, .tv_nsec = 4}, }; const timespec newtimes[2] = { {.tv_sec = 5, .tv_nsec = 6}, {.tv_sec = 7, .tv_nsec = 8}, }; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = ino; out.body.entry.attr_valid = UINT64_MAX; out.body.entry.attr.atime = oldtimes[0].tv_sec; out.body.entry.attr.atimensec = oldtimes[0].tv_nsec; out.body.entry.attr.mtime = oldtimes[1].tv_sec; out.body.entry.attr.mtimensec = oldtimes[1].tv_nsec; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { uint32_t valid = FATTR_ATIME | FATTR_MTIME; return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && in.body.setattr.valid == valid && (time_t)in.body.setattr.atime == newtimes[0].tv_sec && (long)in.body.setattr.atimensec == newtimes[0].tv_nsec && (time_t)in.body.setattr.mtime == newtimes[1].tv_sec && (long)in.body.setattr.mtimensec == newtimes[1].tv_nsec); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0644; out.body.attr.attr.atime = newtimes[0].tv_sec; out.body.attr.attr.atimensec = newtimes[0].tv_nsec; out.body.attr.attr.mtime = newtimes[1].tv_sec; out.body.attr.attr.mtimensec = newtimes[1].tv_nsec; }))); EXPECT_EQ(0, utimensat(AT_FDCWD, FULLPATH, &newtimes[0], 0)) << strerror(errno); } /* Change a file mtime but not its atime */ TEST_F(Setattr, utimensat_mtime_only) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; const timespec oldtimes[2] = { {.tv_sec = 1, .tv_nsec = 2}, {.tv_sec = 3, .tv_nsec = 4}, }; const timespec newtimes[2] = { {.tv_sec = 5, .tv_nsec = UTIME_OMIT}, {.tv_sec = 7, .tv_nsec = 8}, }; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = ino; out.body.entry.attr_valid = UINT64_MAX; out.body.entry.attr.atime = oldtimes[0].tv_sec; out.body.entry.attr.atimensec = oldtimes[0].tv_nsec; out.body.entry.attr.mtime = oldtimes[1].tv_sec; out.body.entry.attr.mtimensec = oldtimes[1].tv_nsec; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { uint32_t valid = FATTR_MTIME; return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && in.body.setattr.valid == valid && (time_t)in.body.setattr.mtime == newtimes[1].tv_sec && (long)in.body.setattr.mtimensec == newtimes[1].tv_nsec); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0644; out.body.attr.attr.atime = oldtimes[0].tv_sec; out.body.attr.attr.atimensec = oldtimes[0].tv_nsec; out.body.attr.attr.mtime = newtimes[1].tv_sec; out.body.attr.attr.mtimensec = newtimes[1].tv_nsec; }))); EXPECT_EQ(0, utimensat(AT_FDCWD, FULLPATH, &newtimes[0], 0)) << strerror(errno); } /* * Set a file's mtime and atime to 
now * * The design of FreeBSD's VFS does not allow fusefs to set just one of atime * or mtime to UTIME_NOW; it's both or neither. */ TEST_F(Setattr, utimensat_utime_now) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; const timespec oldtimes[2] = { {.tv_sec = 1, .tv_nsec = 2}, {.tv_sec = 3, .tv_nsec = 4}, }; const timespec newtimes[2] = { {.tv_sec = 0, .tv_nsec = UTIME_NOW}, {.tv_sec = 0, .tv_nsec = UTIME_NOW}, }; /* "now" is whatever the server says it is */ const timespec now[2] = { {.tv_sec = 5, .tv_nsec = 7}, {.tv_sec = 6, .tv_nsec = 8}, }; struct stat sb; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | 0644; out.body.entry.nodeid = ino; out.body.entry.attr_valid = UINT64_MAX; out.body.entry.entry_valid = UINT64_MAX; out.body.entry.attr.atime = oldtimes[0].tv_sec; out.body.entry.attr.atimensec = oldtimes[0].tv_nsec; out.body.entry.attr.mtime = oldtimes[1].tv_sec; out.body.entry.attr.mtimensec = oldtimes[1].tv_nsec; }))); EXPECT_CALL(*m_mock, process( ResultOf([=](auto in) { uint32_t valid = FATTR_ATIME | FATTR_ATIME_NOW | FATTR_MTIME | FATTR_MTIME_NOW; return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && in.body.setattr.valid == valid); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | 0644; out.body.attr.attr.atime = now[0].tv_sec; out.body.attr.attr.atimensec = now[0].tv_nsec; out.body.attr.attr.mtime = now[1].tv_sec; out.body.attr.attr.mtimensec = now[1].tv_nsec; out.body.attr.attr_valid = UINT64_MAX; }))); ASSERT_EQ(0, utimensat(AT_FDCWD, FULLPATH, &newtimes[0], 0)) << strerror(errno); ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); EXPECT_EQ(now[0].tv_sec, sb.st_atim.tv_sec); EXPECT_EQ(now[0].tv_nsec, sb.st_atim.tv_nsec); EXPECT_EQ(now[1].tv_sec, sb.st_mtim.tv_sec); EXPECT_EQ(now[1].tv_nsec, sb.st_mtim.tv_nsec); } +/* + * FUSE_SETATTR returns a different file type, even though the entry cache + * hasn't expired. This is a server bug! It probably means that the server + * removed the file and recreated it with the same inode but a different vtyp. + * The best thing fusefs can do is return ENOENT to the caller. After all, the + * entry must not have existed recently. + */ +TEST_F(Setattr, vtyp_conflict) +{ + const char FULLPATH[] = "mountpoint/some_file.txt"; + const char RELPATH[] = "some_file.txt"; + const uint64_t ino = 42; + uid_t newuser = 12345; + sem_t sem; + + ASSERT_EQ(0, sem_init(&sem, 0, 0)) << strerror(errno); + + EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) + .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { + SET_OUT_HEADER_LEN(out, entry); + out.body.entry.attr.mode = S_IFREG | 0777; + out.body.entry.nodeid = ino; + out.body.entry.entry_valid = UINT64_MAX; + }))); + + EXPECT_CALL(*m_mock, process( + ResultOf([](auto in) { + return (in.header.opcode == FUSE_SETATTR && + in.header.nodeid == ino); + }, Eq(true)), + _) + ).WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { + SET_OUT_HEADER_LEN(out, attr); + out.body.attr.attr.ino = ino; + out.body.attr.attr.mode = S_IFDIR | 0777; // Changed! 
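+ // As in the Getattr test, the lookup reply cached this vnode as a regular file; replying to FUSE_SETATTR with a directory type is the same type change.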
+ out.body.attr.attr.uid = newuser; + }))); + // We should reclaim stale vnodes + expect_forget(ino, 1, &sem); + + EXPECT_NE(0, chown(FULLPATH, newuser, -1)); + EXPECT_EQ(ENOENT, errno); + + sem_wait(&sem); + sem_destroy(&sem); +} + /* On a read-only mount, no attributes may be changed */ TEST_F(RofsSetattr, erofs) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; const mode_t oldmode = 0755; const mode_t newmode = 0644; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry); out.body.entry.attr.mode = S_IFREG | oldmode; out.body.entry.nodeid = ino; }))); ASSERT_EQ(-1, chmod(FULLPATH, newmode)); ASSERT_EQ(EROFS, errno); } /* Change the mode of a file */ TEST_F(Setattr_7_8, chmod) { const char FULLPATH[] = "mountpoint/some_file.txt"; const char RELPATH[] = "some_file.txt"; const uint64_t ino = 42; const mode_t oldmode = 0755; const mode_t newmode = 0644; EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH) .WillOnce(Invoke(ReturnImmediate([=](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, entry_7_8); out.body.entry.attr.mode = S_IFREG | oldmode; out.body.entry.nodeid = ino; }))); EXPECT_CALL(*m_mock, process( ResultOf([](auto in) { uint32_t valid = FATTR_MODE; return (in.header.opcode == FUSE_SETATTR && in.header.nodeid == ino && in.body.setattr.valid == valid && in.body.setattr.mode == newmode); }, Eq(true)), _) ).WillOnce(Invoke(ReturnImmediate([](auto in __unused, auto& out) { SET_OUT_HEADER_LEN(out, attr_7_8); out.body.attr.attr.ino = ino; // Must match nodeid out.body.attr.attr.mode = S_IFREG | newmode; }))); EXPECT_EQ(0, chmod(FULLPATH, newmode)) << strerror(errno); }
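
For illustration, a minimal userspace sketch (not taken from the patch; the mountpoint and file name are hypothetical) of the behavior the new vtyp_conflict tests lock in, assuming a fusefs mount whose server swaps an inode's file type while the kernel still holds the old vnode:

#include <sys/stat.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* Hypothetical file on a fusefs mount served by a buggy daemon */
	const char *path = "/mnt/fuse/some_file.txt";
	struct stat sb;

	/*
	 * When FUSE_GETATTR (or FUSE_SETATTR) reports a different file type
	 * for the cached vnode, fusefs reclaims the vnode and the syscall
	 * fails with ENOENT rather than returning attributes of the wrong
	 * type.
	 */
	if (stat(path, &sb) == -1 && errno == ENOENT)
		printf("cached entry was stale: %s\n", strerror(errno));

	/*
	 * A retry begins with a fresh FUSE_LOOKUP, so it may succeed and
	 * report the file's new type.
	 */
	if (stat(path, &sb) == 0)
		printf("%s is now a %s\n", path,
		    S_ISDIR(sb.st_mode) ? "directory" : "regular file");

	return (0);
}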