Index: projects/fuse2/sys/fs/fuse/fuse_device.c =================================================================== --- projects/fuse2/sys/fs/fuse/fuse_device.c (revision 348486) +++ projects/fuse2/sys/fs/fuse/fuse_device.c (revision 348487) @@ -1,546 +1,564 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fuse.h" +#include "fuse_internal.h" #include "fuse_ipc.h" SDT_PROVIDER_DECLARE(fusefs); /* * Fuse trace probe: * arg0: verbosity. 
Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(fusefs, , device, trace, "int", "char*"); static struct cdev *fuse_dev; static d_kqfilter_t fuse_device_filter; static d_open_t fuse_device_open; static d_poll_t fuse_device_poll; static d_read_t fuse_device_read; static d_write_t fuse_device_write; static struct cdevsw fuse_device_cdevsw = { .d_kqfilter = fuse_device_filter, .d_open = fuse_device_open, .d_name = "fuse", .d_poll = fuse_device_poll, .d_read = fuse_device_read, .d_write = fuse_device_write, .d_version = D_VERSION, }; static int fuse_device_filt_read(struct knote *kn, long hint); static void fuse_device_filt_detach(struct knote *kn); struct filterops fuse_device_rfiltops = { .f_isfd = 1, .f_detach = fuse_device_filt_detach, .f_event = fuse_device_filt_read, }; /**************************** * * >>> Fuse device op defs * ****************************/ static void fdata_dtor(void *arg) { struct fuse_data *fdata; struct fuse_ticket *tick; fdata = arg; if (fdata == NULL) return; fdata_set_dead(fdata); FUSE_LOCK(); fuse_lck_mtx_lock(fdata->aw_mtx); /* wakup poll()ers */ selwakeuppri(&fdata->ks_rsel, PZERO + 1); /* Don't let syscall handlers wait in vain */ while ((tick = fuse_aw_pop(fdata))) { fuse_lck_mtx_lock(tick->tk_aw_mtx); fticket_set_answered(tick); tick->tk_aw_errno = ENOTCONN; wakeup(tick); fuse_lck_mtx_unlock(tick->tk_aw_mtx); FUSE_ASSERT_AW_DONE(tick); fuse_ticket_drop(tick); } fuse_lck_mtx_unlock(fdata->aw_mtx); FUSE_UNLOCK(); fdata_trydestroy(fdata); } static int fuse_device_filter(struct cdev *dev, struct knote *kn) { struct fuse_data *data; int error; error = devfs_get_cdevpriv((void **)&data); /* EVFILT_WRITE is not supported; the device is always ready to write */ if (error == 0 && kn->kn_filter == EVFILT_READ) { kn->kn_fop = &fuse_device_rfiltops; kn->kn_hook = data; knlist_add(&data->ks_rsel.si_note, kn, 0); error = 0; } else if (error == 0) { error = EINVAL; kn->kn_data = error; } return (error); } static void fuse_device_filt_detach(struct knote *kn) { struct fuse_data *data; data = (struct fuse_data*)kn->kn_hook; MPASS(data != NULL); knlist_remove(&data->ks_rsel.si_note, kn, 0); kn->kn_hook = NULL; } static int fuse_device_filt_read(struct knote *kn, long hint) { struct fuse_data *data; int ready; data = (struct fuse_data*)kn->kn_hook; MPASS(data != NULL); mtx_assert(&data->ms_mtx, MA_OWNED); if (fdata_get_dead(data)) { kn->kn_flags |= EV_EOF; kn->kn_fflags = ENODEV; kn->kn_data = 1; ready = 1; } else if (STAILQ_FIRST(&data->ms_head)) { MPASS(data->ms_count >= 1); kn->kn_data = data->ms_count; ready = 1; } else { ready = 0; } return (ready); } /* * Resources are set up on a per-open basis */ static int fuse_device_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct fuse_data *fdata; int error; SDT_PROBE2(fusefs, , device, trace, 1, "device open"); fdata = fdata_alloc(dev, td->td_ucred); error = devfs_set_cdevpriv(fdata, fdata_dtor); if (error != 0) fdata_trydestroy(fdata); else SDT_PROBE2(fusefs, , device, trace, 1, "device open success"); return (error); } int fuse_device_poll(struct cdev *dev, int events, struct thread *td) { struct fuse_data *data; int error, revents = 0; error = devfs_get_cdevpriv((void **)&data); if (error != 0) return (events & (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM)); if (events & (POLLIN | POLLRDNORM)) { fuse_lck_mtx_lock(data->ms_mtx); if (fdata_get_dead(data) || STAILQ_FIRST(&data->ms_head)) revents |= events & (POLLIN | POLLRDNORM); else selrecord(td, &data->ks_rsel); 
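
The knote filter above supports only EVFILT_READ; the write side of /dev/fuse is always considered ready, so a daemon only ever needs to wait for readability before issuing its next read. A minimal daemon-side sketch using kqueue(2), assuming kq is a kqueue descriptor and fusefd is the open /dev/fuse descriptor (wait_for_request is a hypothetical helper):

#include <sys/types.h>
#include <sys/event.h>
#include <err.h>

/* Block until the kernel has queued at least one FUSE request. */
static void
wait_for_request(int kq, int fusefd)
{
	struct kevent ev;

	EV_SET(&ev, fusefd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &ev, 1, &ev, 1, NULL) == -1)
		err(1, "kevent");
	if (ev.flags & EV_EOF)
		errx(1, "FUSE session is dead");	/* fflags carries ENODEV */
	/* On success, ev.data reports the number of queued messages. */
}
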
fuse_lck_mtx_unlock(data->ms_mtx); } if (events & (POLLOUT | POLLWRNORM)) { revents |= events & (POLLOUT | POLLWRNORM); } return (revents); } /* * fuse_device_read hangs on the queue of VFS messages. * When it's notified that there is a new one, it picks that and * passes up to the daemon */ int fuse_device_read(struct cdev *dev, struct uio *uio, int ioflag) { int err; struct fuse_data *data; struct fuse_ticket *tick; void *buf[] = {NULL, NULL, NULL}; int buflen[3]; int i; SDT_PROBE2(fusefs, , device, trace, 1, "fuse device read"); err = devfs_get_cdevpriv((void **)&data); if (err != 0) return (err); fuse_lck_mtx_lock(data->ms_mtx); again: if (fdata_get_dead(data)) { SDT_PROBE2(fusefs, , device, trace, 2, "we know early on that reader should be kicked so we " "don't wait for news"); fuse_lck_mtx_unlock(data->ms_mtx); return (ENODEV); } if (!(tick = fuse_ms_pop(data))) { /* check if we may block */ if (ioflag & O_NONBLOCK) { /* get outa here soon */ fuse_lck_mtx_unlock(data->ms_mtx); return (EAGAIN); } else { err = msleep(data, &data->ms_mtx, PCATCH, "fu_msg", 0); if (err != 0) { fuse_lck_mtx_unlock(data->ms_mtx); return (fdata_get_dead(data) ? ENODEV : err); } tick = fuse_ms_pop(data); } } if (!tick) { /* * We can get here if fuse daemon suddenly terminates, * eg, by being hit by a SIGKILL * -- and some other cases, too, tho not totally clear, when * (cv_signal/wakeup_one signals the whole process ?) */ SDT_PROBE2(fusefs, , device, trace, 1, "no message on thread"); goto again; } fuse_lck_mtx_unlock(data->ms_mtx); if (fdata_get_dead(data)) { /* * somebody somewhere -- eg., umount routine -- * wants this liaison finished off */ SDT_PROBE2(fusefs, , device, trace, 2, "reader is to be sacked"); if (tick) { SDT_PROBE2(fusefs, , device, trace, 2, "weird -- " "\"kick\" is set tho there is message"); FUSE_ASSERT_MS_DONE(tick); fuse_ticket_drop(tick); } return (ENODEV); /* This should make the daemon get off * of us */ } SDT_PROBE2(fusefs, , device, trace, 1, "fuse device read message successfully"); KASSERT(tick->tk_ms_bufdata || tick->tk_ms_bufsize == 0, ("non-null buf pointer with positive size")); switch (tick->tk_ms_type) { case FT_M_FIOV: buf[0] = tick->tk_ms_fiov.base; buflen[0] = tick->tk_ms_fiov.len; break; case FT_M_BUF: buf[0] = tick->tk_ms_fiov.base; buflen[0] = tick->tk_ms_fiov.len; buf[1] = tick->tk_ms_bufdata; buflen[1] = tick->tk_ms_bufsize; break; default: panic("unknown message type for fuse_ticket %p", tick); } for (i = 0; buf[i]; i++) { /* * Why not ban mercilessly stupid daemons who can't keep up * with us? (There is no much use of a partial read here...) */ /* * XXX note that in such cases Linux FUSE throws EIO at the * syscall invoker and stands back to the message queue. The * rationale should be made clear (and possibly adopt that * behaviour). Keeping the current scheme at least makes * fallacy as loud as possible... 
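
A read(2) whose buffer cannot hold the whole message is not given a partial copy; as the check below shows, the session is marked dead instead. The daemon therefore has to size its buffer for the largest possible request, roughly a fuse_in_header plus the negotiated max_write. A rough daemon-side sketch, assuming "fuse_kernel.h" declares struct fuse_in_header and that fusefd was opened blocking (request_loop is a hypothetical helper):

#include <sys/types.h>
#include <err.h>
#include <stdlib.h>
#include <unistd.h>

#include "fuse_kernel.h"	/* assumed location of the FUSE ABI structs */

static void
request_loop(int fusefd, size_t max_write)
{
	size_t bufsize = sizeof(struct fuse_in_header) + max_write;
	struct fuse_in_header *hdr;
	char *buf;
	ssize_t n;

	if ((buf = malloc(bufsize)) == NULL)
		err(1, "malloc");
	while ((n = read(fusefd, buf, bufsize)) > 0) {
		hdr = (struct fuse_in_header *)buf;
		/* Dispatch on hdr->opcode; the reply must echo hdr->unique. */
		(void)hdr;
	}
	free(buf);
}
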
*/ if (uio->uio_resid < buflen[i]) { fdata_set_dead(data); SDT_PROBE2(fusefs, , device, trace, 2, "daemon is stupid, kick it off..."); err = ENODEV; break; } err = uiomove(buf[i], buflen[i], uio); if (err) break; } FUSE_ASSERT_MS_DONE(tick); fuse_ticket_drop(tick); return (err); } static inline int fuse_ohead_audit(struct fuse_out_header *ohead, struct uio *uio) { if (uio->uio_resid + sizeof(struct fuse_out_header) != ohead->len) { SDT_PROBE2(fusefs, , device, trace, 1, "Format error: body size " "differs from size claimed by header"); return (EINVAL); } - if (uio->uio_resid && ohead->error) { + if (uio->uio_resid && ohead->unique != 0 && ohead->error) { SDT_PROBE2(fusefs, , device, trace, 1, "Format error: non zero error but message had a body"); return (EINVAL); } - /* Sanitize the linuxism of negative errnos */ - ohead->error = -(ohead->error); return (0); } +SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_notify, + "struct fuse_out_header*"); SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_missing_ticket, "uint64_t"); SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_found, "struct fuse_ticket*"); /* * fuse_device_write first reads the header sent by the daemon. * If that's OK, looks up ticket/callback node by the unique id seen in header. * If the callback node contains a handler function, the uio is passed over * that. */ static int fuse_device_write(struct cdev *dev, struct uio *uio, int ioflag) { struct fuse_out_header ohead; int err = 0; struct fuse_data *data; + struct mount *mp; struct fuse_ticket *tick, *itick, *x_tick; int found = 0; err = devfs_get_cdevpriv((void **)&data); if (err != 0) return (err); + mp = data->mp; if (uio->uio_resid < sizeof(struct fuse_out_header)) { SDT_PROBE2(fusefs, , device, trace, 1, "fuse_device_write got less than a header!"); fdata_set_dead(data); return (EINVAL); } if ((err = uiomove(&ohead, sizeof(struct fuse_out_header), uio)) != 0) return (err); /* * We check header information (which is redundant) and compare it * with what we see. If we see some inconsistency we discard the * whole answer and proceed on as if it had never existed. In * particular, no pretender will be woken up, regardless the * "unique" value in the header. */ if ((err = fuse_ohead_audit(&ohead, uio))) { fdata_set_dead(data); return (err); } /* Pass stuff over to callback if there is one installed */ /* Looking for ticket with the unique id of header */ fuse_lck_mtx_lock(data->aw_mtx); TAILQ_FOREACH_SAFE(tick, &data->aw_head, tk_aw_link, x_tick) { if (tick->tk_unique == ohead.unique) { SDT_PROBE1(fusefs, , device, fuse_device_write_found, tick); found = 1; fuse_aw_remove(tick); break; } } if (found && tick->irq_unique > 0) { /* * Discard the FUSE_INTERRUPT ticket that tried to interrupt * this operation */ TAILQ_FOREACH_SAFE(itick, &data->aw_head, tk_aw_link, x_tick) { if (itick->tk_unique == tick->irq_unique) { fuse_aw_remove(itick); break; } } tick->irq_unique = 0; } fuse_lck_mtx_unlock(data->aw_mtx); if (found) { if (tick->tk_aw_handler) { /* * We found a callback with proper handler. In this * case the out header will be 0wnd by the callback, * so the fun of freeing that is left for her. * (Then, by all chance, she'll just get that's done * via ticket_drop(), so no manual mucking * around...) 
*/ SDT_PROBE2(fusefs, , device, trace, 1, "pass ticket to a callback"); + /* Sanitize the linuxism of negative errnos */ + ohead.error *= -1; memcpy(&tick->tk_aw_ohead, &ohead, sizeof(ohead)); err = tick->tk_aw_handler(tick, uio); } else { /* pretender doesn't wanna do anything with answer */ SDT_PROBE2(fusefs, , device, trace, 1, "stuff devalidated, so we drop it"); } /* * As aw_mtx was not held during the callback execution the * ticket may have been inserted again. However, this is safe * because fuse_ticket_drop() will deal with refcount anyway. */ fuse_ticket_drop(tick); + } else if (ohead.unique == 0){ + /* unique == 0 means asynchronous notification */ + SDT_PROBE1(fusefs, , device, fuse_device_write_notify, &ohead); + switch (ohead.error) { + case FUSE_NOTIFY_INVAL_ENTRY: + err = fuse_internal_invalidate_entry(mp, uio); + break; + case FUSE_NOTIFY_POLL: + case FUSE_NOTIFY_INVAL_INODE: + default: + /* Not implemented */ + err = ENOSYS; + } } else { /* no callback at all! */ SDT_PROBE1(fusefs, , device, fuse_device_write_missing_ticket, ohead.unique); - if (ohead.error == EAGAIN) { + if (ohead.error == -EAGAIN) { /* * This was probably a response to a FUSE_INTERRUPT * operation whose original operation is already * complete. We can't store FUSE_INTERRUPT tickets * indefinitely because their responses are optional. * So we delete them when the original operation * completes. And sadly the fuse_header_out doesn't * identify the opcode, so we have to guess. */ err = 0; } else { err = EINVAL; } } return (err); } int fuse_device_init(void) { fuse_dev = make_dev(&fuse_device_cdevsw, 0, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH, "fuse"); if (fuse_dev == NULL) return (ENOMEM); return (0); } void fuse_device_destroy(void) { MPASS(fuse_dev != NULL); destroy_dev(fuse_dev); } Index: projects/fuse2/sys/fs/fuse/fuse_internal.c =================================================================== --- projects/fuse2/sys/fs/fuse/fuse_internal.c (revision 348486) +++ projects/fuse2/sys/fs/fuse/fuse_internal.c (revision 348487) @@ -1,972 +1,1037 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. and Amit Singh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fuse.h" #include "fuse_file.h" #include "fuse_internal.h" #include "fuse_ipc.h" #include "fuse_node.h" #include "fuse_file.h" SDT_PROVIDER_DECLARE(fusefs); /* * Fuse trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(fusefs, , internal, trace, "int", "char*"); #ifdef ZERO_PAD_INCOMPLETE_BUFS static int isbzero(void *buf, size_t len); #endif /* Synchronously send a FUSE_ACCESS operation */ int fuse_internal_access(struct vnode *vp, accmode_t mode, struct thread *td, struct ucred *cred) { int err = 0; uint32_t mask = F_OK; int dataflags; int vtype; struct mount *mp; struct fuse_dispatcher fdi; struct fuse_access_in *fai; struct fuse_data *data; mp = vnode_mount(vp); vtype = vnode_vtype(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; if (mode == 0) return 0; if (mode & VMODIFY_PERMS && vfs_isrdonly(mp)) { switch (vp->v_type) { case VDIR: /* FALLTHROUGH */ case VLNK: /* FALLTHROUGH */ case VREG: return EROFS; default: break; } } /* Unless explicitly permitted, deny everyone except the fs owner. 
*/ if (!(dataflags & FSESS_DAEMON_CAN_SPY)) { if (fuse_match_cred(data->daemoncred, cred)) return EPERM; } if (dataflags & FSESS_DEFAULT_PERMISSIONS) { struct vattr va; fuse_internal_getattr(vp, &va, cred, td); return vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid, mode, cred, NULL); } if (!fsess_isimpl(mp, FUSE_ACCESS)) return 0; if ((mode & (VWRITE | VAPPEND | VADMIN)) != 0) mask |= W_OK; if ((mode & VREAD) != 0) mask |= R_OK; if ((mode & VEXEC) != 0) mask |= X_OK; fdisp_init(&fdi, sizeof(*fai)); fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred); fai = fdi.indata; fai->mask = mask; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_ACCESS); err = 0; } return err; } /* * Cache FUSE attributes from attr, in attribute cache associated with vnode * 'vp'. Optionally, if argument 'vap' is not NULL, store a copy of the * converted attributes there as well. * * If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do * return the result to the caller). */ void fuse_internal_cache_attrs(struct vnode *vp, struct ucred *cred, struct fuse_attr *attr, uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap) { struct mount *mp; struct fuse_vnode_data *fvdat; struct fuse_data *data; struct vattr *vp_cache_at; mp = vnode_mount(vp); fvdat = VTOFUD(vp); data = fuse_get_mpdata(mp); if (!cred) cred = curthread->td_ucred; ASSERT_VOP_ELOCKED(vp, "fuse_internal_cache_attrs"); fuse_validity_2_bintime(attr_valid, attr_valid_nsec, &fvdat->attr_cache_timeout); /* Fix our buffers if the filesize changed without us knowing */ if (vnode_isreg(vp) && attr->size != fvdat->cached_attrs.va_size) { (void)fuse_vnode_setsize(vp, cred, attr->size); fvdat->cached_attrs.va_size = attr->size; } if (attr_valid > 0 || attr_valid_nsec > 0) vp_cache_at = &(fvdat->cached_attrs); else if (vap != NULL) vp_cache_at = vap; else return; vattr_null(vp_cache_at); vp_cache_at->va_fsid = mp->mnt_stat.f_fsid.val[0]; vp_cache_at->va_fileid = attr->ino; vp_cache_at->va_mode = attr->mode & ~S_IFMT; vp_cache_at->va_nlink = attr->nlink; vp_cache_at->va_uid = attr->uid; vp_cache_at->va_gid = attr->gid; vp_cache_at->va_rdev = attr->rdev; vp_cache_at->va_size = attr->size; /* XXX on i386, seconds are truncated to 32 bits */ vp_cache_at->va_atime.tv_sec = attr->atime; vp_cache_at->va_atime.tv_nsec = attr->atimensec; vp_cache_at->va_mtime.tv_sec = attr->mtime; vp_cache_at->va_mtime.tv_nsec = attr->mtimensec; vp_cache_at->va_ctime.tv_sec = attr->ctime; vp_cache_at->va_ctime.tv_nsec = attr->ctimensec; if (fuse_libabi_geq(data, 7, 9) && attr->blksize > 0) vp_cache_at->va_blocksize = attr->blksize; else vp_cache_at->va_blocksize = PAGE_SIZE; vp_cache_at->va_type = IFTOVT(attr->mode); vp_cache_at->va_bytes = attr->blocks * S_BLKSIZE; vp_cache_at->va_flags = 0; if (vap != vp_cache_at && vap != NULL) memcpy(vap, vp_cache_at, sizeof(*vap)); } /* fsync */ int fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio) { if (tick->tk_aw_ohead.error == ENOSYS) { fsess_set_notimpl(tick->tk_data->mp, fticket_opcode(tick)); } return 0; } int fuse_internal_fsync(struct vnode *vp, struct thread *td, int waitfor, bool datasync) { struct fuse_fsync_in *ffsi = NULL; struct fuse_dispatcher fdi; struct fuse_filehandle *fufh; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct mount *mp = vnode_mount(vp); int op = FUSE_FSYNC; int err = 0; if (!fsess_isimpl(vnode_mount(vp), (vnode_vtype(vp) == VDIR ? 
FUSE_FSYNCDIR : FUSE_FSYNC))) { return 0; } if (vnode_isdir(vp)) op = FUSE_FSYNCDIR; if (!fsess_isimpl(mp, op)) return 0; fdisp_init(&fdi, sizeof(*ffsi)); /* * fsync every open file handle for this file, because we can't be sure * which file handle the caller is really referring to. */ LIST_FOREACH(fufh, &fvdat->handles, next) { if (ffsi == NULL) fdisp_make_vp(&fdi, op, vp, td, NULL); else fdisp_refresh_vp(&fdi, op, vp, td, NULL); ffsi = fdi.indata; ffsi->fh = fufh->fh_id; ffsi->fsync_flags = 0; if (datasync) ffsi->fsync_flags = 1; if (waitfor == MNT_WAIT) { err = fdisp_wait_answ(&fdi); } else { fuse_insert_callback(fdi.tick, fuse_internal_fsync_callback); fuse_insert_message(fdi.tick, false); } if (err == ENOSYS) { /* ENOSYS means "success, and don't call again" */ fsess_set_notimpl(mp, op); err = 0; break; } } fdisp_destroy(&fdi); return err; } +/* Asynchronous invalidation */ +SDT_PROBE_DEFINE1(fusefs, , internal, invalidate_without_export, + "struct mount*"); +SDT_PROBE_DEFINE2(fusefs, , internal, invalidate_cache_hit, + "struct vnode*", "struct vnode*"); +int +fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio) +{ + struct fuse_notify_inval_entry_out fnieo; + struct fuse_data *data = fuse_get_mpdata(mp); + struct componentname cn; + /*struct vnode *dvp;*/ + struct vnode *dvp, *vp; + char name[PATH_MAX]; + int err; + + if (!(data->dataflags & FSESS_EXPORT_SUPPORT)) { + /* + * Linux allows file systems without export support to use + * asynchronous notification because its inode cache is indexed + * purely by the inode number. But FreeBSD's vnode is cache + * requires access to the entire vnode structure. + */ + SDT_PROBE1(fusefs, , internal, invalidate_without_export, mp); + return (EINVAL); + } + + if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0) + return (err); + + if ((err = uiomove(name, fnieo.namelen, uio)) != 0) + return (err); + name[fnieo.namelen] = '\0'; + /* fusefs does not cache "." or ".." entries */ + if (strncmp(name, ".", sizeof(".")) == 0 || + strncmp(name, "..", sizeof("..")) == 0) + return (0); + + if (fnieo.parent == FUSE_ROOT_ID) + err = VFS_ROOT(mp, LK_SHARED, &dvp); + else + err = VFS_VGET(mp, fnieo.parent, LK_SHARED, &dvp); + if (err != 0) + return (err); + /* + * XXX we can't check dvp's generation because the FUSE invalidate + * entry message doesn't include it. Worse case is that we invalidate + * an entry that didn't need to be invalidated. 
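
The notification consumed by fuse_internal_invalidate_entry() is a fuse_out_header with unique == 0 and error == FUSE_NOTIFY_INVAL_ENTRY, followed by a fuse_notify_inval_entry_out and then the entry name itself. A hedged daemon-side sketch of emitting one such message, assuming "fuse_kernel.h" provides the ABI structures and fusefd is the open /dev/fuse descriptor (send_inval_entry is a hypothetical helper):

#include <sys/cdefs.h>
#include <sys/uio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include "fuse_kernel.h"	/* assumed location of the FUSE ABI structs */

static int
send_inval_entry(int fusefd, uint64_t parent_ino, const char *name)
{
	struct fuse_out_header ohead;
	struct fuse_notify_inval_entry_out fnieo;
	struct iovec iov[3];

	memset(&fnieo, 0, sizeof(fnieo));
	fnieo.parent = parent_ino;
	fnieo.namelen = strlen(name);

	ohead.unique = 0;		/* 0 marks an asynchronous notification */
	ohead.error = FUSE_NOTIFY_INVAL_ENTRY;
	ohead.len = sizeof(ohead) + sizeof(fnieo) + fnieo.namelen;

	iov[0].iov_base = &ohead;
	iov[0].iov_len = sizeof(ohead);
	iov[1].iov_base = &fnieo;
	iov[1].iov_len = sizeof(fnieo);
	iov[2].iov_base = __DECONST(void *, name);
	iov[2].iov_len = fnieo.namelen;

	return (writev(fusefd, iov, 3) == -1 ? -1 : 0);
}
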
+ */ + + cn.cn_nameiop = LOOKUP; + cn.cn_flags = 0; /* !MAKEENTRY means free cached entry */ + cn.cn_thread = curthread; + cn.cn_cred = curthread->td_ucred; + cn.cn_lkflags = LK_SHARED; + cn.cn_pnbuf = NULL; + cn.cn_nameptr = name; + cn.cn_namelen = fnieo.namelen; + err = cache_lookup(dvp, &vp, &cn, NULL, NULL); + MPASS(err == 0); + fuse_vnode_clear_attr_cache(dvp); + vput(dvp); + return (0); +} + /* mknod */ int fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap) { struct fuse_data *data; struct fuse_mknod_in fmni; size_t insize; data = fuse_get_mpdata(dvp->v_mount); fmni.mode = MAKEIMODE(vap->va_type, vap->va_mode); fmni.rdev = vap->va_rdev; if (fuse_libabi_geq(data, 7, 12)) { insize = sizeof(fmni); fmni.umask = curthread->td_proc->p_fd->fd_cmask; } else { insize = FUSE_COMPAT_MKNOD_IN_SIZE; } return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKNOD, &fmni, insize, vap->va_type)); } /* readdir */ int fuse_internal_readdir(struct vnode *vp, struct uio *uio, off_t startoff, struct fuse_filehandle *fufh, struct fuse_iov *cookediov, int *ncookies, u_long *cookies) { int err = 0; struct fuse_dispatcher fdi; struct fuse_read_in *fri = NULL; int fnd_start; if (uio_resid(uio) == 0) return 0; fdisp_init(&fdi, 0); /* * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p * I/O). */ /* * fnd_start is set non-zero once the offset in the directory gets * to the startoff. This is done because directories must be read * from the beginning (offset == 0) when fuse_vnop_readdir() needs * to do an open of the directory. * If it is not set non-zero here, it will be set non-zero in * fuse_internal_readdir_processdata() when uio_offset == startoff. */ fnd_start = 0; if (uio->uio_offset == startoff) fnd_start = 1; while (uio_resid(uio) > 0) { fdi.iosize = sizeof(*fri); if (fri == NULL) fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL); else fdisp_refresh_vp(&fdi, FUSE_READDIR, vp, NULL, NULL); fri = fdi.indata; fri->fh = fufh->fh_id; fri->offset = uio_offset(uio); fri->size = MIN(uio->uio_resid, fuse_get_mpdata(vp->v_mount)->max_read); if ((err = fdisp_wait_answ(&fdi))) break; if ((err = fuse_internal_readdir_processdata(uio, startoff, &fnd_start, fri->size, fdi.answ, fdi.iosize, cookediov, ncookies, &cookies))) break; } fdisp_destroy(&fdi); return ((err == -1) ? 0 : err); } /* * Return -1 to indicate that this readdir is finished, 0 if it copied * all the directory data read in and it may be possible to read more * and greater than 0 for a failure. */ int fuse_internal_readdir_processdata(struct uio *uio, off_t startoff, int *fnd_start, size_t reqsize, void *buf, size_t bufsize, struct fuse_iov *cookediov, int *ncookies, u_long **cookiesp) { int err = 0; int bytesavail; size_t freclen; struct dirent *de; struct fuse_dirent *fudge; u_long *cookies; cookies = *cookiesp; if (bufsize < FUSE_NAME_OFFSET) return -1; for (;;) { if (bufsize < FUSE_NAME_OFFSET) { err = -1; break; } fudge = (struct fuse_dirent *)buf; freclen = FUSE_DIRENT_SIZE(fudge); if (bufsize < freclen) { /* * This indicates a partial directory entry at the * end of the directory data. */ err = -1; break; } #ifdef ZERO_PAD_INCOMPLETE_BUFS if (isbzero(buf, FUSE_NAME_OFFSET)) { err = -1; break; } #endif if (!fudge->namelen || fudge->namelen > MAXNAMLEN) { err = EINVAL; break; } bytesavail = GENERIC_DIRSIZ((struct pseudo_dirent *) &fudge->namelen); if (bytesavail > uio_resid(uio)) { /* Out of space for the dir so we are done. 
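
Each record in the buffer walked above is a fuse_dirent: a fixed part (ino, off, namelen, type) followed by the name, with the whole record padded to an 8-byte boundary. A sketch of how a daemon might append one entry to its FUSE_READDIR reply buffer, assuming the usual FUSE ABI macros FUSE_NAME_OFFSET and FUSE_DIRENT_ALIGN (append_dirent is a hypothetical helper):

#include <stdint.h>
#include <string.h>

#include "fuse_kernel.h"	/* assumed location of struct fuse_dirent */

/*
 * Append one directory entry at 'p'; return the bytes consumed, or 0 if
 * the entry does not fit in 'remaining'.  'off' is the offset of the
 * *next* entry, which the kernel hands back in later FUSE_READDIR calls.
 */
static size_t
append_dirent(char *p, size_t remaining, uint64_t ino, uint64_t off,
    uint32_t type, const char *name)
{
	struct fuse_dirent de;
	size_t entlen, entlen_padded;

	de.ino = ino;
	de.off = off;
	de.namelen = strlen(name);
	de.type = type;
	entlen = FUSE_NAME_OFFSET + de.namelen;
	entlen_padded = FUSE_DIRENT_ALIGN(entlen);
	if (entlen_padded > remaining)
		return (0);
	memcpy(p, &de, FUSE_NAME_OFFSET);
	memcpy(p + FUSE_NAME_OFFSET, name, de.namelen);
	memset(p + entlen, 0, entlen_padded - entlen);
	return (entlen_padded);
}
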
*/ err = -1; break; } /* * Don't start to copy the directory entries out until * the requested offset in the directory is found. */ if (*fnd_start != 0) { fiov_adjust(cookediov, bytesavail); bzero(cookediov->base, bytesavail); de = (struct dirent *)cookediov->base; de->d_fileno = fudge->ino; de->d_reclen = bytesavail; de->d_type = fudge->type; de->d_namlen = fudge->namelen; memcpy((char *)cookediov->base + sizeof(struct dirent) - MAXNAMLEN - 1, (char *)buf + FUSE_NAME_OFFSET, fudge->namelen); dirent_terminate(de); err = uiomove(cookediov->base, cookediov->len, uio); if (err) break; if (cookies != NULL) { if (*ncookies == 0) { err = -1; break; } *cookies = fudge->off; cookies++; (*ncookies)--; } } else if (startoff == fudge->off) *fnd_start = 1; buf = (char *)buf + freclen; bufsize -= freclen; uio_setoffset(uio, fudge->off); } *cookiesp = cookies; return err; } /* remove */ int fuse_internal_remove(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, enum fuse_opcode op) { struct fuse_dispatcher fdi; int err = 0; fdisp_init(&fdi, cnp->cn_namelen + 1); fdisp_make_vp(&fdi, op, dvp, cnp->cn_thread, cnp->cn_cred); memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdi.indata)[cnp->cn_namelen] = '\0'; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); return err; } /* rename */ int fuse_internal_rename(struct vnode *fdvp, struct componentname *fcnp, struct vnode *tdvp, struct componentname *tcnp) { struct fuse_dispatcher fdi; struct fuse_rename_in *fri; int err = 0; fdisp_init(&fdi, sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 2); fdisp_make_vp(&fdi, FUSE_RENAME, fdvp, tcnp->cn_thread, tcnp->cn_cred); fri = fdi.indata; fri->newdir = VTOI(tdvp); memcpy((char *)fdi.indata + sizeof(*fri), fcnp->cn_nameptr, fcnp->cn_namelen); ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen] = '\0'; memcpy((char *)fdi.indata + sizeof(*fri) + fcnp->cn_namelen + 1, tcnp->cn_nameptr, tcnp->cn_namelen); ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 1] = '\0'; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); return err; } /* strategy */ /* entity creation */ void fuse_internal_newentry_makerequest(struct mount *mp, uint64_t dnid, struct componentname *cnp, enum fuse_opcode op, void *buf, size_t bufsize, struct fuse_dispatcher *fdip) { fdip->iosize = bufsize + cnp->cn_namelen + 1; fdisp_make(fdip, op, mp, dnid, cnp->cn_thread, cnp->cn_cred); memcpy(fdip->indata, buf, bufsize); memcpy((char *)fdip->indata + bufsize, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdip->indata)[bufsize + cnp->cn_namelen] = '\0'; } int fuse_internal_newentry_core(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum vtype vtyp, struct fuse_dispatcher *fdip) { int err = 0; struct fuse_entry_out *feo; struct mount *mp = vnode_mount(dvp); if ((err = fdisp_wait_answ(fdip))) { return err; } feo = fdip->answ; if ((err = fuse_internal_checkentry(feo, vtyp))) { return err; } err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp); if (err) { fuse_internal_forget_send(mp, cnp->cn_thread, cnp->cn_cred, feo->nodeid, 1); return err; } /* * Purge the parent's attribute cache because the daemon should've * updated its mtime and ctime */ fuse_vnode_clear_attr_cache(dvp); fuse_internal_cache_attrs(*vpp, NULL, &feo->attr, feo->attr_valid, feo->attr_valid_nsec, NULL); return err; } int fuse_internal_newentry(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum fuse_opcode op, void *buf, size_t bufsize, enum vtype vtype) { int err; struct fuse_dispatcher fdi; 
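
fuse_internal_rename() above lays the request out as a fuse_rename_in followed by the source name and the destination name, each NUL-terminated. The matching daemon-side parse could look like this sketch, assuming "fuse_kernel.h" declares struct fuse_rename_in and that 'buf' holds the already-read request body (parse_rename is a hypothetical helper):

#include <stdint.h>
#include <string.h>

#include "fuse_kernel.h"	/* assumed location of struct fuse_rename_in */

static void
parse_rename(const char *buf, uint64_t *newdir, const char **oldname,
    const char **newname)
{
	const struct fuse_rename_in *fri = (const void *)buf;

	*newdir = fri->newdir;
	*oldname = buf + sizeof(*fri);			/* NUL-terminated */
	*newname = *oldname + strlen(*oldname) + 1;	/* after the first NUL */
}
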
struct mount *mp = vnode_mount(dvp); fdisp_init(&fdi, 0); fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf, bufsize, &fdi); err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi); fdisp_destroy(&fdi); return err; } /* entity destruction */ int fuse_internal_forget_callback(struct fuse_ticket *ftick, struct uio *uio) { fuse_internal_forget_send(ftick->tk_data->mp, curthread, NULL, ((struct fuse_in_header *)ftick->tk_ms_fiov.base)->nodeid, 1); return 0; } void fuse_internal_forget_send(struct mount *mp, struct thread *td, struct ucred *cred, uint64_t nodeid, uint64_t nlookup) { struct fuse_dispatcher fdi; struct fuse_forget_in *ffi; /* * KASSERT(nlookup > 0, ("zero-times forget for vp #%llu", * (long long unsigned) nodeid)); */ fdisp_init(&fdi, sizeof(*ffi)); fdisp_make(&fdi, FUSE_FORGET, mp, nodeid, td, cred); ffi = fdi.indata; ffi->nlookup = nlookup; fuse_insert_message(fdi.tick, false); fdisp_destroy(&fdi); } /* Fetch the vnode's attributes from the daemon*/ int fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td) { struct fuse_dispatcher fdi; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_getattr_in *fgai; struct fuse_attr_out *fao; off_t old_filesize = fvdat->cached_attrs.va_size; enum vtype vtyp; int err; fdisp_init(&fdi, 0); fdisp_make_vp(&fdi, FUSE_GETATTR, vp, td, cred); fgai = fdi.indata; /* * We could look up a file handle and set it in fgai->fh, but that * involves extra runtime work and I'm unaware of any file systems that * care. */ fgai->getattr_flags = 0; if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) { if (err == ENOENT) fuse_internal_vnode_disappear(vp); goto out; } fao = (struct fuse_attr_out *)fdi.answ; vtyp = IFTOVT(fao->attr.mode); if (fvdat->flag & FN_SIZECHANGE) fao->attr.size = old_filesize; fuse_internal_cache_attrs(vp, NULL, &fao->attr, fao->attr_valid, fao->attr_valid_nsec, vap); if (vtyp != vnode_vtype(vp)) { fuse_internal_vnode_disappear(vp); err = ENOENT; } out: fdisp_destroy(&fdi); return err; } /* Read a vnode's attributes from cache or fetch them from the fuse daemon */ int fuse_internal_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td) { struct vattr *attrs; if ((attrs = VTOVA(vp)) != NULL) { *vap = *attrs; /* struct copy */ return 0; } return fuse_internal_do_getattr(vp, vap, cred, td); } void fuse_internal_vnode_disappear(struct vnode *vp) { struct fuse_vnode_data *fvdat = VTOFUD(vp); ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear"); fvdat->flag |= FN_REVOKED; bintime_clear(&fvdat->attr_cache_timeout); bintime_clear(&fvdat->entry_cache_timeout); cache_purge(vp); } /* fuse start/stop */ int fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio) { int err = 0; struct fuse_data *data = tick->tk_data; struct fuse_init_out *fiio; if ((err = tick->tk_aw_ohead.error)) { goto out; } if ((err = fticket_pull(tick, uio))) { goto out; } fiio = fticket_resp(tick)->base; /* XXX: Do we want to check anything further besides this? 
*/ if (fiio->major < 7) { SDT_PROBE2(fusefs, , internal, trace, 1, "userpace version too low"); err = EPROTONOSUPPORT; goto out; } data->fuse_libabi_major = fiio->major; data->fuse_libabi_minor = fiio->minor; if (fuse_libabi_geq(data, 7, 5)) { if (fticket_resp(tick)->len == sizeof(struct fuse_init_out)) { data->max_write = fiio->max_write; if (fiio->flags & FUSE_ASYNC_READ) data->dataflags |= FSESS_ASYNC_READ; if (fiio->flags & FUSE_POSIX_LOCKS) data->dataflags |= FSESS_POSIX_LOCKS; if (fiio->flags & FUSE_EXPORT_SUPPORT) data->dataflags |= FSESS_EXPORT_SUPPORT; /* * Don't bother to check FUSE_BIG_WRITES, because it's * redundant with max_write */ } else { err = EINVAL; } } else { /* Old fix values */ data->max_write = 4096; } out: if (err) { fdata_set_dead(data); } FUSE_LOCK(); data->dataflags |= FSESS_INITED; wakeup(&data->ticketer); FUSE_UNLOCK(); return 0; } void fuse_internal_send_init(struct fuse_data *data, struct thread *td) { struct fuse_init_in *fiii; struct fuse_dispatcher fdi; fdisp_init(&fdi, sizeof(*fiii)); fdisp_make(&fdi, FUSE_INIT, data->mp, 0, td, NULL); fiii = fdi.indata; fiii->major = FUSE_KERNEL_VERSION; fiii->minor = FUSE_KERNEL_MINOR_VERSION; /* * fusefs currently doesn't do any readahead other than fetching whole * buffer cache block sized regions at once. So the max readahead is * the size of a buffer cache block. */ fiii->max_readahead = maxbcachebuf; /* * Unsupported features: * FUSE_FILE_OPS: No known FUSE server or client supports it * FUSE_ATOMIC_O_TRUNC: our VFS cannot support it * FUSE_DONT_MASK: unlike Linux, FreeBSD always applies the umask, even * when default ACLs are in use. */ fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES; fuse_insert_callback(fdi.tick, fuse_internal_init_callback); fuse_insert_message(fdi.tick, false); fdisp_destroy(&fdi); } /* * Send a FUSE_SETATTR operation with no permissions checks. If cred is NULL, * send the request with root credentials */ int fuse_internal_setattr(struct vnode *vp, struct vattr *vap, struct thread *td, struct ucred *cred) { struct fuse_dispatcher fdi; struct fuse_setattr_in *fsai; struct mount *mp; pid_t pid = td->td_proc->p_pid; struct fuse_data *data; int dataflags; int err = 0; enum vtype vtyp; int sizechanged = -1; uint64_t newsize = 0; mp = vnode_mount(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; fdisp_init(&fdi, sizeof(*fsai)); fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred); if (!cred) { fdi.finh->uid = 0; fdi.finh->gid = 0; } fsai = fdi.indata; fsai->valid = 0; if (vap->va_uid != (uid_t)VNOVAL) { fsai->uid = vap->va_uid; fsai->valid |= FATTR_UID; } if (vap->va_gid != (gid_t)VNOVAL) { fsai->gid = vap->va_gid; fsai->valid |= FATTR_GID; } if (vap->va_size != VNOVAL) { struct fuse_filehandle *fufh = NULL; /*Truncate to a new value. 
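
The handshake above advertises the kernel's ABI version, its max_readahead, and the feature flags it understands, and it expects a fuse_init_out carrying the daemon's version, max_write, and the subset of flags it accepts; for ABI 7.5 and later, anything shorter than a full fuse_init_out is treated as an error. A daemon-side reply sketch, assuming "fuse_kernel.h" provides the ABI definitions (reply_init is a hypothetical helper and the max_write value is only illustrative):

#include <sys/uio.h>
#include <string.h>
#include <unistd.h>

#include "fuse_kernel.h"	/* assumed location of the FUSE ABI structs */

static int
reply_init(int fusefd, const struct fuse_in_header *inhdr,
    const struct fuse_init_in *fii)
{
	struct fuse_out_header ohead;
	struct fuse_init_out fiio;
	struct iovec iov[2];

	memset(&fiio, 0, sizeof(fiio));
	fiio.major = FUSE_KERNEL_VERSION;
	fiio.minor = FUSE_KERNEL_MINOR_VERSION;
	fiio.max_readahead = fii->max_readahead;
	fiio.max_write = 128 * 1024;	/* illustrative; file systems may pick less */
	/* Accept only features that the kernel actually offered. */
	fiio.flags = fii->flags & (FUSE_ASYNC_READ | FUSE_EXPORT_SUPPORT);

	ohead.unique = inhdr->unique;	/* ties the reply to the request */
	ohead.error = 0;
	ohead.len = sizeof(ohead) + sizeof(fiio);

	iov[0].iov_base = &ohead;
	iov[0].iov_len = sizeof(ohead);
	iov[1].iov_base = &fiio;
	iov[1].iov_len = sizeof(fiio);

	return (writev(fusefd, iov, 2) == -1 ? -1 : 0);
}
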
*/ fsai->size = vap->va_size; sizechanged = 1; newsize = vap->va_size; fsai->valid |= FATTR_SIZE; fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid); if (fufh) { fsai->fh = fufh->fh_id; fsai->valid |= FATTR_FH; } VTOFUD(vp)->flag &= ~FN_SIZECHANGE; } if (vap->va_atime.tv_sec != VNOVAL) { fsai->atime = vap->va_atime.tv_sec; fsai->atimensec = vap->va_atime.tv_nsec; fsai->valid |= FATTR_ATIME; if (vap->va_vaflags & VA_UTIMES_NULL) fsai->valid |= FATTR_ATIME_NOW; } if (vap->va_mtime.tv_sec != VNOVAL) { fsai->mtime = vap->va_mtime.tv_sec; fsai->mtimensec = vap->va_mtime.tv_nsec; fsai->valid |= FATTR_MTIME; if (vap->va_vaflags & VA_UTIMES_NULL) fsai->valid |= FATTR_MTIME_NOW; } if (vap->va_mode != (mode_t)VNOVAL) { fsai->mode = vap->va_mode & ALLPERMS; fsai->valid |= FATTR_MODE; } if (!fsai->valid) { goto out; } if ((err = fdisp_wait_answ(&fdi))) goto out; vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode); if (vnode_vtype(vp) != vtyp) { if (vnode_vtype(vp) == VNON && vtyp != VNON) { SDT_PROBE2(fusefs, , internal, trace, 1, "FUSE: Dang! " "vnode_vtype is VNON and vtype isn't."); } else { /* * STALE vnode, ditch * * The vnode has changed its type "behind our back". * There's nothing really we can do, so let us just * force an internal revocation and tell the caller to * try again, if interested. */ fuse_internal_vnode_disappear(vp); err = EAGAIN; } } if (err == 0) { struct fuse_attr_out *fao = (struct fuse_attr_out*)fdi.answ; fuse_internal_cache_attrs(vp, cred, &fao->attr, fao->attr_valid, fao->attr_valid_nsec, NULL); } out: fdisp_destroy(&fdi); return err; } #ifdef ZERO_PAD_INCOMPLETE_BUFS static int isbzero(void *buf, size_t len) { int i; for (i = 0; i < len; i++) { if (((char *)buf)[i]) return (0); } return (1); } #endif Index: projects/fuse2/sys/fs/fuse/fuse_internal.h =================================================================== --- projects/fuse2/sys/fs/fuse/fuse_internal.h (revision 348486) +++ projects/fuse2/sys/fs/fuse/fuse_internal.h (revision 348487) @@ -1,311 +1,311 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2007-2009 Google Inc. and Amit Singh * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
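
Only the fields flagged in fsai->valid are meaningful to the daemon; the rest of the fuse_setattr_in is left unspecified. A small sketch of how a daemon might inspect the mask, assuming "fuse_kernel.h" defines the FATTR_* bits (describe_setattr is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

#include "fuse_kernel.h"	/* assumed location of struct fuse_setattr_in */

/* Print only the attributes that a FUSE_SETATTR request actually carries. */
static void
describe_setattr(const struct fuse_setattr_in *fsai)
{
	if (fsai->valid & FATTR_MODE)
		printf("mode=%#o\n", (unsigned)fsai->mode);
	if (fsai->valid & FATTR_UID)
		printf("uid=%u\n", (unsigned)fsai->uid);
	if (fsai->valid & FATTR_GID)
		printf("gid=%u\n", (unsigned)fsai->gid);
	if (fsai->valid & FATTR_SIZE)	/* FATTR_FH: fsai->fh names an open handle */
		printf("size=%ju\n", (uintmax_t)fsai->size);
	if (fsai->valid & FATTR_ATIME)	/* FATTR_ATIME_NOW: use the current time */
		printf("atime=%ju\n", (uintmax_t)fsai->atime);
	if (fsai->valid & FATTR_MTIME)	/* FATTR_MTIME_NOW: likewise */
		printf("mtime=%ju\n", (uintmax_t)fsai->mtime);
}
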
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Copyright (C) 2005 Csaba Henk. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _FUSE_INTERNAL_H_ #define _FUSE_INTERNAL_H_ #include #include #include #include #include "fuse_ipc.h" #include "fuse_node.h" extern u_long fuse_lookup_cache_hits; extern u_long fuse_lookup_cache_misses; static inline bool vfs_isrdonly(struct mount *mp) { return ((mp->mnt_flag & MNT_RDONLY) != 0); } static inline struct mount * vnode_mount(struct vnode *vp) { return (vp->v_mount); } static inline enum vtype vnode_vtype(struct vnode *vp) { return (vp->v_type); } static inline bool vnode_isvroot(struct vnode *vp) { return ((vp->v_vflag & VV_ROOT) != 0); } static inline bool vnode_isreg(struct vnode *vp) { return (vp->v_type == VREG); } static inline bool vnode_isdir(struct vnode *vp) { return (vp->v_type == VDIR); } static inline bool vnode_islnk(struct vnode *vp) { return (vp->v_type == VLNK); } static inline ssize_t uio_resid(struct uio *uio) { return (uio->uio_resid); } static inline off_t uio_offset(struct uio *uio) { return (uio->uio_offset); } static inline void uio_setoffset(struct uio *uio, off_t offset) { uio->uio_offset = offset; } /* miscellaneous */ static inline bool fuse_isdeadfs(struct vnode *vp) { struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp)); return (data->dataflags & FSESS_DEAD); } static inline uint64_t fuse_iosize(struct vnode *vp) { return (vp->v_mount->mnt_stat.f_iosize); } /* * Make a cacheable timeout in bintime format value based on a fuse_attr_out * response */ static inline void fuse_validity_2_bintime(uint64_t attr_valid, uint32_t attr_valid_nsec, struct bintime *timeout) { struct timespec now, duration, timeout_ts; getnanouptime(&now); /* "+ 2" is the bound of attr_valid_nsec + now.tv_nsec */ /* Why oh why isn't there a TIME_MAX defined? */ if (attr_valid >= INT_MAX || attr_valid + now.tv_sec + 2 >= INT_MAX) { timeout->sec = INT_MAX; } else { duration.tv_sec = attr_valid; duration.tv_nsec = attr_valid_nsec; timespecadd(&duration, &now, &timeout_ts); timespec2bintime(&timeout_ts, timeout); } } /* * Make a cacheable timeout value in timespec format based on the fuse_entry_out * response */ static inline void fuse_validity_2_timespec(const struct fuse_entry_out *feo, struct timespec *timeout) { struct timespec duration, now; getnanouptime(&now); /* "+ 2" is the bound of entry_valid_nsec + now.tv_nsec */ if (feo->entry_valid >= INT_MAX || feo->entry_valid + now.tv_sec + 2 >= INT_MAX) { timeout->tv_sec = INT_MAX; } else { duration.tv_sec = feo->entry_valid; duration.tv_nsec = feo->entry_valid_nsec; timespecadd(&duration, &now, timeout); } } /* access */ static inline int fuse_match_cred(struct ucred *basecred, struct ucred *usercred) { if (basecred->cr_uid == usercred->cr_uid && basecred->cr_uid == usercred->cr_ruid && basecred->cr_uid == usercred->cr_svuid && basecred->cr_groups[0] == usercred->cr_groups[0] && basecred->cr_groups[0] == usercred->cr_rgid && basecred->cr_groups[0] == usercred->cr_svgid) return (0); return (EPERM); } int fuse_internal_access(struct vnode *vp, accmode_t mode, struct thread *td, struct ucred *cred); /* attributes */ void fuse_internal_cache_attrs(struct vnode *vp, struct ucred *cred, struct fuse_attr *attr, uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap); /* fsync */ int fuse_internal_fsync(struct vnode *vp, struct thread *td, int waitfor, bool datasync); int fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio); /* getattr */ int fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td); int fuse_internal_getattr(struct 
vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td); -/* readdir */ +/* asynchronous invalidation */ +int fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio); -struct pseudo_dirent { - uint32_t d_namlen; -}; - /* mknod */ int fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap); /* readdir */ +struct pseudo_dirent { + uint32_t d_namlen; +}; int fuse_internal_readdir(struct vnode *vp, struct uio *uio, off_t startoff, struct fuse_filehandle *fufh, struct fuse_iov *cookediov, int *ncookies, u_long *cookies); int fuse_internal_readdir_processdata(struct uio *uio, off_t startoff, int *fnd_start, size_t reqsize, void *buf, size_t bufsize, struct fuse_iov *cookediov, int *ncookies, u_long **cookiesp); /* remove */ int fuse_internal_remove(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, enum fuse_opcode op); /* rename */ int fuse_internal_rename(struct vnode *fdvp, struct componentname *fcnp, struct vnode *tdvp, struct componentname *tcnp); /* revoke */ void fuse_internal_vnode_disappear(struct vnode *vp); /* setattr */ int fuse_internal_setattr(struct vnode *vp, struct vattr *va, struct thread *td, struct ucred *cred); /* strategy */ /* entity creation */ static inline int fuse_internal_checkentry(struct fuse_entry_out *feo, enum vtype vtyp) { if (vtyp != IFTOVT(feo->attr.mode)) { return (EINVAL); } if (feo->nodeid == FUSE_NULL_ID) { return (EINVAL); } if (feo->nodeid == FUSE_ROOT_ID) { return (EINVAL); } return (0); } int fuse_internal_newentry(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum fuse_opcode op, void *buf, size_t bufsize, enum vtype vtyp); void fuse_internal_newentry_makerequest(struct mount *mp, uint64_t dnid, struct componentname *cnp, enum fuse_opcode op, void *buf, size_t bufsize, struct fuse_dispatcher *fdip); int fuse_internal_newentry_core(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, enum vtype vtyp, struct fuse_dispatcher *fdip); /* entity destruction */ int fuse_internal_forget_callback(struct fuse_ticket *tick, struct uio *uio); void fuse_internal_forget_send(struct mount *mp, struct thread *td, struct ucred *cred, uint64_t nodeid, uint64_t nlookup); /* fuse start/stop */ int fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio); void fuse_internal_send_init(struct fuse_data *data, struct thread *td); #endif /* _FUSE_INTERNAL_H_ */ Index: projects/fuse2/tests/sys/fs/fusefs/Makefile =================================================================== --- projects/fuse2/tests/sys/fs/fusefs/Makefile (revision 348486) +++ projects/fuse2/tests/sys/fs/fusefs/Makefile (revision 348487) @@ -1,75 +1,76 @@ # $FreeBSD$ PACKAGE= tests TESTSDIR= ${TESTSBASE}/sys/fs/fusefs # We could simply link all of these files into a single executable. But since # Kyua treats googletest programs as plain tests, it's better to separate them # out, so we get more granular reporting. 
GTESTS+= access GTESTS+= allow_other GTESTS+= create GTESTS+= default_permissions GTESTS+= default_permissions_privileged GTESTS+= destroy GTESTS+= dev_fuse_poll GTESTS+= fifo GTESTS+= flush GTESTS+= fsync GTESTS+= fsyncdir GTESTS+= getattr GTESTS+= interrupt GTESTS+= link GTESTS+= locks GTESTS+= lookup GTESTS+= mkdir GTESTS+= mknod GTESTS+= mount GTESTS+= nfs +GTESTS+= notify GTESTS+= open GTESTS+= opendir GTESTS+= read GTESTS+= readdir GTESTS+= readlink GTESTS+= release GTESTS+= releasedir GTESTS+= rename GTESTS+= rmdir GTESTS+= setattr GTESTS+= statfs GTESTS+= symlink GTESTS+= unlink GTESTS+= write GTESTS+= xattr .for p in ${GTESTS} SRCS.$p+= ${p}.cc SRCS.$p+= getmntopts.c SRCS.$p+= mockfs.cc SRCS.$p+= utils.cc .endfor TEST_METADATA.default_permissions+= required_user="unprivileged" TEST_METADATA.default_permissions_privileged+= required_user="root" TEST_METADATA.mknod+= required_user="root" TEST_METADATA.nfs+= required_user="root" # TODO: drastically increase timeout after test development is mostly complete TEST_METADATA+= timeout=10 FUSEFS= ${SRCTOP}/sys/fs/fuse MOUNT= ${SRCTOP}/sbin/mount CXXFLAGS+= -I${SRCTOP}/tests CXXFLAGS+= -I${FUSEFS} CXXFLAGS+= -I${MOUNT} .PATH: ${MOUNT} CXXSTD= c++14 LIBADD+= pthread LIBADD+= gmock gtest LIBADD+= util WARNS?= 6 .include Index: projects/fuse2/tests/sys/fs/fusefs/mockfs.cc =================================================================== --- projects/fuse2/tests/sys/fs/fusefs/mockfs.cc (revision 348486) +++ projects/fuse2/tests/sys/fs/fusefs/mockfs.cc (revision 348487) @@ -1,626 +1,667 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * * This software was developed by BFF Storage Systems, LLC under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ extern "C" { #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mntopts.h" // for build_iovec } #include #include #include "mockfs.hh" using namespace testing; int verbosity = 0; const char* opcode2opname(uint32_t opcode) { const int NUM_OPS = 39; const char* table[NUM_OPS] = { "Unknown (opcode 0)", "LOOKUP", "FORGET", "GETATTR", "SETATTR", "READLINK", "SYMLINK", "Unknown (opcode 7)", "MKNOD", "MKDIR", "UNLINK", "RMDIR", "RENAME", "LINK", "OPEN", "READ", "WRITE", "STATFS", "RELEASE", "Unknown (opcode 19)", "FSYNC", "SETXATTR", "GETXATTR", "LISTXATTR", "REMOVEXATTR", "FLUSH", "INIT", "OPENDIR", "READDIR", "RELEASEDIR", "FSYNCDIR", "GETLK", "SETLK", "SETLKW", "ACCESS", "CREATE", "INTERRUPT", "BMAP", "DESTROY" }; if (opcode >= NUM_OPS) return ("Unknown (opcode > max)"); else return (table[opcode]); } ProcessMockerT ReturnErrno(int error) { return([=](auto in, auto &out) { std::unique_ptr out0(new mockfs_buf_out); out0->header.unique = in.header.unique; out0->header.error = -error; out0->header.len = sizeof(out0->header); out.push_back(std::move(out0)); }); } /* Helper function used for returning negative cache entries for LOOKUP */ ProcessMockerT ReturnNegativeCache(const struct timespec *entry_valid) { return([=](auto in, auto &out) { /* nodeid means ENOENT and cache it */ std::unique_ptr out0(new mockfs_buf_out); out0->body.entry.nodeid = 0; out0->header.unique = in.header.unique; out0->header.error = 0; out0->body.entry.entry_valid = entry_valid->tv_sec; out0->body.entry.entry_valid_nsec = entry_valid->tv_nsec; SET_OUT_HEADER_LEN(*out0, entry); out.push_back(std::move(out0)); }); } ProcessMockerT ReturnImmediate(std::function f) { return([=](auto& in, auto &out) { std::unique_ptr out0(new mockfs_buf_out); out0->header.unique = in.header.unique; f(in, *out0); out.push_back(std::move(out0)); }); } void sigint_handler(int __unused sig) { // Don't do anything except interrupt the daemon's read(2) call } -void MockFS::debug_fuseop(const mockfs_buf_in &in) +void MockFS::debug_request(const mockfs_buf_in &in) { printf("%-11s ino=%2" PRIu64, opcode2opname(in.header.opcode), in.header.nodeid); if (verbosity > 1) { printf(" uid=%5u gid=%5u pid=%5u unique=%" PRIu64 " len=%u", in.header.uid, in.header.gid, in.header.pid, in.header.unique, in.header.len); } switch (in.header.opcode) { const char *name, *value; case FUSE_ACCESS: printf(" mask=%#x", in.body.access.mask); break; case FUSE_CREATE: if (m_kernel_minor_version >= 12) name = (const char*)in.body.bytes + sizeof(fuse_create_in); else name = (const char*)in.body.bytes + sizeof(fuse_open_in); printf(" flags=%#x name=%s", in.body.open.flags, name); break; case FUSE_FLUSH: printf(" fh=%#" PRIx64 " lock_owner=%" PRIu64, in.body.flush.fh, in.body.flush.lock_owner); break; case FUSE_FORGET: printf(" nlookup=%" PRIu64, in.body.forget.nlookup); break; case FUSE_FSYNC: printf(" flags=%#x", in.body.fsync.fsync_flags); break; case FUSE_FSYNCDIR: printf(" flags=%#x", in.body.fsyncdir.fsync_flags); break; case FUSE_INTERRUPT: printf(" unique=%" PRIu64, in.body.interrupt.unique); break; case FUSE_LINK: printf(" oldnodeid=%" PRIu64, in.body.link.oldnodeid); break; case FUSE_LOOKUP: printf(" %s", in.body.lookup); break; case FUSE_MKDIR: name = (const char*)in.body.bytes + sizeof(fuse_mkdir_in); printf(" name=%s mode=%#o umask=%#o", name, in.body.mkdir.mode, in.body.mkdir.umask); break; case FUSE_MKNOD: if (m_kernel_minor_version >= 12) name = (const char*)in.body.bytes + 
sizeof(fuse_mknod_in); else name = (const char*)in.body.bytes + FUSE_COMPAT_MKNOD_IN_SIZE; printf(" mode=%#o rdev=%x umask=%#o name=%s", in.body.mknod.mode, in.body.mknod.rdev, in.body.mknod.umask, name); break; case FUSE_OPEN: printf(" flags=%#x", in.body.open.flags); break; case FUSE_OPENDIR: printf(" flags=%#x", in.body.opendir.flags); break; case FUSE_READ: printf(" offset=%" PRIu64 " size=%u", in.body.read.offset, in.body.read.size); if (verbosity > 1) printf(" flags=%#x", in.body.read.flags); break; case FUSE_READDIR: printf(" fh=%#" PRIx64 " offset=%" PRIu64 " size=%u", in.body.readdir.fh, in.body.readdir.offset, in.body.readdir.size); break; case FUSE_RELEASE: printf(" fh=%#" PRIx64 " flags=%#x lock_owner=%" PRIu64, in.body.release.fh, in.body.release.flags, in.body.release.lock_owner); break; case FUSE_SETATTR: if (verbosity <= 1) { printf(" valid=%#x", in.body.setattr.valid); break; } if (in.body.setattr.valid & FATTR_MODE) printf(" mode=%#o", in.body.setattr.mode); if (in.body.setattr.valid & FATTR_UID) printf(" uid=%u", in.body.setattr.uid); if (in.body.setattr.valid & FATTR_GID) printf(" gid=%u", in.body.setattr.gid); if (in.body.setattr.valid & FATTR_SIZE) printf(" size=%" PRIu64, in.body.setattr.size); if (in.body.setattr.valid & FATTR_ATIME) printf(" atime=%" PRIu64 ".%u", in.body.setattr.atime, in.body.setattr.atimensec); if (in.body.setattr.valid & FATTR_MTIME) printf(" mtime=%" PRIu64 ".%u", in.body.setattr.mtime, in.body.setattr.mtimensec); if (in.body.setattr.valid & FATTR_FH) printf(" fh=%" PRIu64 "", in.body.setattr.fh); break; case FUSE_SETLK: printf(" fh=%#" PRIx64 " owner=%" PRIu64 " type=%u pid=%u", in.body.setlk.fh, in.body.setlk.owner, in.body.setlk.lk.type, in.body.setlk.lk.pid); if (verbosity >= 2) { printf(" range=[%" PRIu64 "-%" PRIu64 "]", in.body.setlk.lk.start, in.body.setlk.lk.end); } break; case FUSE_SETXATTR: /* * In theory neither the xattr name and value need be * ASCII, but in this test suite they always are. */ name = (const char*)in.body.bytes + sizeof(fuse_setxattr_in); value = name + strlen(name) + 1; printf(" %s=%s", name, value); break; case FUSE_WRITE: printf(" fh=%#" PRIx64 " offset=%" PRIu64 " size=%u write_flags=%u", in.body.write.fh, in.body.write.offset, in.body.write.size, in.body.write.write_flags); if (verbosity > 1) printf(" flags=%#x", in.body.write.flags); break; default: break; } printf("\n"); } +/* + * Debug a FUSE response. + * + * This is mostly useful for asynchronous notifications, which don't correspond + * to any request + */ +void MockFS::debug_response(const mockfs_buf_out &out) { + const char *name; + + if (verbosity == 0) + return; + + switch (out.header.error) { + case FUSE_NOTIFY_INVAL_ENTRY: + name = (const char*)out.body.bytes + + sizeof(fuse_notify_inval_entry_out); + printf("<- INVAL_ENTRY parent=%" PRIu64 " %s\n", + out.body.inval_entry.parent, name); + break; + default: + break; + } +} + MockFS::MockFS(int max_readahead, bool allow_other, bool default_permissions, bool push_symlinks_in, bool ro, enum poll_method pm, uint32_t flags, uint32_t kernel_minor_version) { struct sigaction sa; struct iovec *iov = NULL; int iovlen = 0; char fdstr[15]; const bool trueval = true; m_daemon_id = NULL; m_kernel_minor_version = kernel_minor_version; m_maxreadahead = max_readahead; m_nready = -1; m_pm = pm; m_quit = false; if (m_pm == KQ) m_kq = kqueue(); else m_kq = -1; /* * Kyua sets pwd to a testcase-unique tempdir; no need to use * mkdtemp */ /* * googletest doesn't allow ASSERT_ in constructors, so we must throw * instead. 
*/ if (mkdir("mountpoint" , 0755) && errno != EEXIST) throw(std::system_error(errno, std::system_category(), "Couldn't make mountpoint directory")); switch (m_pm) { case BLOCKING: m_fuse_fd = open("/dev/fuse", O_CLOEXEC | O_RDWR); break; default: m_fuse_fd = open("/dev/fuse", O_CLOEXEC | O_RDWR | O_NONBLOCK); break; } if (m_fuse_fd < 0) throw(std::system_error(errno, std::system_category(), "Couldn't open /dev/fuse")); m_pid = getpid(); m_child_pid = -1; build_iovec(&iov, &iovlen, "fstype", __DECONST(void *, "fusefs"), -1); build_iovec(&iov, &iovlen, "fspath", __DECONST(void *, "mountpoint"), -1); build_iovec(&iov, &iovlen, "from", __DECONST(void *, "/dev/fuse"), -1); sprintf(fdstr, "%d", m_fuse_fd); build_iovec(&iov, &iovlen, "fd", fdstr, -1); if (allow_other) { build_iovec(&iov, &iovlen, "allow_other", __DECONST(void*, &trueval), sizeof(bool)); } if (default_permissions) { build_iovec(&iov, &iovlen, "default_permissions", __DECONST(void*, &trueval), sizeof(bool)); } if (push_symlinks_in) { build_iovec(&iov, &iovlen, "push_symlinks_in", __DECONST(void*, &trueval), sizeof(bool)); } if (ro) { build_iovec(&iov, &iovlen, "ro", __DECONST(void*, &trueval), sizeof(bool)); } if (nmount(iov, iovlen, 0)) throw(std::system_error(errno, std::system_category(), "Couldn't mount filesystem")); // Setup default handler ON_CALL(*this, process(_, _)) .WillByDefault(Invoke(this, &MockFS::process_default)); init(flags); bzero(&sa, sizeof(sa)); sa.sa_handler = sigint_handler; sa.sa_flags = 0; /* Don't set SA_RESTART! */ if (0 != sigaction(SIGUSR1, &sa, NULL)) throw(std::system_error(errno, std::system_category(), "Couldn't handle SIGUSR1")); if (pthread_create(&m_daemon_id, NULL, service, (void*)this)) throw(std::system_error(errno, std::system_category(), "Couldn't Couldn't start fuse thread")); } MockFS::~MockFS() { kill_daemon(); if (m_daemon_id != NULL) { pthread_join(m_daemon_id, NULL); m_daemon_id = NULL; } ::unmount("mountpoint", MNT_FORCE); rmdir("mountpoint"); if (m_kq >= 0) close(m_kq); } void MockFS::init(uint32_t flags) { std::unique_ptr in(new mockfs_buf_in); std::unique_ptr out(new mockfs_buf_out); read_request(*in); ASSERT_EQ(FUSE_INIT, in->header.opcode); out->header.unique = in->header.unique; out->header.error = 0; out->body.init.major = FUSE_KERNEL_VERSION; out->body.init.minor = m_kernel_minor_version;; out->body.init.flags = in->body.init.flags & flags; /* * The default max_write is set to this formula in libfuse, though * individual filesystems can lower it. The "- 4096" was added in * commit 154ffe2, with the commit message "fix". */ uint32_t default_max_write = 32 * getpagesize() + 0x1000 - 4096; /* For testing purposes, it should be distinct from MAXPHYS */ m_max_write = MIN(default_max_write, MAXPHYS / 2); out->body.init.max_write = m_max_write; out->body.init.max_readahead = m_maxreadahead; SET_OUT_HEADER_LEN(*out, init); write(m_fuse_fd, out.get(), out->header.len); } void MockFS::kill_daemon() { m_quit = true; if (m_daemon_id != NULL) pthread_kill(m_daemon_id, SIGUSR1); // Closing the /dev/fuse file descriptor first allows unmount to // succeed even if the daemon doesn't correctly respond to commands // during the unmount sequence. 
close(m_fuse_fd); m_fuse_fd = -1; } void MockFS::loop() { std::vector<std::unique_ptr<mockfs_buf_out>> out; std::unique_ptr<mockfs_buf_in> in(new mockfs_buf_in); ASSERT_TRUE(in != NULL); while (!m_quit) { bzero(in.get(), sizeof(*in)); read_request(*in); if (m_quit) break; if (verbosity > 0) - debug_fuseop(*in); + debug_request(*in); if (pid_ok((pid_t)in->header.pid)) { process(*in, out); } else { /* * Reject any requests from unknown processes. Because * we actually do mount a filesystem, plenty of * unrelated system daemons may try to access it. */ if (verbosity > 1) printf("\tREJECTED (wrong pid %d)\n", in->header.pid); process_default(*in, out); } for (auto &it: out) write_response(*it); out.clear(); } +} + +int MockFS::notify_inval_entry(ino_t parent, const char *name, size_t namelen) +{ + std::unique_ptr<mockfs_buf_out> out(new mockfs_buf_out); + + out->header.unique = 0; /* 0 means asynchronous notification */ + out->header.error = FUSE_NOTIFY_INVAL_ENTRY; + out->body.inval_entry.parent = parent; + out->body.inval_entry.namelen = namelen; + strlcpy((char*)&out->body.bytes + sizeof(out->body.inval_entry), + name, sizeof(out->body.bytes) - sizeof(out->body.inval_entry)); + out->header.len = sizeof(out->header) + sizeof(out->body.inval_entry) + + namelen; + debug_response(*out); + write_response(*out); + return 0; } bool MockFS::pid_ok(pid_t pid) { if (pid == m_pid) { return (true); } else if (pid == m_child_pid) { return (true); } else { struct kinfo_proc *ki; bool ok = false; ki = kinfo_getproc(pid); if (ki == NULL) return (false); /* * Allow access by the aio daemon processes so that our tests * can use aio functions */ if (0 == strncmp("aiod", ki->ki_comm, 4)) ok = true; free(ki); return (ok); } } void MockFS::process_default(const mockfs_buf_in& in, std::vector<std::unique_ptr<mockfs_buf_out>> &out) { std::unique_ptr<mockfs_buf_out> out0(new mockfs_buf_out); out0->header.unique = in.header.unique; out0->header.error = -EOPNOTSUPP; out0->header.len = sizeof(out0->header); out.push_back(std::move(out0)); } void MockFS::read_request(mockfs_buf_in &in) { ssize_t res; int nready = 0; fd_set readfds; pollfd fds[1]; struct kevent changes[1]; struct kevent events[1]; struct timespec timeout_ts; struct timeval timeout_tv; const int timeout_ms = 999; int timeout_int, nfds; switch (m_pm) { case BLOCKING: break; case KQ: timeout_ts.tv_sec = 0; timeout_ts.tv_nsec = timeout_ms * 1'000'000; while (nready == 0) { EV_SET(&changes[0], m_fuse_fd, EVFILT_READ, EV_ADD, 0, 0, 0); nready = kevent(m_kq, &changes[0], 1, &events[0], 1, &timeout_ts); if (m_quit) return; } ASSERT_LE(0, nready) << strerror(errno); ASSERT_EQ(events[0].ident, (uintptr_t)m_fuse_fd); if (events[0].flags & EV_ERROR) FAIL() << strerror(events[0].data); else if (events[0].flags & EV_EOF) FAIL() << strerror(events[0].fflags); m_nready = events[0].data; break; case POLL: timeout_int = timeout_ms; fds[0].fd = m_fuse_fd; fds[0].events = POLLIN; while (nready == 0) { nready = poll(fds, 1, timeout_int); if (m_quit) return; } ASSERT_LE(0, nready) << strerror(errno); ASSERT_TRUE(fds[0].revents & POLLIN); break; case SELECT: timeout_tv.tv_sec = 0; timeout_tv.tv_usec = timeout_ms * 1'000; nfds = m_fuse_fd + 1; while (nready == 0) { FD_ZERO(&readfds); FD_SET(m_fuse_fd, &readfds); nready = select(nfds, &readfds, NULL, NULL, &timeout_tv); if (m_quit) return; } ASSERT_LE(0, nready) << strerror(errno); ASSERT_TRUE(FD_ISSET(m_fuse_fd, &readfds)); break; default: FAIL() << "not yet implemented"; } res = read(m_fuse_fd, &in, sizeof(in)); if (res < 0 && !m_quit) perror("read"); ASSERT_TRUE(res >= static_cast<ssize_t>(sizeof(in.header)) || m_quit); } void
MockFS::write_response(const mockfs_buf_out &out) { fd_set writefds; pollfd fds[1]; int nready, nfds; ssize_t r; switch (m_pm) { case BLOCKING: case KQ: /* EVFILT_WRITE is not supported */ break; case POLL: fds[0].fd = m_fuse_fd; fds[0].events = POLLOUT; nready = poll(fds, 1, INFTIM); ASSERT_LE(0, nready) << strerror(errno); ASSERT_EQ(1, nready) << "NULL timeout expired?"; ASSERT_TRUE(fds[0].revents & POLLOUT); break; case SELECT: FD_ZERO(&writefds); FD_SET(m_fuse_fd, &writefds); nfds = m_fuse_fd + 1; nready = select(nfds, NULL, &writefds, NULL, NULL); ASSERT_LE(0, nready) << strerror(errno); ASSERT_EQ(1, nready) << "NULL timeout expired?"; ASSERT_TRUE(FD_ISSET(m_fuse_fd, &writefds)); break; default: FAIL() << "not yet implemented"; } r = write(m_fuse_fd, &out, out.header.len); ASSERT_TRUE(r > 0 || errno == EAGAIN) << strerror(errno); } void* MockFS::service(void *pthr_data) { MockFS *mock_fs = (MockFS*)pthr_data; mock_fs->loop(); return (NULL); } void MockFS::unmount() { ::unmount("mountpoint", 0); } Index: projects/fuse2/tests/sys/fs/fusefs/mockfs.hh =================================================================== --- projects/fuse2/tests/sys/fs/fusefs/mockfs.hh (revision 348486) +++ projects/fuse2/tests/sys/fs/fusefs/mockfs.hh (revision 348487) @@ -1,326 +1,342 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 The FreeBSD Foundation * * This software was developed by BFF Storage Systems, LLC under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ extern "C" { #include #include #include "fuse_kernel.h" } #include #define TIME_T_MAX (std::numeric_limits<time_t>::max()) /* * A pseudo-fuse errno used to indicate that a fuse operation should have no * response, at least not immediately */ #define FUSE_NORESPONSE 9999 #define SET_OUT_HEADER_LEN(out, variant) { \ (out).header.len = (sizeof((out).header) + \ sizeof((out).body.variant)); \ } /* * Create an expectation on FUSE_LOOKUP and return it so the caller can set * actions. * * This must be a macro instead of a method because EXPECT_CALL returns a type * with a deleted constructor.
*/ #define EXPECT_LOOKUP(parent, path) \ EXPECT_CALL(*m_mock, process( \ ResultOf([=](auto in) { \ return (in.header.opcode == FUSE_LOOKUP && \ in.header.nodeid == (parent) && \ strcmp(in.body.lookup, (path)) == 0); \ }, Eq(true)), \ _) \ ) extern int verbosity; /* This struct isn't defined by fuse_kernel.h or libfuse, but it should be */ struct fuse_create_out { struct fuse_entry_out entry; struct fuse_open_out open; }; /* Protocol 7.8 version of struct fuse_attr */ struct fuse_attr_7_8 { __u64 ino; __u64 size; __u64 blocks; __u64 atime; __u64 mtime; __u64 ctime; __u32 atimensec; __u32 mtimensec; __u32 ctimensec; __u32 mode; __u32 nlink; __u32 uid; __u32 gid; __u32 rdev; }; /* Protocol 7.8 version of struct fuse_attr_out */ struct fuse_attr_out_7_8 { __u64 attr_valid; __u32 attr_valid_nsec; __u32 dummy; struct fuse_attr_7_8 attr; }; /* Protocol 7.8 version of struct fuse_entry_out */ struct fuse_entry_out_7_8 { __u64 nodeid; /* Inode ID */ __u64 generation; /* Inode generation: nodeid:gen must be unique for the fs's lifetime */ __u64 entry_valid; /* Cache timeout for the name */ __u64 attr_valid; /* Cache timeout for the attributes */ __u32 entry_valid_nsec; __u32 attr_valid_nsec; struct fuse_attr_7_8 attr; }; /* Output struct for FUSE_CREATE for protocol 7.8 servers */ struct fuse_create_out_7_8 { struct fuse_entry_out_7_8 entry; struct fuse_open_out open; }; union fuse_payloads_in { fuse_access_in access; /* value is from fuse_kern_chan.c in fusefs-libs */ uint8_t bytes[0x21000 - sizeof(struct fuse_in_header)]; fuse_create_in create; fuse_flush_in flush; fuse_fsync_in fsync; fuse_fsync_in fsyncdir; fuse_forget_in forget; fuse_interrupt_in interrupt; fuse_lk_in getlk; fuse_getxattr_in getxattr; fuse_init_in init; fuse_link_in link; fuse_listxattr_in listxattr; char lookup[0]; fuse_mkdir_in mkdir; fuse_mknod_in mknod; fuse_open_in open; fuse_open_in opendir; fuse_read_in read; fuse_read_in readdir; fuse_release_in release; fuse_release_in releasedir; fuse_rename_in rename; char rmdir[0]; fuse_setattr_in setattr; fuse_setxattr_in setxattr; fuse_lk_in setlk; fuse_lk_in setlkw; char unlink[0]; fuse_write_in write; }; struct mockfs_buf_in { fuse_in_header header; union fuse_payloads_in body; }; union fuse_payloads_out { fuse_attr_out attr; fuse_attr_out_7_8 attr_7_8; fuse_create_out create; fuse_create_out_7_8 create_7_8; /* * The protocol places no limits on the size of bytes. Choose * a size big enough for anything we'll test. */ uint8_t bytes[0x20000]; fuse_entry_out entry; fuse_entry_out_7_8 entry_7_8; fuse_lk_out getlk; fuse_getxattr_out getxattr; fuse_init_out init; + /* The inval_entry structure should be followed by the entry's name */ + fuse_notify_inval_entry_out inval_entry; fuse_listxattr_out listxattr; fuse_open_out open; fuse_statfs_out statfs; /* * The protocol places no limits on the length of the string. This is * merely convenient for testing. */ char str[80]; fuse_write_out write; }; struct mockfs_buf_out { fuse_out_header header; union fuse_payloads_out body; /* Default constructor: zero everything */ mockfs_buf_out() { memset(this, 0, sizeof(*this)); } }; /* A function that can be invoked in place of MockFS::process */ typedef std::function<void (const mockfs_buf_in& in, std::vector<std::unique_ptr<mockfs_buf_out>> &out)> ProcessMockerT; /* * Helper function used for setting an error expectation for any fuse operation.
* The operation will return the supplied error */ ProcessMockerT ReturnErrno(int error); /* Helper function used for returning negative cache entries for LOOKUP */ ProcessMockerT ReturnNegativeCache(const struct timespec *entry_valid); /* Helper function used for returning a single immediate response */ ProcessMockerT ReturnImmediate( std::function<void (const mockfs_buf_in& in, struct mockfs_buf_out &out)> f); /* How the daemon should check /dev/fuse for readiness */ enum poll_method { BLOCKING, SELECT, POLL, KQ }; /* * Fake FUSE filesystem * * "Mounts" a filesystem to a temporary directory and services requests * according to the programmed expectations. * * Operates directly on the fusefs(4) kernel API, not the libfuse(3) user api. */ class MockFS { /* * thread id of the fuse daemon thread * * It must run in a separate thread so it doesn't deadlock with the * client test code. */ pthread_t m_daemon_id; /* file descriptor of /dev/fuse control device */ int m_fuse_fd; /* The minor version of the kernel API that this mock daemon targets */ uint32_t m_kernel_minor_version; int m_kq; /* The max_readahead filesystem option */ uint32_t m_maxreadahead; /* pid of the test process */ pid_t m_pid; /* Method the daemon should use for I/O to and from /dev/fuse */ enum poll_method m_pm; - void debug_fuseop(const mockfs_buf_in&); + void debug_request(const mockfs_buf_in&); + void debug_response(const mockfs_buf_out&); /* Initialize a session after mounting */ void init(uint32_t flags); /* Is pid from a process that might be involved in the test? */ bool pid_ok(pid_t pid); /* Default request handler */ void process_default(const mockfs_buf_in&, std::vector<std::unique_ptr<mockfs_buf_out>>&); /* Entry point for the daemon thread */ static void* service(void*); /* Read, but do not process, a single request from the kernel */ void read_request(mockfs_buf_in& in); /* Write a single response back to the kernel */ void write_response(const mockfs_buf_out &out); public: /* pid of child process, for two-process test cases */ pid_t m_child_pid; /* Maximum size of a FUSE_WRITE write */ uint32_t m_max_write; /* * Number of events that were available from /dev/fuse after the last * kevent call. Only valid when m_pm = KQ. */ int m_nready; /* Tell the daemon to shut down ASAP */ bool m_quit; /* Create a new mockfs and mount it to a tempdir */ MockFS(int max_readahead, bool allow_other, bool default_permissions, bool push_symlinks_in, bool ro, enum poll_method pm, uint32_t flags, uint32_t kernel_minor_version); virtual ~MockFS(); /* Kill the filesystem daemon without unmounting the filesystem */ void kill_daemon(); /* Process FUSE requests endlessly */ void loop(); + + /* + * Send an asynchronous notification to invalidate a directory entry. + * Similar to libfuse's fuse_lowlevel_notify_inval_entry + * + * This method will block until the client has responded, so it should + * generally be run in a separate thread from request processing. + * + * @param parent Parent directory's inode number + * @param name name of dirent to invalidate + * @param namelen size of name, including the NUL + */ + int notify_inval_entry(ino_t parent, const char *name, size_t namelen); /* * Request handler * * This method is expected to provide the responses to each FUSE * operation. For an immediate response, push one buffer into out. * For a delayed response, push nothing. For an immediate response * plus a delayed response to an earlier operation, push two bufs.
* Test cases must define each response using Googlemock expectations */ MOCK_METHOD2(process, void(const mockfs_buf_in&, std::vector<std::unique_ptr<mockfs_buf_out>>&)); /* Gracefully unmount */ void unmount(); }; Index: projects/fuse2/tests/sys/fs/fusefs/notify.cc =================================================================== --- projects/fuse2/tests/sys/fs/fusefs/notify.cc (nonexistent) +++ projects/fuse2/tests/sys/fs/fusefs/notify.cc (revision 348487) @@ -0,0 +1,240 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2019 The FreeBSD Foundation + * + * This software was developed by BFF Storage Systems, LLC under sponsorship + * from the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +extern "C" { +#include +} + +#include "mockfs.hh" +#include "utils.hh" + +using namespace testing; + +/* + * FUSE asynchronous notification + * + * FUSE servers can send unprompted notification messages for things like cache + * invalidation. This file tests our client's handling of those messages.
+ */ + +class Notify: public FuseTest { +public: +virtual void SetUp() { + m_init_flags = FUSE_EXPORT_SUPPORT; + FuseTest::SetUp(); +} + +void expect_lookup(uint64_t parent, const char *relpath, uint64_t ino, + Sequence &seq) +{ + EXPECT_LOOKUP(parent, relpath) + .InSequence(seq) + .WillOnce(Invoke( + ReturnImmediate([=](auto in __unused, auto& out) { + SET_OUT_HEADER_LEN(out, entry); + out.body.entry.attr.mode = S_IFREG | 0644; + out.body.entry.nodeid = ino; + out.body.entry.attr.ino = ino; + out.body.entry.attr.nlink = 1; + out.body.entry.attr_valid = UINT64_MAX; + out.body.entry.entry_valid = UINT64_MAX; + }))); +} +}; + +struct inval_entry_args { + MockFS *mock; + ino_t parent; + const char *name; + size_t namelen; +}; + +static void* inval_entry(void* arg) { + const struct inval_entry_args *iea = (struct inval_entry_args*)arg; + ssize_t r; + + r = iea->mock->notify_inval_entry(iea->parent, iea->name, iea->namelen); + if (r >= 0) + return 0; + else + return (void*)(intptr_t)errno; +} + +/* Invalidate a nonexistent entry */ +TEST_F(Notify, inval_entry_nonexistent) { + const static char *name = "foo"; + struct inval_entry_args iea; + void *thr0_value; + pthread_t th0; + + iea.mock = m_mock; + iea.parent = FUSE_ROOT_ID; + iea.name = name; + iea.namelen = strlen(name); + ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea)) + << strerror(errno); + pthread_join(th0, &thr0_value); + /* It's not an error for an entry to not be cached */ + EXPECT_EQ(0, (intptr_t)thr0_value); +} + +/* Invalidate a cached entry */ +TEST_F(Notify, inval_entry) { + const static char FULLPATH[] = "mountpoint/foo"; + const static char RELPATH[] = "foo"; + struct inval_entry_args iea; + struct stat sb; + void *thr0_value; + uint64_t ino0 = 42; + uint64_t ino1 = 43; + Sequence seq; + pthread_t th0; + + expect_lookup(FUSE_ROOT_ID, RELPATH, ino0, seq); + expect_lookup(FUSE_ROOT_ID, RELPATH, ino1, seq); + + /* Fill the entry cache */ + ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); + EXPECT_EQ(ino0, sb.st_ino); + + /* Now invalidate the entry */ + iea.mock = m_mock; + iea.parent = FUSE_ROOT_ID; + iea.name = RELPATH; + iea.namelen = strlen(RELPATH); + ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea)) + << strerror(errno); + pthread_join(th0, &thr0_value); + /* It's not an error for an entry to not be cached */ + EXPECT_EQ(0, (intptr_t)thr0_value); + + /* The second lookup should return the alternate ino */ + ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); + EXPECT_EQ(ino1, sb.st_ino); +} + +/* + * Invalidate a cached entry beneath the root, which uses a slightly different + * code path.
+ */ +TEST_F(Notify, inval_entry_below_root) +{ + const static char FULLPATH[] = "mountpoint/some_dir/foo"; + const static char DNAME[] = "some_dir"; + const static char FNAME[] = "foo"; + struct inval_entry_args iea; + struct stat sb; + void *thr0_value; + uint64_t dir_ino = 41; + uint64_t ino0 = 42; + uint64_t ino1 = 43; + Sequence seq; + pthread_t th0; + + EXPECT_LOOKUP(FUSE_ROOT_ID, DNAME) + .WillOnce(Invoke( + ReturnImmediate([=](auto in __unused, auto& out) { + SET_OUT_HEADER_LEN(out, entry); + out.body.entry.attr.mode = S_IFDIR | 0755; + out.body.entry.nodeid = dir_ino; + out.body.entry.attr.nlink = 2; + out.body.entry.attr_valid = UINT64_MAX; + out.body.entry.entry_valid = UINT64_MAX; + }))); + expect_lookup(dir_ino, FNAME, ino0, seq); + expect_lookup(dir_ino, FNAME, ino1, seq); + + /* Fill the entry cache */ + ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); + EXPECT_EQ(ino0, sb.st_ino); + + /* Now invalidate the entry */ + iea.mock = m_mock; + iea.parent = dir_ino; + iea.name = FNAME; + iea.namelen = strlen(FNAME); + ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea)) + << strerror(errno); + pthread_join(th0, &thr0_value); + /* It's not an error for an entry to not be cached */ + EXPECT_EQ(0, (intptr_t)thr0_value); + + /* The second lookup should return the alternate ino */ + ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); + EXPECT_EQ(ino1, sb.st_ino); +} + +/* Invalidating an entry invalidates the parent directory's attributes */ +TEST_F(Notify, inval_entry_invalidates_parent_attrs) +{ + const static char FULLPATH[] = "mountpoint/foo"; + const static char RELPATH[] = "foo"; + struct inval_entry_args iea; + struct stat sb; + void *thr0_value; + uint64_t ino = 42; + Sequence seq; + pthread_t th0; + + expect_lookup(FUSE_ROOT_ID, RELPATH, ino, seq); + EXPECT_CALL(*m_mock, process( + ResultOf([=](auto in) { + return (in.header.opcode == FUSE_GETATTR && + in.header.nodeid == FUSE_ROOT_ID); + }, Eq(true)), + _) + ).Times(2) + .WillRepeatedly(Invoke(ReturnImmediate([=](auto i __unused, auto& out) { + SET_OUT_HEADER_LEN(out, attr); + out.body.attr.attr.mode = S_IFDIR | 0755; + out.body.attr.attr_valid = UINT64_MAX; + }))); + + /* Fill the attr and entry cache */ + ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno); + ASSERT_EQ(0, stat(FULLPATH, &sb)) << strerror(errno); + + /* Now invalidate the entry */ + iea.mock = m_mock; + iea.parent = FUSE_ROOT_ID; + iea.name = RELPATH; + iea.namelen = strlen(RELPATH); + ASSERT_EQ(0, pthread_create(&th0, NULL, inval_entry, &iea)) + << strerror(errno); + pthread_join(th0, &thr0_value); + /* It's not an error for an entry to not be cached */ + EXPECT_EQ(0, (intptr_t)thr0_value); + + /* /'s attribute cache should be cleared */ + ASSERT_EQ(0, stat("mountpoint", &sb)) << strerror(errno); +} Property changes on: projects/fuse2/tests/sys/fs/fusefs/notify.cc ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property
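Editor's note, for readers who only know the libfuse side of this interface: MockFS::notify_inval_entry hand-builds the same message that a production FUSE server would send through libfuse's fuse_lowlevel_notify_inval_entry(), which the mockfs.hh comment references. The sketch below is illustrative only and is not part of this change; it assumes the libfuse 3 low-level API (under libfuse 2 the first argument is a struct fuse_chan * rather than a struct fuse_session *), and the helper name invalidate_dirent is invented for the example.

/* Sketch only: pick the FUSE_USE_VERSION that matches the installed libfuse 3. */
#define FUSE_USE_VERSION 34

#include <string.h>

#include <fuse_lowlevel.h>

/*
 * Ask the kernel to drop its cached name -> inode mapping for "name" under
 * the directory inode "parent".  Returns 0 on success or a negated errno.
 */
int
invalidate_dirent(struct fuse_session *se, fuse_ino_t parent, const char *name)
{
	return (fuse_lowlevel_notify_inval_entry(se, parent, name,
	    strlen(name)));
}

Either way, what reaches /dev/fuse is the layout the mock constructs by hand in notify_inval_entry above: a fuse_out_header whose unique field is zero (marking an unsolicited notification) and whose error field carries the FUSE_NOTIFY_INVAL_ENTRY code, followed by a fuse_notify_inval_entry_out and the entry's name.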