diff --git a/sys/fs/tarfs/tarfs_io.c b/sys/fs/tarfs/tarfs_io.c index 87885fbb329c..6127299b23c1 100644 --- a/sys/fs/tarfs/tarfs_io.c +++ b/sys/fs/tarfs/tarfs_io.c @@ -1,731 +1,729 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Juniper Networks, Inc. * Copyright (c) 2022-2023 Klara, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

#include "opt_tarfs.h"
#include "opt_zstdio.h"

/* NOTE(review): the <...> targets of these includes were stripped by the
 * extraction that produced this file — restore from upstream tarfs_io.c. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef ZSTDIO
#define ZSTD_STATIC_LINKING_ONLY
#include
#endif

#include
#include

#ifdef TARFS_DEBUG
SYSCTL_NODE(_vfs_tarfs, OID_AUTO, zio, CTLFLAG_RD, 0,
    "Tar filesystem decompression layer");
/* Counters tracking how much work the decompression layer has done. */
COUNTER_U64_DEFINE_EARLY(tarfs_zio_inflated);
SYSCTL_COUNTER_U64(_vfs_tarfs_zio, OID_AUTO, inflated, CTLFLAG_RD,
    &tarfs_zio_inflated, "Amount of compressed data inflated.");
COUNTER_U64_DEFINE_EARLY(tarfs_zio_consumed);
SYSCTL_COUNTER_U64(_vfs_tarfs_zio, OID_AUTO, consumed, CTLFLAG_RD,
    &tarfs_zio_consumed, "Amount of compressed data consumed.");
COUNTER_U64_DEFINE_EARLY(tarfs_zio_bounced);
SYSCTL_COUNTER_U64(_vfs_tarfs_zio, OID_AUTO, bounced, CTLFLAG_RD,
    &tarfs_zio_bounced, "Amount of decompressed data bounced.");

/*
 * Sysctl handler: reads back 0; any write resets all three counters.
 */
static int
tarfs_sysctl_handle_zio_reset(SYSCTL_HANDLER_ARGS)
{
	unsigned int tmp;
	int error;

	tmp = 0;
	if ((error = SYSCTL_OUT(req, &tmp, sizeof(tmp))) != 0)
		return (error);
	if (req->newptr != NULL) {
		if ((error = SYSCTL_IN(req, &tmp, sizeof(tmp))) != 0)
			return (error);
		counter_u64_zero(tarfs_zio_inflated);
		counter_u64_zero(tarfs_zio_consumed);
		counter_u64_zero(tarfs_zio_bounced);
	}
	return (0);
}
SYSCTL_PROC(_vfs_tarfs_zio, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW,
    NULL, 0, tarfs_sysctl_handle_zio_reset, "IU",
    "Reset compression counters.");
#endif

MALLOC_DEFINE(M_TARFSZSTATE, "tarfs zstate", "tarfs decompression state");
MALLOC_DEFINE(M_TARFSZBUF, "tarfs zbuf", "tarfs decompression buffers");

/* Magic-number signatures used to sniff the archive's compression format. */
#define XZ_MAGIC	(uint8_t[]){ 0xfd, 0x37, 0x7a, 0x58, 0x5a }
#define ZLIB_MAGIC	(uint8_t[]){ 0x1f, 0x8b, 0x08 }
#define ZSTD_MAGIC	(uint8_t[]){ 0x28, 0xb5, 0x2f, 0xfd }

#ifdef ZSTDIO
/* Per-mount zstd decompression stream state. */
struct tarfs_zstd {
	ZSTD_DStream *zds;
};
#endif

/* XXX review use of curthread / uio_td / td_cred */

/*
 * Reads from the tar file according to the provided uio.
 * If the archive
 * is compressed and raw is false, reads the decompressed stream;
 * otherwise, reads directly from the original file.  Returns 0 on success
 * and a positive errno value on failure.
 */
int
tarfs_io_read(struct tarfs_mount *tmp, bool raw, struct uio *uiop)
{
	void *rl = NULL;
	off_t off = uiop->uio_offset;
	size_t len = uiop->uio_resid;
	int error;

	if (raw || tmp->znode == NULL) {
		/*
		 * Raw path: range-lock the region being read, then take a
		 * shared vnode lock for the duration of the VOP_READ.
		 */
		rl = vn_rangelock_rlock(tmp->vp, off, off + len);
		error = vn_lock(tmp->vp, LK_SHARED);
		if (error == 0) {
			error = VOP_READ(tmp->vp, uiop,
			    IO_DIRECT|IO_NODELOCKED,
			    uiop->uio_td->td_ucred);
			VOP_UNLOCK(tmp->vp);
		}
		vn_rangelock_unlock(tmp->vp, rl);
	} else {
		/*
		 * Decompressed path: the zio node serializes readers, so it
		 * is locked exclusively (its read mutates stream state).
		 */
		error = vn_lock(tmp->znode, LK_EXCLUSIVE);
		if (error == 0) {
			error = VOP_READ(tmp->znode, uiop,
			    IO_DIRECT | IO_NODELOCKED,
			    uiop->uio_td->td_ucred);
			VOP_UNLOCK(tmp->znode);
		}
	}
	TARFS_DPF(IO, "%s(%zu, %zu) = %d (resid %zd)\n", __func__,
	    (size_t)off, len, error, uiop->uio_resid);
	return (error);
}

/*
 * Reads from the tar file into the provided buffer.  If the archive is
 * compressed and raw is false, reads the decompressed stream; otherwise,
 * reads directly from the original file.  Returns the number of bytes
 * read on success, 0 on EOF, and a negative errno value on failure.
 */
ssize_t
tarfs_io_read_buf(struct tarfs_mount *tmp, bool raw,
    void *buf, off_t off, size_t len)
{
	struct uio auio;
	struct iovec aiov;
	ssize_t res;
	int error;

	/* Zero-length reads succeed trivially. */
	if (len == 0) {
		TARFS_DPF(IO, "%s(%zu, %zu) null\n", __func__,
		    (size_t)off, len);
		return (0);
	}
	/* Build a single-segment kernel-space uio over the caller's buffer. */
	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = off;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_resid = len;
	auio.uio_td = curthread;
	error = tarfs_io_read(tmp, raw, &auio);
	if (error != 0) {
		TARFS_DPF(IO, "%s(%zu, %zu) error %d\n", __func__,
		    (size_t)off, len, error);
		/* Negative errno signals failure to the caller. */
		return (-error);
	}
	res = len - auio.uio_resid;
	if (res == 0 && len != 0) {
		TARFS_DPF(IO, "%s(%zu, %zu) eof\n", __func__,
		    (size_t)off, len);
	} else {
		TARFS_DPF(IO, "%s(%zu, %zu) read %zd | %*D\n", __func__,
		    (size_t)off, len, res,
		    (int)(res > 8 ? 8 : res), (uint8_t *)buf, " ");
	}
	return (res);
}

#ifdef ZSTDIO
/* zstd custom-allocator hook: allocate from the tarfs zstate zone. */
static void *
tarfs_zstate_alloc(void *opaque, size_t size)
{

	(void)opaque;
	return (malloc(size, M_TARFSZSTATE, M_WAITOK));
}
#endif

#ifdef ZSTDIO
/* zstd custom-allocator hook: release to the tarfs zstate zone. */
static void
tarfs_zstate_free(void *opaque, void *address)
{

	(void)opaque;
	free(address, M_TARFSZSTATE);
}
#endif

#ifdef ZSTDIO
/* Custom allocator passed to ZSTD_createDStream_advanced(). */
static ZSTD_customMem tarfs_zstd_mem = {
	tarfs_zstate_alloc,
	tarfs_zstate_free,
	NULL,
};
#endif

/*
 * Updates the decompression frame index, recording the current input and
 * output offsets in a new index entry, and growing the index if
 * necessary.
 */
static void
tarfs_zio_update_index(struct tarfs_zio *zio, off_t i, off_t o)
{

	if (++zio->curidx >= zio->nidx) {
		if (++zio->nidx > zio->szidx) {
			/* Grow geometrically; M_WAITOK realloc cannot fail. */
			zio->szidx *= 2;
			zio->idx = realloc(zio->idx,
			    zio->szidx * sizeof(*zio->idx),
			    M_TARFSZSTATE, M_ZERO | M_WAITOK);
			TARFS_DPF(ALLOC, "%s: resized zio index\n", __func__);
		}
		zio->idx[zio->curidx].i = i;
		zio->idx[zio->curidx].o = o;
		TARFS_DPF(ZIDX, "%s: index %u = i %zu o %zu\n",
		    __func__, zio->curidx,
		    (size_t)zio->idx[zio->curidx].i,
		    (size_t)zio->idx[zio->curidx].o);
	}
	/* Re-visiting an existing entry must reproduce the same offsets. */
	MPASS(zio->idx[zio->curidx].i == i);
	MPASS(zio->idx[zio->curidx].o == o);
}

/*
 * VOP_ACCESS for zio node.  Read access is delegated to the underlying
 * tarball vnode; everything else is denied.
 */
static int
tarfs_zaccess(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct tarfs_zio *zio = vp->v_data;
	struct tarfs_mount *tmp = zio->tmp;
	accmode_t accmode = ap->a_accmode;
	int error = EPERM;

	if (accmode == VREAD) {
		error = vn_lock(tmp->vp, LK_SHARED);
		if (error == 0) {
			error = VOP_ACCESS(tmp->vp, accmode, ap->a_cred, ap->a_td);
			VOP_UNLOCK(tmp->vp);
		}
	}
	TARFS_DPF(ZIO, "%s(%d) = %d\n", __func__, accmode, error);
	return (error);
}

/*
 * VOP_GETATTR for zio node.
 */
static int
tarfs_zgetattr(struct vop_getattr_args *ap)
{
	struct vattr va;
	struct vnode *vp = ap->a_vp;
	struct tarfs_zio *zio = vp->v_data;
	struct tarfs_mount *tmp = zio->tmp;
	struct vattr *vap = ap->a_vap;
	int error = 0;

	VATTR_NULL(vap);
	/* Base most attributes on the underlying tarball vnode. */
	error = vn_lock(tmp->vp, LK_SHARED);
	if (error == 0) {
		error = VOP_GETATTR(tmp->vp, &va, ap->a_cred);
		VOP_UNLOCK(tmp->vp);
		if (error == 0) {
			vap->va_type = VREG;
			vap->va_mode = va.va_mode;
			vap->va_nlink = 1;
			vap->va_gid = va.va_gid;
			vap->va_uid = va.va_uid;
			vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
			vap->va_fileid = TARFS_ZIOINO;
			/*
			 * Reported size is the highest decompressed offset
			 * recorded in the frame index so far.
			 */
			vap->va_size = zio->idx[zio->nidx - 1].o;
			vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
			vap->va_atime = va.va_atime;
			vap->va_ctime = va.va_ctime;
			vap->va_mtime = va.va_mtime;
			vap->va_birthtime = tmp->root->birthtime;
			vap->va_bytes = va.va_bytes;
		}
	}
	TARFS_DPF(ZIO, "%s() = %d\n", __func__, error);
	return (error);
}

#ifdef ZSTDIO
/*
 * VOP_READ for zio node, zstd edition.  Seeks the decompression stream to
 * the nearest indexed frame at or before the requested offset, then
 * inflates (discarding any prefix) until the uio is satisfied.
 */
static int
tarfs_zread_zstd(struct tarfs_zio *zio, struct uio *uiop)
{
	void *ibuf = NULL, *obuf = NULL, *rl = NULL;
	struct uio auio;
	struct iovec aiov;
	struct tarfs_mount *tmp = zio->tmp;
	struct tarfs_zstd *zstd = zio->zstd;
	struct thread *td = curthread;
	ZSTD_inBuffer zib;
	ZSTD_outBuffer zob;
	off_t zsize;
	off_t ipos, opos;
	size_t ilen, olen;
	size_t zerror;
	off_t off = uiop->uio_offset;
	size_t len = uiop->uio_resid;
	size_t resid = uiop->uio_resid;
	size_t bsize;
	int error;
	bool reset = false;

	/* do we have to rewind? */
	if (off < zio->opos) {
		while (zio->curidx > 0 && off < zio->idx[zio->curidx].o)
			zio->curidx--;
		reset = true;
	}
	/* advance to the nearest index entry */
	if (off > zio->opos) {
		// XXX maybe do a binary search instead
		while (zio->curidx < zio->nidx - 1 &&
		    off >= zio->idx[zio->curidx + 1].o) {
			zio->curidx++;
			reset = true;
		}
	}
	/* reset the decompression stream if needed */
	if (reset) {
		zio->ipos = zio->idx[zio->curidx].i;
		zio->opos = zio->idx[zio->curidx].o;
		ZSTD_resetDStream(zstd->zds);
		TARFS_DPF(ZIDX, "%s: skipping to index %u = i %zu o %zu\n",
		    __func__, zio->curidx, (size_t)zio->ipos,
		    (size_t)zio->opos);
	} else {
		TARFS_DPF(ZIDX, "%s: continuing at i %zu o %zu\n",
		    __func__, (size_t)zio->ipos, (size_t)zio->opos);
	}
	/*
	 * Set up a temporary buffer for compressed data.  Use the size
	 * recommended by the zstd library; this is usually 128 kB, but
	 * just in case, make sure it's a multiple of the page size and no
	 * larger than MAXBSIZE.
	 */
	bsize = roundup(ZSTD_CStreamOutSize(), PAGE_SIZE);
	if (bsize > MAXBSIZE)
		bsize = MAXBSIZE;
	ibuf = malloc(bsize, M_TEMP, M_WAITOK);
	zib.src = NULL;
	zib.size = 0;
	zib.pos = 0;
	/*
	 * Set up the decompression buffer.  If the target is not in
	 * kernel space, we will have to set up a bounce buffer.
	 *
	 * TODO: to avoid using a bounce buffer, map destination pages
	 * using vm_fault_quick_hold_pages().
	 */
	MPASS(zio->opos <= off);
	MPASS(uiop->uio_iovcnt == 1);
	MPASS(uiop->uio_iov->iov_len >= len);
	if (uiop->uio_segflg == UIO_SYSSPACE) {
		zob.dst = uiop->uio_iov->iov_base;
	} else {
		TARFS_DPF(ALLOC, "%s: allocating %zu-byte bounce buffer\n",
		    __func__, len);
		zob.dst = obuf = malloc(len, M_TEMP, M_WAITOK);
	}
	zob.size = len;
	zob.pos = 0;

	/* lock tarball */
	rl = vn_rangelock_rlock(tmp->vp, zio->ipos, OFF_MAX);
	error = vn_lock(tmp->vp, LK_SHARED);
	if (error != 0) {
		goto fail_unlocked;
	}
	/* check size */
	error = vn_getsize_locked(tmp->vp, &zsize, td->td_ucred);
	if (error != 0) {
		goto fail;
	}
	if (zio->ipos >= zsize) {
		/* beyond EOF */
		goto fail;
	}
	while (resid > 0) {
		if (zib.pos == zib.size) {
			/* request data from the underlying file */
			aiov.iov_base = ibuf;
			aiov.iov_len = bsize;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = zio->ipos;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_resid = aiov.iov_len;
			auio.uio_td = td;
			error = VOP_READ(tmp->vp, &auio,
			    IO_DIRECT | IO_NODELOCKED, td->td_ucred);
			if (error != 0)
				goto fail;
			TARFS_DPF(ZIO, "%s: req %zu+%zu got %zu+%zu\n",
			    __func__, (size_t)zio->ipos, bsize,
			    (size_t)zio->ipos, bsize - auio.uio_resid);
			zib.src = ibuf;
			zib.size = bsize - auio.uio_resid;
			zib.pos = 0;
		}
		MPASS(zib.pos <= zib.size);
		if (zib.pos == zib.size) {
			TARFS_DPF(ZIO, "%s: end of file after i %zu o %zu\n",
			    __func__, (size_t)zio->ipos, (size_t)zio->opos);
			goto fail;
		}
		if (zio->opos < off) {
			/* to be discarded */
			zob.size = min(off - zio->opos, len);
			zob.pos = 0;
		} else {
			zob.size = len;
			zob.pos = zio->opos - off;
		}
		ipos = zib.pos;
		opos = zob.pos;
		/* decompress as much as possible */
		zerror = ZSTD_decompressStream(zstd->zds, &zob, &zib);
		zio->ipos += ilen = zib.pos - ipos;
		zio->opos += olen = zob.pos - opos;
		/* only count output toward resid once past the prefix */
		if (zio->opos > off)
			resid -= olen;
		if (ZSTD_isError(zerror)) {
			TARFS_DPF(ZIO, "%s: inflate failed after i %zu o %zu: %s\n",
			    __func__, (size_t)zio->ipos, (size_t)zio->opos,
			    ZSTD_getErrorName(zerror));
			error = EIO;
			goto fail;
		}
		if (zerror == 0 && olen == 0) {
			TARFS_DPF(ZIO, "%s: end of stream after i %zu o %zu\n",
			    __func__, (size_t)zio->ipos, (size_t)zio->opos);
			break;
		}
		if (zerror == 0) {
			TARFS_DPF(ZIO, "%s: end of frame after i %zu o %zu\n",
			    __func__, (size_t)zio->ipos, (size_t)zio->opos);
			/* frame boundary: remember it for future seeks */
			tarfs_zio_update_index(zio, zio->ipos, zio->opos);
		}
		TARFS_DPF(ZIO, "%s: inflated %zu\n", __func__, olen);
#ifdef TARFS_DEBUG
		counter_u64_add(tarfs_zio_inflated, olen);
#endif
	}
fail:
	VOP_UNLOCK(tmp->vp);
fail_unlocked:
	if (error == 0) {
		if (uiop->uio_segflg == UIO_SYSSPACE) {
			uiop->uio_resid = resid;
		} else if (len > resid) {
			TARFS_DPF(ALLOC, "%s: bounced %zu bytes\n", __func__,
			    len - resid);
			error = uiomove(obuf, len - resid, uiop);
#ifdef TARFS_DEBUG
			counter_u64_add(tarfs_zio_bounced, len - resid);
#endif
		}
	}
	if (obuf != NULL) {
		TARFS_DPF(ALLOC, "%s: freeing bounce buffer\n", __func__);
		free(obuf, M_TEMP);
	}
	if (rl != NULL)
		vn_rangelock_unlock(tmp->vp, rl);
	if (ibuf != NULL)
		free(ibuf, M_TEMP);
	TARFS_DPF(ZIO, "%s(%zu, %zu) = %d (resid %zd)\n", __func__,
	    (size_t)off, len, error, uiop->uio_resid);
#ifdef TARFS_DEBUG
	counter_u64_add(tarfs_zio_consumed, len - uiop->uio_resid);
#endif
	if (error != 0) {
		/* On failure, rewind the stream to a known-good state. */
		zio->curidx = 0;
		zio->ipos = zio->idx[0].i;
		zio->opos = zio->idx[0].o;
		ZSTD_resetDStream(zstd->zds);
	}
	return (error);
}
#endif

/*
 * VOP_READ for zio node.  Dispatches to the zstd reader when zstd state
 * is present; otherwise the node is of an unsupported format.
 */
static int
tarfs_zread(struct vop_read_args *ap)
{
#if defined(TARFS_DEBUG) || defined(ZSTDIO)
	struct vnode *vp = ap->a_vp;
	struct tarfs_zio *zio = vp->v_data;
	struct uio *uiop = ap->a_uio;
#endif
#ifdef TARFS_DEBUG
	off_t off = uiop->uio_offset;
	size_t len = uiop->uio_resid;
#endif
	int error;

	TARFS_DPF(ZIO, "%s(%zu, %zu)\n", __func__,
	    (size_t)off, len);
#ifdef ZSTDIO
	if (zio->zstd != NULL) {
		error = tarfs_zread_zstd(zio, uiop);
	} else
#endif
		error = EFTYPE;
	TARFS_DPF(ZIO, "%s(%zu, %zu) = %d (resid %zd)\n", __func__,
	    (size_t)off, len, error, uiop->uio_resid);
	return (error);
}

/*
 * VOP_RECLAIM for zio node.
 */
static int
tarfs_zreclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;

	TARFS_DPF(ZIO, "%s(%p)\n", __func__, vp);
	vp->v_data = NULL;
	/* NOTE(review): the next two lines carry diff '-' markers — this
	 * patch removes them (vgone()/reclaim handles both generically). */
-	vnode_destroy_vobject(vp);
-	cache_purge(vp);
	return (0);
}

/*
 * VOP_STRATEGY for zio node.  Translates the buf into a kernel-space uio
 * and services it via VOP_READ on the same vnode.
 */
static int
tarfs_zstrategy(struct vop_strategy_args *ap)
{
	struct uio auio;
	struct iovec iov;
	struct vnode *vp = ap->a_vp;
	struct buf *bp = ap->a_bp;
	off_t off;
	size_t len;
	int error;

	iov.iov_base = bp->b_data;
	iov.iov_len = bp->b_bcount;
	off = bp->b_iooffset;
	len = bp->b_bcount;
	bp->b_resid = len;
	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = off;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;
	error = VOP_READ(vp, &auio, IO_DIRECT | IO_NODELOCKED, bp->b_rcred);
	bp->b_flags |= B_DONE;
	if (error != 0) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	}
	return (0);
}

static struct vop_vector tarfs_znodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		tarfs_zaccess,
	.vop_getattr =		tarfs_zgetattr,
	.vop_read =		tarfs_zread,
	.vop_reclaim =		tarfs_zreclaim,
	.vop_strategy =		tarfs_zstrategy,
};
VFS_VOP_VECTOR_REGISTER(tarfs_znodeops);

/*
 * Initializes the decompression layer.
*/ static struct tarfs_zio * tarfs_zio_init(struct tarfs_mount *tmp, off_t i, off_t o) { struct tarfs_zio *zio; struct vnode *zvp; zio = malloc(sizeof(*zio), M_TARFSZSTATE, M_ZERO | M_WAITOK); TARFS_DPF(ALLOC, "%s: allocated zio\n", __func__); zio->tmp = tmp; zio->szidx = 128; zio->idx = malloc(zio->szidx * sizeof(*zio->idx), M_TARFSZSTATE, M_ZERO | M_WAITOK); zio->curidx = 0; zio->nidx = 1; zio->idx[zio->curidx].i = zio->ipos = i; zio->idx[zio->curidx].o = zio->opos = o; tmp->zio = zio; TARFS_DPF(ALLOC, "%s: allocated zio index\n", __func__); getnewvnode("tarfsz", tmp->vfs, &tarfs_znodeops, &zvp); zvp->v_data = zio; zvp->v_type = VREG; zvp->v_mount = tmp->vfs; vn_set_state(zvp, VSTATE_CONSTRUCTED); tmp->znode = zvp; TARFS_DPF(ZIO, "%s: created zio node\n", __func__); return (zio); } /* * Initializes the I/O layer, including decompression if the signature of * a supported compression format is detected. Returns 0 on success and a * positive errno value on failure. */ int tarfs_io_init(struct tarfs_mount *tmp) { uint8_t *block; #ifdef ZSTDIO struct tarfs_zio *zio = NULL; #endif ssize_t res; int error = 0; block = malloc(tmp->iosize, M_TEMP, M_ZERO | M_WAITOK); res = tarfs_io_read_buf(tmp, true, block, 0, tmp->iosize); if (res < 0) { return (-res); } if (memcmp(block, XZ_MAGIC, sizeof(XZ_MAGIC)) == 0) { printf("xz compression not supported\n"); error = EOPNOTSUPP; goto bad; } else if (memcmp(block, ZLIB_MAGIC, sizeof(ZLIB_MAGIC)) == 0) { printf("zlib compression not supported\n"); error = EOPNOTSUPP; goto bad; } else if (memcmp(block, ZSTD_MAGIC, sizeof(ZSTD_MAGIC)) == 0) { #ifdef ZSTDIO zio = tarfs_zio_init(tmp, 0, 0); zio->zstd = malloc(sizeof(*zio->zstd), M_TARFSZSTATE, M_WAITOK); zio->zstd->zds = ZSTD_createDStream_advanced(tarfs_zstd_mem); (void)ZSTD_initDStream(zio->zstd->zds); #else printf("zstd compression not supported\n"); error = EOPNOTSUPP; goto bad; #endif } bad: free(block, M_TEMP); return (error); } /* * Tears down the decompression layer. 
 */
static int
tarfs_zio_fini(struct tarfs_mount *tmp)
{
	struct tarfs_zio *zio = tmp->zio;
	int error = 0;

	if (tmp->znode != NULL) {
		error = vn_lock(tmp->znode, LK_EXCLUSIVE);
		if (error != 0) {
			TARFS_DPF(ALLOC, "%s: failed to lock znode", __func__);
			return (error);
		}
		tmp->znode->v_mount = NULL;
		vgone(tmp->znode);
		vput(tmp->znode);
		tmp->znode = NULL;
	}
#ifdef ZSTDIO
	if (zio->zstd != NULL) {
		TARFS_DPF(ALLOC, "%s: freeing zstd state\n", __func__);
		ZSTD_freeDStream(zio->zstd->zds);
		free(zio->zstd, M_TARFSZSTATE);
	}
#endif
	if (zio->idx != NULL) {
		TARFS_DPF(ALLOC, "%s: freeing index\n", __func__);
		free(zio->idx, M_TARFSZSTATE);
	}
	TARFS_DPF(ALLOC, "%s: freeing zio\n", __func__);
	free(zio, M_TARFSZSTATE);
	tmp->zio = NULL;
	return (error);
}

/*
 * Tears down the I/O layer, including the decompression layer if
 * applicable.
 */
int
tarfs_io_fini(struct tarfs_mount *tmp)
{
	int error = 0;

	if (tmp->zio != NULL) {
		error = tarfs_zio_fini(tmp);
	}
	return (error);
}
diff --git a/sys/fs/tarfs/tarfs_vnops.c b/sys/fs/tarfs/tarfs_vnops.c
index 43c434e370e3..266002bca7b2 100644
--- a/sys/fs/tarfs/tarfs_vnops.c
+++ b/sys/fs/tarfs/tarfs_vnops.c
@@ -1,645 +1,643 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Juniper Networks, Inc.
 * Copyright (c) 2022-2023 Klara, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_tarfs.h"

/* NOTE(review): the <...> targets of these includes were stripped by the
 * extraction that produced this file — restore from upstream tarfs_vnops.c. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * VOP_OPEN: only regular files and directories may be opened.
 */
static int
tarfs_open(struct vop_open_args *ap)
{
	struct tarfs_node *tnp;
	struct vnode *vp;

	vp = ap->a_vp;
	MPASS(VOP_ISLOCKED(vp));
	tnp = VP_TO_TARFS_NODE(vp);

	TARFS_DPF(VNODE, "%s(%p=%s, %o)\n", __func__,
	    tnp, tnp->name, ap->a_mode);

	if (vp->v_type != VREG && vp->v_type != VDIR)
		return (EOPNOTSUPP);

	vnode_create_vobject(vp, tnp->size, ap->a_td);
	return (0);
}

/*
 * VOP_CLOSE: nothing to do beyond debug logging.
 */
static int
tarfs_close(struct vop_close_args *ap)
{
#ifdef TARFS_DEBUG
	struct tarfs_node *tnp;
	struct vnode *vp;

	vp = ap->a_vp;

	MPASS(VOP_ISLOCKED(vp));
	tnp = VP_TO_TARFS_NODE(vp);

	TARFS_DPF(VNODE, "%s(%p=%s)\n", __func__,
	    tnp, tnp->name);
#else
	(void)ap;
#endif
	return (0);
}

/*
 * VOP_ACCESS: the filesystem is read-only, so any write request fails
 * with EROFS (files) or EPERM (special nodes); reads go through vaccess().
 */
static int
tarfs_access(struct vop_access_args *ap)
{
	struct tarfs_node *tnp;
	struct vnode *vp;
	accmode_t accmode;
	struct ucred *cred;
	int error;

	vp = ap->a_vp;
	accmode = ap->a_accmode;
	cred = ap->a_cred;

	MPASS(VOP_ISLOCKED(vp));
	tnp = VP_TO_TARFS_NODE(vp);

	TARFS_DPF(VNODE, "%s(%p=%s, %o)\n", __func__,
	    tnp, tnp->name, accmode);

	switch (vp->v_type) {
	case VDIR:
	case VLNK:
	case VREG:
		if ((accmode & VWRITE) != 0)
			return (EROFS);
		break;
	case VBLK:
	case VCHR:
	case VFIFO:
		break;
	default:
		return (EINVAL);
	}

	if ((accmode & VWRITE) != 0)
		return (EPERM);

	error = vaccess(vp->v_type, tnp->mode, tnp->uid,
	    tnp->gid, accmode, cred);
	return (error);
}

/*
 * VOP_GETATTR: attributes come straight from the in-memory tarfs node.
 */
static int
tarfs_getattr(struct vop_getattr_args *ap)
{
	struct tarfs_node *tnp;
	struct vnode *vp;
	struct vattr *vap;

	vp = ap->a_vp;
	vap = ap->a_vap;
	tnp = VP_TO_TARFS_NODE(vp);

	TARFS_DPF(VNODE, "%s(%p=%s)\n", __func__,
	    tnp, tnp->name);

	vap->va_type = vp->v_type;
	vap->va_mode = tnp->mode;
	vap->va_nlink = tnp->nlink;
	vap->va_gid = tnp->gid;
	vap->va_uid = tnp->uid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = tnp->ino;
	vap->va_size = tnp->size;
	vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
	vap->va_atime = tnp->atime;
	vap->va_ctime = tnp->ctime;
	vap->va_mtime = tnp->mtime;
	vap->va_birthtime = tnp->birthtime;
	vap->va_gen = tnp->gen;
	vap->va_flags = tnp->flags;
	vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
	    tnp->rdev : NODEV;
	vap->va_bytes = round_page(tnp->physize);
	vap->va_filerev = 0;

	return (0);
}

/*
 * VOP_CACHEDLOOKUP: handles ".", "..", the debug-only zio node, and
 * ordinary directory entries via tarfs_lookup_node().
 */
static int
tarfs_lookup(struct vop_cachedlookup_args *ap)
{
	struct tarfs_node *dirnode, *parent, *tnp;
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
#ifdef TARFS_DEBUG
	struct vnode *vp;
#endif
	int error;

	dvp = ap->a_dvp;
	vpp = ap->a_vpp;
	cnp = ap->a_cnp;

	*vpp = NULLVP;
	dirnode = VP_TO_TARFS_NODE(dvp);
	parent = dirnode->parent;
	tnp = NULL;

	TARFS_DPF(LOOKUP, "%s(%p=%s, %.*s)\n", __func__,
	    dirnode, dirnode->name,
	    (int)cnp->cn_namelen, cnp->cn_nameptr);

	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, curthread);
	if (error != 0)
		return (error);

	if (cnp->cn_flags & ISDOTDOT) {
		/* Do not allow .. on the root node */
		if (parent == NULL || parent == dirnode)
			return (ENOENT);
		/* Allocate a new vnode on the matching entry */
		error = vn_vget_ino(dvp, parent->ino, cnp->cn_lkflags, vpp);
		if (error != 0)
			return (error);
	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		VREF(dvp);
		*vpp = dvp;
#ifdef TARFS_DEBUG
	} else if (dirnode == dirnode->tmp->root &&
	    (vp = dirnode->tmp->znode) != NULL &&
	    cnp->cn_namelen == TARFS_ZIO_NAMELEN &&
	    memcmp(cnp->cn_nameptr, TARFS_ZIO_NAME,
	    TARFS_ZIO_NAMELEN) == 0) {
		error = vn_lock(vp, cnp->cn_lkflags);
		if (error != 0)
			return (error);
		vref(vp);
		*vpp = vp;
		return (0);
#endif
	} else {
		tnp = tarfs_lookup_node(dirnode, NULL, cnp);
		if (tnp == NULL) {
			TARFS_DPF(LOOKUP, "%s(%p=%s, %.*s): file not found\n",
			    __func__, dirnode, dirnode->name,
			    (int)cnp->cn_namelen, cnp->cn_nameptr);
			return (ENOENT);
		}
		if ((cnp->cn_flags & ISLASTCN) == 0 &&
		    (tnp->type != VDIR && tnp->type != VLNK))
			return (ENOTDIR);
		error = vn_vget_ino(dvp, tnp->ino, cnp->cn_lkflags, vpp);
		if (error != 0)
			return (error);
	}

#ifdef TARFS_DEBUG
	if (tnp == NULL)
		tnp = VP_TO_TARFS_NODE(*vpp);
	TARFS_DPF(LOOKUP, "%s: found vnode %p, tarfs_node %p\n", __func__,
	    *vpp, tnp);
#endif /* TARFS_DEBUG */

	/* Store the result in the cache if MAKEENTRY is specified in flags */
	if ((cnp->cn_flags & MAKEENTRY) != 0 && cnp->cn_nameiop != CREATE)
		cache_enter(dvp, *vpp, cnp);

	return (error);
}

/*
 * VOP_READDIR: emits synthetic "." and ".." entries, then walks the
 * directory's child list.  Cookies TARFS_COOKIE_{DOT,DOTDOT,EOF} mark the
 * synthetic positions; otherwise the cookie is the next child's inode.
 */
static int
tarfs_readdir(struct vop_readdir_args *ap)
{
	struct dirent cde;
	struct tarfs_node *current, *tnp;
	struct vnode *vp;
	struct uio *uio;
	int *eofflag;
	uint64_t **cookies;
	int *ncookies;
	off_t off;
	u_int idx, ndirents;
	int error;

	vp = ap->a_vp;
	uio = ap->a_uio;
	eofflag = ap->a_eofflag;
	cookies = ap->a_cookies;
	ncookies = ap->a_ncookies;

	if (vp->v_type != VDIR)
		return (ENOTDIR);

	tnp = VP_TO_TARFS_NODE(vp);
	off = uio->uio_offset;
	current = NULL;
	ndirents = 0;

	TARFS_DPF(VNODE, "%s(%p=%s, %zu, %zd)\n", __func__,
	    tnp, tnp->name, uio->uio_offset, uio->uio_resid);

	if (uio->uio_offset == TARFS_COOKIE_EOF) {
		TARFS_DPF(VNODE, "%s: EOF\n", __func__);
		return (0);
	}

	if (uio->uio_offset == TARFS_COOKIE_DOT) {
		TARFS_DPF(VNODE, "%s: Generating . entry\n", __func__);
		/* fake . entry */
		cde.d_fileno = tnp->ino;
		cde.d_type = DT_DIR;
		cde.d_namlen = 1;
		cde.d_name[0] = '.';
		cde.d_name[1] = '\0';
		cde.d_reclen = GENERIC_DIRSIZ(&cde);
		if (cde.d_reclen > uio->uio_resid)
			goto full;
		dirent_terminate(&cde);
		error = uiomove(&cde, cde.d_reclen, uio);
		if (error)
			return (error);
		/* next is .. */
		uio->uio_offset = TARFS_COOKIE_DOTDOT;
		ndirents++;
	}

	if (uio->uio_offset == TARFS_COOKIE_DOTDOT) {
		TARFS_DPF(VNODE, "%s: Generating .. entry\n", __func__);
		/* fake .. entry */
		MPASS(tnp->parent != NULL);
		TARFS_NODE_LOCK(tnp->parent);
		cde.d_fileno = tnp->parent->ino;
		TARFS_NODE_UNLOCK(tnp->parent);
		cde.d_type = DT_DIR;
		cde.d_namlen = 2;
		cde.d_name[0] = '.';
		cde.d_name[1] = '.';
		cde.d_name[2] = '\0';
		cde.d_reclen = GENERIC_DIRSIZ(&cde);
		if (cde.d_reclen > uio->uio_resid)
			goto full;
		dirent_terminate(&cde);
		error = uiomove(&cde, cde.d_reclen, uio);
		if (error)
			return (error);
		/* next is first child */
		current = TAILQ_FIRST(&tnp->dir.dirhead);
		if (current == NULL)
			goto done;
		uio->uio_offset = current->ino;
		TARFS_DPF(VNODE, "%s: [%u] setting current node to %p=%s\n",
		    __func__, ndirents, current, current->name);
		ndirents++;
	}

	/* resuming previous call */
	if (current == NULL) {
		current = tarfs_lookup_dir(tnp, uio->uio_offset);
		if (current == NULL) {
			error = EINVAL;
			goto done;
		}
		uio->uio_offset = current->ino;
		TARFS_DPF(VNODE, "%s: [%u] setting current node to %p=%s\n",
		    __func__, ndirents, current, current->name);
	}

	for (;;) {
		cde.d_fileno = current->ino;
		switch (current->type) {
		case VBLK:
			cde.d_type = DT_BLK;
			break;
		case VCHR:
			cde.d_type = DT_CHR;
			break;
		case VDIR:
			cde.d_type = DT_DIR;
			break;
		case VFIFO:
			cde.d_type = DT_FIFO;
			break;
		case VLNK:
			cde.d_type = DT_LNK;
			break;
		case VREG:
			cde.d_type = DT_REG;
			break;
		default:
			panic("%s: tarfs_node %p, type %d\n", __func__,
			    current, current->type);
		}
		cde.d_namlen = current->namelen;
		MPASS(tnp->namelen < sizeof(cde.d_name));
		(void)memcpy(cde.d_name, current->name, current->namelen);
		cde.d_name[current->namelen] = '\0';
		cde.d_reclen = GENERIC_DIRSIZ(&cde);
		if (cde.d_reclen > uio->uio_resid)
			goto full;
		dirent_terminate(&cde);
		error = uiomove(&cde, cde.d_reclen, uio);
		if (error != 0)
			goto done;
		ndirents++;

		/* next sibling */
		current = TAILQ_NEXT(current, dirents);
		if (current == NULL)
			goto done;
		uio->uio_offset = current->ino;
		TARFS_DPF(VNODE, "%s: [%u] setting current node to %p=%s\n",
		    __func__, ndirents, current, current->name);
	}

full:
	if (cde.d_reclen > uio->uio_resid) {
		TARFS_DPF(VNODE, "%s: out of space, returning\n", __func__);
		error = (ndirents == 0) ? EINVAL : 0;
	}
done:
	TARFS_DPF(VNODE, "%s: %u entries written\n", __func__, ndirents);
	TARFS_DPF(VNODE, "%s: saving cache information\n", __func__);
	if (current == NULL) {
		uio->uio_offset = TARFS_COOKIE_EOF;
		tnp->dir.lastcookie = 0;
		tnp->dir.lastnode = NULL;
	} else {
		tnp->dir.lastcookie = current->ino;
		tnp->dir.lastnode = current;
	}

	if (eofflag != NULL) {
		TARFS_DPF(VNODE, "%s: Setting EOF flag\n", __func__);
		*eofflag = (error == 0 && current == NULL);
	}

	/* Update for NFS */
	if (error == 0 && cookies != NULL && ncookies != NULL) {
		TARFS_DPF(VNODE, "%s: Updating NFS cookies\n", __func__);
		current = NULL;
		/* Replay the walk above to produce one cookie per entry. */
		*cookies = malloc(ndirents * sizeof(off_t), M_TEMP, M_WAITOK);
		*ncookies = ndirents;
		for (idx = 0; idx < ndirents; idx++) {
			if (off == TARFS_COOKIE_DOT)
				off = TARFS_COOKIE_DOTDOT;
			else {
				if (off == TARFS_COOKIE_DOTDOT) {
					current = TAILQ_FIRST(&tnp->dir.dirhead);
				} else if (current != NULL) {
					current = TAILQ_NEXT(current, dirents);
				} else {
					current = tarfs_lookup_dir(tnp, off);
					current = TAILQ_NEXT(current, dirents);
				}
				if (current == NULL)
					off = TARFS_COOKIE_EOF;
				else
					off = current->ino;
			}
			TARFS_DPF(VNODE, "%s: [%u] offset %zu\n", __func__,
			    idx, off);
			(*cookies)[idx] = off;
		}
		MPASS(uio->uio_offset == off);
	}

	return (error);
}

/*
 * VOP_READ: loops over tarfs_read_file() until the uio is satisfied, EOF
 * is reached, or no progress is made.
 */
static int
tarfs_read(struct vop_read_args *ap)
{
	struct tarfs_node *tnp;
	struct uio *uiop;
	struct vnode *vp;
	size_t len;
	off_t resid;
	int error;

	uiop = ap->a_uio;
	vp = ap->a_vp;

	if (vp->v_type == VCHR || vp->v_type == VBLK)
		return (EOPNOTSUPP);
	if (vp->v_type != VREG)
		return (EISDIR);
	if (uiop->uio_offset < 0)
		return (EINVAL);

	tnp = VP_TO_TARFS_NODE(vp);
	error = 0;

	TARFS_DPF(VNODE, "%s(%p=%s, %zu, %zd)\n", __func__,
	    tnp, tnp->name, uiop->uio_offset, uiop->uio_resid);

	while ((resid = uiop->uio_resid) > 0) {
		if (tnp->size <= uiop->uio_offset)
			break;
		len = MIN(tnp->size - uiop->uio_offset, resid);
		if (len == 0)
			break;
		error = tarfs_read_file(tnp, len, uiop);
		/* stop on error or when no forward progress was made */
		if (error != 0 || resid == uiop->uio_resid)
			break;
	}
	return (error);
}

/*
 * VOP_READLINK: copy out the stored link target.
 */
static int
tarfs_readlink(struct vop_readlink_args *ap)
{
	struct tarfs_node *tnp;
	struct uio *uiop;
	struct vnode *vp;
	int error;

	uiop = ap->a_uio;
	vp = ap->a_vp;

	MPASS(uiop->uio_offset == 0);
	MPASS(vp->v_type == VLNK);

	tnp = VP_TO_TARFS_NODE(vp);

	TARFS_DPF(VNODE, "%s(%p=%s)\n", __func__,
	    tnp, tnp->name);

	error = uiomove(tnp->link.name,
	    MIN(tnp->size, uiop->uio_resid), uiop);

	return (error);
}

/*
 * VOP_RECLAIM: detach the vnode from its tarfs node.
 */
static int
tarfs_reclaim(struct vop_reclaim_args *ap)
{
	struct tarfs_node *tnp;
	struct vnode *vp;

	vp = ap->a_vp;
	tnp = VP_TO_TARFS_NODE(vp);

	vfs_hash_remove(vp);
	/* NOTE(review): the next two lines carry diff '-' markers — this
	 * patch removes them (handled generically during vnode reclaim). */
-	vnode_destroy_vobject(vp);
-	cache_purge(vp);

	TARFS_NODE_LOCK(tnp);
	tnp->vnode = NULLVP;
	vp->v_data = NULL;
	TARFS_NODE_UNLOCK(tnp);

	return (0);
}

/*
 * VOP_PRINT: debugging dump of the node.
 */
static int
tarfs_print(struct vop_print_args *ap)
{
	struct tarfs_node *tnp;
	struct vnode *vp;

	vp = ap->a_vp;
	tnp = VP_TO_TARFS_NODE(vp);

	printf("tag tarfs, tarfs_node %p, links %lu\n",
	    tnp, (unsigned long)tnp->nlink);
	printf("\tmode 0%o, owner %d, group %d, size %zd\n",
	    tnp->mode, tnp->uid, tnp->gid, tnp->size);

	if (vp->v_type == VFIFO)
		fifo_printinfo(vp);

	printf("\n");

	return (0);
}

/*
 * VOP_STRATEGY: service a read buf by clipping it to the file size and
 * forwarding it to tarfs_read_file() through a kernel-space uio.
 */
static int
tarfs_strategy(struct vop_strategy_args *ap)
{
	struct uio auio;
	struct iovec iov;
	struct tarfs_node *tnp;
	struct buf *bp;
	off_t off;
	size_t len;
	int error;

	tnp = VP_TO_TARFS_NODE(ap->a_vp);
	bp = ap->a_bp;
	MPASS(bp->b_iocmd == BIO_READ);
	MPASS(bp->b_iooffset >= 0);
	MPASS(bp->b_bcount > 0);
	MPASS(bp->b_bufsize >= bp->b_bcount);
	TARFS_DPF(VNODE, "%s(%p=%s, %zu, %ld/%ld)\n", __func__, tnp,
	    tnp->name, (size_t)bp->b_iooffset, bp->b_bcount, bp->b_bufsize);
	iov.iov_base = bp->b_data;
	iov.iov_len = bp->b_bcount;
	off = bp->b_iooffset;
	len = bp->b_bcount;
	bp->b_resid = len;
	if (off > tnp->size) {
		/* XXX read beyond EOF - figure out correct handling */
		error = EIO;
		goto out;
	}
	if (off + len > tnp->size) {
		/* clip to file length */
		len = tnp->size - off;
	}
	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = off;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;
	error = tarfs_read_file(tnp, len, &auio);
	bp->b_resid -= len - auio.uio_resid;
out:
	if (error != 0) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	}
	bp->b_flags |= B_DONE;
	return (0);
}

/*
 * VOP_VPTOFH: build an NFS file handle from inode number and generation.
 */
static int
tarfs_vptofh(struct vop_vptofh_args *ap)
{
	struct tarfs_fid *tfp;
	struct tarfs_node *tnp;

	tfp = (struct tarfs_fid *)ap->a_fhp;
	tnp = VP_TO_TARFS_NODE(ap->a_vp);

	tfp->len = sizeof(struct tarfs_fid);
	tfp->ino = tnp->ino;
	tfp->gen = tnp->gen;

	return (0);
}

struct vop_vector tarfs_vnodeops = {
	.vop_default =		&default_vnodeops,

	.vop_access =		tarfs_access,
	.vop_cachedlookup =	tarfs_lookup,
	.vop_close =		tarfs_close,
	.vop_getattr =		tarfs_getattr,
	.vop_lookup =		vfs_cache_lookup,
	.vop_open =		tarfs_open,
	.vop_print =		tarfs_print,
	.vop_read =		tarfs_read,
	.vop_readdir =		tarfs_readdir,
	.vop_readlink =		tarfs_readlink,
	.vop_reclaim =		tarfs_reclaim,
	.vop_strategy =		tarfs_strategy,
	.vop_vptofh =		tarfs_vptofh,
};
VFS_VOP_VECTOR_REGISTER(tarfs_vnodeops);