Index: head/sys/fs/ext2fs/ext2_alloc.c =================================================================== --- head/sys/fs/ext2fs/ext2_alloc.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_alloc.c (revision 361136) @@ -1,1569 +1,1582 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DEFINE(ext2fs); /* * ext2fs trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(ext2fs, , alloc, trace, "int", "char*"); SDT_PROBE_DEFINE3(ext2fs, , alloc, ext2_reallocblks_realloc, "ino_t", "e2fs_lbn_t", "e2fs_lbn_t"); SDT_PROBE_DEFINE1(ext2fs, , alloc, ext2_reallocblks_bap, "uint32_t"); SDT_PROBE_DEFINE1(ext2fs, , alloc, ext2_reallocblks_blkno, "e2fs_daddr_t"); SDT_PROBE_DEFINE2(ext2fs, , alloc, ext2_b_bitmap_validate_error, "char*", "int"); SDT_PROBE_DEFINE3(ext2fs, , alloc, ext2_nodealloccg_bmap_corrupted, "int", "daddr_t", "char*"); SDT_PROBE_DEFINE2(ext2fs, , alloc, ext2_blkfree_bad_block, "ino_t", "e4fs_daddr_t"); SDT_PROBE_DEFINE2(ext2fs, , alloc, ext2_vfree_doublefree, "char*", "ino_t"); static daddr_t ext2_alloccg(struct inode *, int, daddr_t, int); static daddr_t ext2_clusteralloc(struct inode *, int, daddr_t, int); static u_long ext2_dirpref(struct inode *); static e4fs_daddr_t ext2_hashalloc(struct inode *, int, long, int, daddr_t (*)(struct inode *, int, daddr_t, int)); static daddr_t ext2_nodealloccg(struct inode *, int, daddr_t, int); static daddr_t ext2_mapsearch(struct m_ext2fs *, char *, daddr_t); /* * Allocate a block in the filesystem. * * A preference may be optionally specified. 
If a preference is given * the following hierarchy is used to allocate a block: * 1) allocate the requested block. * 2) allocate a rotationally optimal block in the same cylinder. * 3) allocate a block in the same cylinder group. * 4) quadradically rehash into other cylinder groups, until an * available block is located. * If no block preference is given the following hierarchy is used * to allocate a block: * 1) allocate a block in the cylinder group that contains the * inode for the file. * 2) quadradically rehash into other cylinder groups, until an * available block is located. */ int ext2_alloc(struct inode *ip, daddr_t lbn, e4fs_daddr_t bpref, int size, struct ucred *cred, e4fs_daddr_t *bnp) { struct m_ext2fs *fs; struct ext2mount *ump; e4fs_daddr_t bno; int cg; *bnp = 0; fs = ip->i_e2fs; ump = ip->i_ump; mtx_assert(EXT2_MTX(ump), MA_OWNED); #ifdef INVARIANTS if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) { vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n", (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt); panic("ext2_alloc: bad size"); } if (cred == NOCRED) panic("ext2_alloc: missing credential"); #endif /* INVARIANTS */ if (size == fs->e2fs_bsize && fs->e2fs_fbcount == 0) goto nospace; if (cred->cr_uid != 0 && fs->e2fs_fbcount < fs->e2fs_rbcount) goto nospace; if (bpref >= fs->e2fs_bcount) bpref = 0; if (bpref == 0) cg = ino_to_cg(fs, ip->i_number); else cg = dtog(fs, bpref); bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize, ext2_alloccg); if (bno > 0) { /* set next_alloc fields as done in block_getblk */ ip->i_next_alloc_block = lbn; ip->i_next_alloc_goal = bno; ip->i_blocks += btodb(fs->e2fs_bsize); ip->i_flag |= IN_CHANGE | IN_UPDATE; *bnp = bno; return (0); } nospace: EXT2_UNLOCK(ump); SDT_PROBE2(ext2fs, , alloc, trace, 1, "cannot allocate data block"); return (ENOSPC); } /* * Allocate EA's block for inode. */ e4fs_daddr_t ext2_alloc_meta(struct inode *ip) { struct m_ext2fs *fs; daddr_t blk; fs = ip->i_e2fs; EXT2_LOCK(ip->i_ump); blk = ext2_hashalloc(ip, ino_to_cg(fs, ip->i_number), 0, fs->e2fs_bsize, ext2_alloccg); if (0 == blk) { EXT2_UNLOCK(ip->i_ump); SDT_PROBE2(ext2fs, , alloc, trace, 1, "cannot allocate meta block"); } return (blk); } /* * Reallocate a sequence of blocks into a contiguous sequence of blocks. * * The vnode and an array of buffer pointers for a range of sequential * logical blocks to be made contiguous is given. The allocator attempts * to find a range of sequential blocks starting as close as possible to * an fs_rotdelay offset from the end of the allocation for the logical * block immediately preceding the current range. If successful, the * physical block numbers in the buffer pointers and in the inode are * changed to reflect the new allocation. If unsuccessful, the allocation * is left unchanged. The success in doing the reallocation is returned. * Note that the error return is not reflected back to the user. Rather * the previous block allocation will be used. 
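 * Reallocation is attempted only when the vfs.ext2fs.doreallocblks
 * sysctl is enabled; extent-mapped (IN_E4EXTENTS) files and filesystems
 * without cluster summaries are skipped.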
*/ static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "EXT2FS filesystem"); static int doasyncfree = 1; SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "Use asychronous writes to update block pointers when freeing blocks"); static int doreallocblks = 0; SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, ""); int ext2_reallocblks(struct vop_reallocblks_args *ap) { struct m_ext2fs *fs; struct inode *ip; struct vnode *vp; struct buf *sbp, *ebp; uint32_t *bap, *sbap, *ebap; struct ext2mount *ump; struct cluster_save *buflist; struct indir start_ap[EXT2_NIADDR + 1], end_ap[EXT2_NIADDR + 1], *idp; e2fs_lbn_t start_lbn, end_lbn; int soff; e2fs_daddr_t newblk, blkno; int i, len, start_lvl, end_lvl, pref, ssize; if (doreallocblks == 0) return (ENOSPC); vp = ap->a_vp; ip = VTOI(vp); fs = ip->i_e2fs; ump = ip->i_ump; if (fs->e2fs_contigsumsize <= 0 || ip->i_flag & IN_E4EXTENTS) return (ENOSPC); buflist = ap->a_buflist; len = buflist->bs_nchildren; start_lbn = buflist->bs_children[0]->b_lblkno; end_lbn = start_lbn + len - 1; #ifdef INVARIANTS for (i = 1; i < len; i++) if (buflist->bs_children[i]->b_lblkno != start_lbn + i) panic("ext2_reallocblks: non-cluster"); #endif /* * If the cluster crosses the boundary for the first indirect * block, leave space for the indirect block. Indirect blocks * are initially laid out in a position after the last direct * block. Block reallocation would usually destroy locality by * moving the indirect block out of the way to make room for * data blocks if we didn't compensate here. We should also do * this for other indirect block boundaries, but it is only * important for the first one. */ if (start_lbn < EXT2_NDADDR && end_lbn >= EXT2_NDADDR) return (ENOSPC); /* * If the latest allocation is in a new cylinder group, assume that * the filesystem has decided to move and do not force it back to * the previous cylinder group. */ if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) != dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno))) return (ENOSPC); if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) || ext2_getlbns(vp, end_lbn, end_ap, &end_lvl)) return (ENOSPC); /* * Get the starting offset and block map for the first block. */ if (start_lvl == 0) { sbap = &ip->i_db[0]; soff = start_lbn; } else { idp = &start_ap[start_lvl - 1]; if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) { brelse(sbp); return (ENOSPC); } sbap = (u_int *)sbp->b_data; soff = idp->in_off; } /* * If the block range spans two block maps, get the second map. */ ebap = NULL; if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) { ssize = len; } else { #ifdef INVARIANTS if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn) panic("ext2_reallocblks: start == end"); #endif ssize = len - (idp->in_off + 1); if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp)) goto fail; ebap = (u_int *)ebp->b_data; } /* * Find the preferred location for the cluster. */ EXT2_LOCK(ump); pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0); /* * Search the block map looking for an allocation of the desired size. */ if ((newblk = (e2fs_daddr_t)ext2_hashalloc(ip, dtog(fs, pref), pref, len, ext2_clusteralloc)) == 0) { EXT2_UNLOCK(ump); goto fail; } /* * We have found a new contiguous block. * * First we have to replace the old block pointers with the new * block pointers in the inode and indirect blocks associated * with the file. 
*/ SDT_PROBE3(ext2fs, , alloc, ext2_reallocblks_realloc, ip->i_number, start_lbn, end_lbn); blkno = newblk; for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) { if (i == ssize) { bap = ebap; soff = -i; } #ifdef INVARIANTS if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap)) panic("ext2_reallocblks: alloc mismatch"); #endif SDT_PROBE1(ext2fs, , alloc, ext2_reallocblks_bap, *bap); *bap++ = blkno; } /* * Next we must write out the modified inode and indirect blocks. * For strict correctness, the writes should be synchronous since * the old block values may have been written to disk. In practise * they are almost never written, but if we are concerned about * strict correctness, the `doasyncfree' flag should be set to zero. * * The test on `doasyncfree' should be changed to test a flag * that shows whether the associated buffers and inodes have * been written. The flag should be set when the cluster is * started and cleared whenever the buffer or inode is flushed. * We can then check below to see if it is set, and do the * synchronous write only when it has been cleared. */ if (sbap != &ip->i_db[0]) { if (doasyncfree) bdwrite(sbp); else bwrite(sbp); } else { ip->i_flag |= IN_CHANGE | IN_UPDATE; if (!doasyncfree) ext2_update(vp, 1); } if (ssize < len) { if (doasyncfree) bdwrite(ebp); else bwrite(ebp); } /* * Last, free the old blocks and assign the new blocks to the buffers. */ for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) { ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->e2fs_bsize); buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno); SDT_PROBE1(ext2fs, , alloc, ext2_reallocblks_blkno, blkno); } return (0); fail: if (ssize < len) brelse(ebp); if (sbap != &ip->i_db[0]) brelse(sbp); return (ENOSPC); } /* * Allocate an inode in the filesystem. * */ int ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp) { struct timespec ts; struct m_ext2fs *fs; struct ext2mount *ump; struct inode *pip; struct inode *ip; struct vnode *vp; struct thread *td; ino_t ino, ipref; int error, cg; *vpp = NULL; pip = VTOI(pvp); fs = pip->i_e2fs; ump = pip->i_ump; EXT2_LOCK(ump); - if (fs->e2fs->e2fs_ficount == 0) + if (fs->e2fs_ficount == 0) goto noinodes; /* * If it is a directory then obtain a cylinder group based on * ext2_dirpref else obtain it using ino_to_cg. The preferred inode is * always the next inode. */ if ((mode & IFMT) == IFDIR) { cg = ext2_dirpref(pip); if (fs->e2fs_contigdirs[cg] < 255) fs->e2fs_contigdirs[cg]++; } else { cg = ino_to_cg(fs, pip->i_number); if (fs->e2fs_contigdirs[cg] > 0) fs->e2fs_contigdirs[cg]--; } - ipref = cg * fs->e2fs->e2fs_ipg + 1; + ipref = cg * fs->e2fs_ipg + 1; ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg); if (ino == 0) goto noinodes; td = curthread; error = vfs_hash_get(ump->um_mountp, ino, LK_EXCLUSIVE, td, vpp, NULL, NULL); if (error || *vpp != NULL) { return (error); } ip = malloc(sizeof(struct inode), M_EXT2NODE, M_WAITOK | M_ZERO); if (ip == NULL) { return (ENOMEM); } /* Allocate a new vnode/inode. 
*/ if ((error = getnewvnode("ext2fs", ump->um_mountp, &ext2_vnodeops, &vp)) != 0) { free(ip, M_EXT2NODE); return (error); } lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); vp->v_data = ip; ip->i_vnode = vp; ip->i_e2fs = fs = ump->um_e2fs; ip->i_ump = ump; ip->i_number = ino; ip->i_block_group = ino_to_cg(fs, ino); ip->i_next_alloc_block = 0; ip->i_next_alloc_goal = 0; error = insmntque(vp, ump->um_mountp); if (error) { free(ip, M_EXT2NODE); return (error); } error = vfs_hash_insert(vp, ino, LK_EXCLUSIVE, td, vpp, NULL, NULL); if (error || *vpp != NULL) { *vpp = NULL; free(ip, M_EXT2NODE); return (error); } if ((error = ext2_vinit(ump->um_mountp, &ext2_fifoops, &vp)) != 0) { vput(vp); *vpp = NULL; free(ip, M_EXT2NODE); return (error); } if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_EXTENTS) && (S_ISREG(mode) || S_ISDIR(mode))) ext4_ext_tree_init(ip); else memset(ip->i_data, 0, sizeof(ip->i_data)); /* * Set up a new generation number for this inode. * Avoid zero values. */ do { ip->i_gen = arc4random(); } while (ip->i_gen == 0); vfs_timestamp(&ts); ip->i_birthtime = ts.tv_sec; ip->i_birthnsec = ts.tv_nsec; *vpp = vp; return (0); noinodes: EXT2_UNLOCK(ump); SDT_PROBE2(ext2fs, , alloc, trace, 1, "out of inodes"); return (ENOSPC); } /* * 64-bit compatible getters and setters for struct ext2_gd from ext2fs.h */ uint64_t e2fs_gd_get_b_bitmap(struct ext2_gd *gd) { - return (((uint64_t)(gd->ext4bgd_b_bitmap_hi) << 32) | - gd->ext2bgd_b_bitmap); + return (((uint64_t)(le32toh(gd->ext4bgd_b_bitmap_hi)) << 32) | + le32toh(gd->ext2bgd_b_bitmap)); } uint64_t e2fs_gd_get_i_bitmap(struct ext2_gd *gd) { - return (((uint64_t)(gd->ext4bgd_i_bitmap_hi) << 32) | - gd->ext2bgd_i_bitmap); + return (((uint64_t)(le32toh(gd->ext4bgd_i_bitmap_hi)) << 32) | + le32toh(gd->ext2bgd_i_bitmap)); } uint64_t e2fs_gd_get_i_tables(struct ext2_gd *gd) { - return (((uint64_t)(gd->ext4bgd_i_tables_hi) << 32) | - gd->ext2bgd_i_tables); + return (((uint64_t)(le32toh(gd->ext4bgd_i_tables_hi)) << 32) | + le32toh(gd->ext2bgd_i_tables)); } static uint32_t e2fs_gd_get_nbfree(struct ext2_gd *gd) { - return (((uint32_t)(gd->ext4bgd_nbfree_hi) << 16) | - gd->ext2bgd_nbfree); + return (((uint32_t)(le16toh(gd->ext4bgd_nbfree_hi)) << 16) | + le16toh(gd->ext2bgd_nbfree)); } static void e2fs_gd_set_nbfree(struct ext2_gd *gd, uint32_t val) { - gd->ext2bgd_nbfree = val & 0xffff; - gd->ext4bgd_nbfree_hi = val >> 16; + gd->ext2bgd_nbfree = htole16(val & 0xffff); + gd->ext4bgd_nbfree_hi = htole16(val >> 16); } static uint32_t e2fs_gd_get_nifree(struct ext2_gd *gd) { - return (((uint32_t)(gd->ext4bgd_nifree_hi) << 16) | - gd->ext2bgd_nifree); + return (((uint32_t)(le16toh(gd->ext4bgd_nifree_hi)) << 16) | + le16toh(gd->ext2bgd_nifree)); } static void e2fs_gd_set_nifree(struct ext2_gd *gd, uint32_t val) { - gd->ext2bgd_nifree = val & 0xffff; - gd->ext4bgd_nifree_hi = val >> 16; + gd->ext2bgd_nifree = htole16(val & 0xffff); + gd->ext4bgd_nifree_hi = htole16(val >> 16); } uint32_t e2fs_gd_get_ndirs(struct ext2_gd *gd) { - return (((uint32_t)(gd->ext4bgd_ndirs_hi) << 16) | - gd->ext2bgd_ndirs); + return (((uint32_t)(le16toh(gd->ext4bgd_ndirs_hi)) << 16) | + le16toh(gd->ext2bgd_ndirs)); } static void e2fs_gd_set_ndirs(struct ext2_gd *gd, uint32_t val) { - gd->ext2bgd_ndirs = val & 0xffff; - gd->ext4bgd_ndirs_hi = val >> 16; + gd->ext2bgd_ndirs = htole16(val & 0xffff); + gd->ext4bgd_ndirs_hi = htole16(val >> 16); } static uint32_t e2fs_gd_get_i_unused(struct ext2_gd *gd) { - return (((uint32_t)(gd->ext4bgd_i_unused_hi) << 16) | - gd->ext4bgd_i_unused); + 
return ((uint32_t)(le16toh(gd->ext4bgd_i_unused_hi) << 16) | + le16toh(gd->ext4bgd_i_unused)); } static void e2fs_gd_set_i_unused(struct ext2_gd *gd, uint32_t val) { - gd->ext4bgd_i_unused = val & 0xffff; - gd->ext4bgd_i_unused_hi = val >> 16; + gd->ext4bgd_i_unused = htole16(val & 0xffff); + gd->ext4bgd_i_unused_hi = htole16(val >> 16); } /* * Find a cylinder to place a directory. * * The policy implemented by this algorithm is to allocate a * directory inode in the same cylinder group as its parent * directory, but also to reserve space for its files inodes * and data. Restrict the number of directories which may be * allocated one after another in the same cylinder group * without intervening allocation of files. * * If we allocate a first level directory then force allocation * in another cylinder group. * */ static u_long ext2_dirpref(struct inode *pip) { struct m_ext2fs *fs; int cg, prefcg, cgsize; uint64_t avgbfree, minbfree; u_int avgifree, avgndir, curdirsize; u_int minifree, maxndir; u_int mincg, minndir; u_int dirsize, maxcontigdirs; mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED); fs = pip->i_e2fs; - avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount; + avgifree = fs->e2fs_ficount / fs->e2fs_gcount; avgbfree = fs->e2fs_fbcount / fs->e2fs_gcount; avgndir = fs->e2fs_total_dir / fs->e2fs_gcount; /* * Force allocation in another cg if creating a first level dir. */ ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref"); if (ITOV(pip)->v_vflag & VV_ROOT) { prefcg = arc4random() % fs->e2fs_gcount; mincg = prefcg; minndir = fs->e2fs_ipg; for (cg = prefcg; cg < fs->e2fs_gcount; cg++) if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < minndir && e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree && e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= avgbfree) { mincg = cg; minndir = e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]); } for (cg = 0; cg < prefcg; cg++) if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < minndir && e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree && e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= avgbfree) { mincg = cg; minndir = e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]); } return (mincg); } /* * Count various limits which used for * optimal allocation of a directory inode. */ maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg); minifree = avgifree - avgifree / 4; if (minifree < 1) minifree = 1; minbfree = avgbfree - avgbfree / 4; if (minbfree < 1) minbfree = 1; cgsize = fs->e2fs_fsize * fs->e2fs_fpg; dirsize = AVGDIRSIZE; - curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0; + curdirsize = avgndir ? + (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0; if (dirsize < curdirsize) dirsize = curdirsize; maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255); maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR); if (maxcontigdirs == 0) maxcontigdirs = 1; /* * Limit number of dirs in one cg and reserve space for * regular files, but only if we have no deficit in * inodes or space. */ prefcg = ino_to_cg(fs, pip->i_number); for (cg = prefcg; cg < fs->e2fs_gcount; cg++) if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < maxndir && e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= minifree && e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= minbfree) { if (fs->e2fs_contigdirs[cg] < maxcontigdirs) return (cg); } for (cg = 0; cg < prefcg; cg++) if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < maxndir && e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= minifree && e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= minbfree) { if (fs->e2fs_contigdirs[cg] < maxcontigdirs) return (cg); } /* * This is a backstop when we have deficit in space. 
*/ for (cg = prefcg; cg < fs->e2fs_gcount; cg++) if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree) return (cg); for (cg = 0; cg < prefcg; cg++) if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree) break; return (cg); } /* * Select the desired position for the next block in a file. * * we try to mimic what Remy does in inode_getblk/block_getblk * * we note: blocknr == 0 means that we're about to allocate either * a direct block or a pointer block at the first level of indirection * (In other words, stuff that will go in i_db[] or i_ib[]) * * blocknr != 0 means that we're allocating a block that is none * of the above. Then, blocknr tells us the number of the block * that will hold the pointer */ e4fs_daddr_t ext2_blkpref(struct inode *ip, e2fs_lbn_t lbn, int indx, e2fs_daddr_t *bap, e2fs_daddr_t blocknr) { struct m_ext2fs *fs; int tmp; fs = ip->i_e2fs; mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED); /* * If the next block is actually what we thought it is, then set the * goal to what we thought it should be. */ if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0) return ip->i_next_alloc_goal; /* * Now check whether we were provided with an array that basically * tells us previous blocks to which we want to stay close. */ if (bap) for (tmp = indx - 1; tmp >= 0; tmp--) if (bap[tmp]) - return bap[tmp]; + return (le32toh(bap[tmp])); /* * Else lets fall back to the blocknr or, if there is none, follow * the rule that a block should be allocated near its inode. */ return (blocknr ? blocknr : (e2fs_daddr_t)(ip->i_block_group * - EXT2_BLOCKS_PER_GROUP(fs)) + fs->e2fs->e2fs_first_dblock); + EXT2_BLOCKS_PER_GROUP(fs)) + le32toh(fs->e2fs->e2fs_first_dblock)); } /* * Implement the cylinder overflow algorithm. * * The policy implemented by this algorithm is: * 1) allocate the block in its requested cylinder group. * 2) quadradically rehash on the cylinder group number. * 3) brute force search for a free block. */ static e4fs_daddr_t ext2_hashalloc(struct inode *ip, int cg, long pref, int size, daddr_t (*allocator) (struct inode *, int, daddr_t, int)) { struct m_ext2fs *fs; e4fs_daddr_t result; int i, icg = cg; mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED); fs = ip->i_e2fs; /* * 1: preferred cylinder group */ result = (*allocator)(ip, cg, pref, size); if (result) return (result); /* * 2: quadratic rehash */ for (i = 1; i < fs->e2fs_gcount; i *= 2) { cg += i; if (cg >= fs->e2fs_gcount) cg -= fs->e2fs_gcount; result = (*allocator)(ip, cg, 0, size); if (result) return (result); } /* * 3: brute force search * Note that we start at i == 2, since 0 was checked initially, * and 1 is always checked in the quadratic rehash. 
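 * (that pass probes groups icg + 1, icg + 3, icg + 7, ... modulo the
 * group count, so group icg + 1 is always visited)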
*/ cg = (icg + 2) % fs->e2fs_gcount; for (i = 2; i < fs->e2fs_gcount; i++) { result = (*allocator)(ip, cg, 0, size); if (result) return (result); cg++; if (cg == fs->e2fs_gcount) cg = 0; } return (0); } static uint64_t ext2_cg_number_gdb_nometa(struct m_ext2fs *fs, int cg) { if (!ext2_cg_has_sb(fs, cg)) return (0); if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG)) - return (fs->e2fs->e3fs_first_meta_bg); + return (le32toh(fs->e2fs->e3fs_first_meta_bg)); return ((fs->e2fs_gcount + EXT2_DESCS_PER_BLOCK(fs) - 1) / EXT2_DESCS_PER_BLOCK(fs)); } static uint64_t ext2_cg_number_gdb_meta(struct m_ext2fs *fs, int cg) { unsigned long metagroup; int first, last; metagroup = cg / EXT2_DESCS_PER_BLOCK(fs); first = metagroup * EXT2_DESCS_PER_BLOCK(fs); last = first + EXT2_DESCS_PER_BLOCK(fs) - 1; if (cg == first || cg == first + 1 || cg == last) return (1); return (0); } uint64_t ext2_cg_number_gdb(struct m_ext2fs *fs, int cg) { unsigned long first_meta_bg, metagroup; - first_meta_bg = fs->e2fs->e3fs_first_meta_bg; + first_meta_bg = le32toh(fs->e2fs->e3fs_first_meta_bg); metagroup = cg / EXT2_DESCS_PER_BLOCK(fs); if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) || metagroup < first_meta_bg) return (ext2_cg_number_gdb_nometa(fs, cg)); return ext2_cg_number_gdb_meta(fs, cg); } static int ext2_number_base_meta_blocks(struct m_ext2fs *fs, int cg) { int number; number = ext2_cg_has_sb(fs, cg); if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) || - cg < fs->e2fs->e3fs_first_meta_bg * EXT2_DESCS_PER_BLOCK(fs)) { + cg < le32toh(fs->e2fs->e3fs_first_meta_bg) * + EXT2_DESCS_PER_BLOCK(fs)) { if (number) { number += ext2_cg_number_gdb(fs, cg); - number += fs->e2fs->e2fs_reserved_ngdb; + number += le16toh(fs->e2fs->e2fs_reserved_ngdb); } } else { number += ext2_cg_number_gdb(fs, cg); } return (number); } static void ext2_mark_bitmap_end(int start_bit, int end_bit, char *bitmap) { int i; if (start_bit >= end_bit) return; for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++) setbit(bitmap, i); if (i < end_bit) memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); } static int ext2_get_group_number(struct m_ext2fs *fs, e4fs_daddr_t block) { - return ((block - fs->e2fs->e2fs_first_dblock) / fs->e2fs_bsize); + return ((block - le32toh(fs->e2fs->e2fs_first_dblock)) / + fs->e2fs_bsize); } static int ext2_block_in_group(struct m_ext2fs *fs, e4fs_daddr_t block, int cg) { return ((ext2_get_group_number(fs, block) == cg) ? 1 : 0); } static int ext2_cg_block_bitmap_init(struct m_ext2fs *fs, int cg, struct buf *bp) { int bit, bit_max, inodes_per_block; uint64_t start, tmp; - if (!(fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_BLOCK_UNINIT)) + if (!(le16toh(fs->e2fs_gd[cg].ext4bgd_flags) & EXT2_BG_BLOCK_UNINIT)) return (0); memset(bp->b_data, 0, fs->e2fs_bsize); bit_max = ext2_number_base_meta_blocks(fs, cg); if ((bit_max >> 3) >= fs->e2fs_bsize) return (EINVAL); for (bit = 0; bit < bit_max; bit++) setbit(bp->b_data, bit); - start = (uint64_t)cg * fs->e2fs->e2fs_bpg + fs->e2fs->e2fs_first_dblock; + start = (uint64_t)cg * fs->e2fs_bpg + + le32toh(fs->e2fs->e2fs_first_dblock); /* Set bits for block and inode bitmaps, and inode table. 
*/ tmp = e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg]); if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || ext2_block_in_group(fs, tmp, cg)) setbit(bp->b_data, tmp - start); tmp = e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg]); if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || ext2_block_in_group(fs, tmp, cg)) setbit(bp->b_data, tmp - start); tmp = e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]); inodes_per_block = fs->e2fs_bsize/EXT2_INODE_SIZE(fs); while( tmp < e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]) + - fs->e2fs->e2fs_ipg / inodes_per_block ) { + fs->e2fs_ipg / inodes_per_block ) { if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || ext2_block_in_group(fs, tmp, cg)) setbit(bp->b_data, tmp - start); tmp++; } /* * Also if the number of blocks within the group is less than * the blocksize * 8 ( which is the size of bitmap ), set rest * of the block bitmap to 1 */ - ext2_mark_bitmap_end(fs->e2fs->e2fs_bpg, fs->e2fs_bsize * 8, + ext2_mark_bitmap_end(fs->e2fs_bpg, fs->e2fs_bsize * 8, bp->b_data); /* Clean the flag */ - fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_BLOCK_UNINIT; + fs->e2fs_gd[cg].ext4bgd_flags = htole16(le16toh( + fs->e2fs_gd[cg].ext4bgd_flags) & ~EXT2_BG_BLOCK_UNINIT); return (0); } static int ext2_b_bitmap_validate(struct m_ext2fs *fs, struct buf *bp, int cg) { struct ext2_gd *gd; uint64_t group_first_block; unsigned int offset, max_bit; if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG)) { /* - * It is not possible to check block bitmap in case of this feature, - * because the inode and block bitmaps and inode table + * It is not possible to check block bitmap in case of this + * feature, because the inode and block bitmaps and inode table * blocks may not be in the group at all. * So, skip check in this case. */ return (0); } gd = &fs->e2fs_gd[cg]; max_bit = fs->e2fs_fpg; - group_first_block = ((uint64_t)cg) * fs->e2fs->e2fs_fpg + - fs->e2fs->e2fs_first_dblock; + group_first_block = ((uint64_t)cg) * fs->e2fs_fpg + + le32toh(fs->e2fs->e2fs_first_dblock); /* Check block bitmap block number */ offset = e2fs_gd_get_b_bitmap(gd) - group_first_block; if (offset >= max_bit || !isset(bp->b_data, offset)) { SDT_PROBE2(ext2fs, , alloc, ext2_b_bitmap_validate_error, "bad block bitmap, group", cg); return (EINVAL); } /* Check inode bitmap block number */ offset = e2fs_gd_get_i_bitmap(gd) - group_first_block; if (offset >= max_bit || !isset(bp->b_data, offset)) { SDT_PROBE2(ext2fs, , alloc, ext2_b_bitmap_validate_error, "bad inode bitmap", cg); return (EINVAL); } /* Check inode table */ offset = e2fs_gd_get_i_tables(gd) - group_first_block; if (offset >= max_bit || offset + fs->e2fs_itpg >= max_bit) { SDT_PROBE2(ext2fs, , alloc, ext2_b_bitmap_validate_error, "bad inode table, group", cg); return (EINVAL); } return (0); } /* * Determine whether a block can be allocated. * * Check to see if a block of the appropriate size is available, * and if it is, allocate it. 
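 * The preferred block is tried first, then a run of at least eight
 * contiguous free blocks near the preference, and finally
 * ext2_mapsearch() is used to take any single free block in the group.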
*/ static daddr_t ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size) { struct m_ext2fs *fs; struct buf *bp; struct ext2mount *ump; daddr_t bno, runstart, runlen; int bit, loc, end, error, start; char *bbp; /* XXX ondisk32 */ fs = ip->i_e2fs; ump = ip->i_ump; if (e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) == 0) return (0); EXT2_UNLOCK(ump); error = bread(ip->i_devvp, fsbtodb(fs, e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) goto fail; if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) || EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { error = ext2_cg_block_bitmap_init(fs, cg, bp); if (error) goto fail; ext2_gd_b_bitmap_csum_set(fs, cg, bp); } error = ext2_gd_b_bitmap_csum_verify(fs, cg, bp); if (error) goto fail; error = ext2_b_bitmap_validate(fs,bp, cg); if (error) goto fail; /* - * Check, that another thread did not not allocate the last block in this - * group while we were waiting for the buffer. + * Check, that another thread did not not allocate the last block in + * this group while we were waiting for the buffer. */ if (e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) == 0) goto fail; bbp = (char *)bp->b_data; if (dtog(fs, bpref) != cg) bpref = 0; if (bpref != 0) { bpref = dtogd(fs, bpref); /* * if the requested block is available, use it */ if (isclr(bbp, bpref)) { bno = bpref; goto gotit; } } /* * no blocks in the requested cylinder, so take next * available one in this cylinder group. * first try to get 8 contigous blocks, then fall back to a single * block. */ if (bpref) start = dtogd(fs, bpref) / NBBY; else start = 0; - end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start; + end = howmany(fs->e2fs_fpg, NBBY) - start; retry: runlen = 0; runstart = 0; for (loc = start; loc < end; loc++) { if (bbp[loc] == (char)0xff) { runlen = 0; continue; } /* Start of a run, find the number of high clear bits. */ if (runlen == 0) { bit = fls(bbp[loc]); runlen = NBBY - bit; runstart = loc * NBBY + bit; } else if (bbp[loc] == 0) { /* Continue a run. */ runlen += NBBY; } else { /* * Finish the current run. If it isn't long * enough, start a new one. */ bit = ffs(bbp[loc]) - 1; runlen += bit; if (runlen >= 8) { bno = runstart; goto gotit; } /* Run was too short, start a new one. */ bit = fls(bbp[loc]); runlen = NBBY - bit; runstart = loc * NBBY + bit; } /* If the current run is long enough, use it. */ if (runlen >= 8) { bno = runstart; goto gotit; } } if (start != 0) { end = start; start = 0; goto retry; } bno = ext2_mapsearch(fs, bbp, bpref); if (bno < 0) goto fail; gotit: #ifdef INVARIANTS if (isset(bbp, bno)) { printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n", cg, (intmax_t)bno, fs->e2fs_fsmnt); panic("ext2fs_alloccg: dup alloc"); } #endif setbit(bbp, bno); EXT2_LOCK(ump); ext2_clusteracct(fs, bbp, cg, bno, -1); fs->e2fs_fbcount--; e2fs_gd_set_nbfree(&fs->e2fs_gd[cg], e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) - 1); fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); ext2_gd_b_bitmap_csum_set(fs, cg, bp); bdwrite(bp); - return (((uint64_t)cg) * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno); + return (((uint64_t)cg) * fs->e2fs_fpg + + le32toh(fs->e2fs->e2fs_first_dblock) + bno); fail: brelse(bp); EXT2_LOCK(ump); return (0); } /* * Determine whether a cluster can be allocated. 
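 * The group's e2fs_maxcluster hint and its cluster summary counts are
 * checked before the bitmap is scanned, so groups that cannot hold a
 * run of the requested length are rejected cheaply.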
*/ static daddr_t ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len) { struct m_ext2fs *fs; struct ext2mount *ump; struct buf *bp; char *bbp; int bit, error, got, i, loc, run; int32_t *lp; daddr_t bno; fs = ip->i_e2fs; ump = ip->i_ump; if (fs->e2fs_maxcluster[cg] < len) return (0); EXT2_UNLOCK(ump); error = bread(ip->i_devvp, fsbtodb(fs, e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) goto fail_lock; bbp = (char *)bp->b_data; EXT2_LOCK(ump); /* * Check to see if a cluster of the needed size (or bigger) is * available in this cylinder group. */ lp = &fs->e2fs_clustersum[cg].cs_sum[len]; for (i = len; i <= fs->e2fs_contigsumsize; i++) if (*lp++ > 0) break; if (i > fs->e2fs_contigsumsize) { /* * Update the cluster summary information to reflect * the true maximum-sized cluster so that future cluster * allocation requests can avoid reading the bitmap only * to find no cluster. */ lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1]; for (i = len - 1; i > 0; i--) if (*lp-- > 0) break; fs->e2fs_maxcluster[cg] = i; goto fail; } EXT2_UNLOCK(ump); /* Search the bitmap to find a big enough cluster like in FFS. */ if (dtog(fs, bpref) != cg) bpref = 0; if (bpref != 0) bpref = dtogd(fs, bpref); loc = bpref / NBBY; bit = 1 << (bpref % NBBY); - for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) { + for (run = 0, got = bpref; got < fs->e2fs_fpg; got++) { if ((bbp[loc] & bit) != 0) run = 0; else { run++; if (run == len) break; } if ((got & (NBBY - 1)) != (NBBY - 1)) bit <<= 1; else { loc++; bit = 1; } } - if (got >= fs->e2fs->e2fs_fpg) + if (got >= fs->e2fs_fpg) goto fail_lock; /* Allocate the cluster that we found. */ for (i = 1; i < len; i++) if (!isclr(bbp, got - run + i)) panic("ext2_clusteralloc: map mismatch"); bno = got - run + 1; - if (bno >= fs->e2fs->e2fs_fpg) + if (bno >= fs->e2fs_fpg) panic("ext2_clusteralloc: allocated out of group"); EXT2_LOCK(ump); for (i = 0; i < len; i += fs->e2fs_fpb) { setbit(bbp, bno + i); ext2_clusteracct(fs, bbp, cg, bno + i, -1); fs->e2fs_fbcount--; e2fs_gd_set_nbfree(&fs->e2fs_gd[cg], e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) - 1); } fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); bdwrite(bp); - return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno); + return (cg * fs->e2fs_fpg + le32toh(fs->e2fs->e2fs_first_dblock) + + bno); fail_lock: EXT2_LOCK(ump); fail: brelse(bp); return (0); } static int ext2_zero_inode_table(struct inode *ip, int cg) { struct m_ext2fs *fs; struct buf *bp; int i, all_blks, used_blks; fs = ip->i_e2fs; - if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_ZEROED) + if (le16toh(fs->e2fs_gd[cg].ext4bgd_flags) & EXT2_BG_INODE_ZEROED) return (0); - all_blks = fs->e2fs->e2fs_inode_size * fs->e2fs->e2fs_ipg / + all_blks = le16toh(fs->e2fs->e2fs_inode_size) * fs->e2fs_ipg / fs->e2fs_bsize; - used_blks = howmany(fs->e2fs->e2fs_ipg - + used_blks = howmany(fs->e2fs_ipg - e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]), fs->e2fs_bsize / EXT2_INODE_SIZE(fs)); for (i = 0; i < all_blks - used_blks; i++) { bp = getblk(ip->i_devvp, fsbtodb(fs, e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]) + used_blks + i), fs->e2fs_bsize, 0, 0, 0); if (!bp) return (EIO); vfs_bio_bzero_buf(bp, 0, fs->e2fs_bsize); bawrite(bp); } - fs->e2fs_gd[cg].ext4bgd_flags |= EXT2_BG_INODE_ZEROED; + fs->e2fs_gd[cg].ext4bgd_flags = htole16(le16toh( + fs->e2fs_gd[cg].ext4bgd_flags) | EXT2_BG_INODE_ZEROED); return (0); } static void ext2_fix_bitmap_tail(unsigned char *bitmap, int first, int last) { int i; for (i = first; i <= last; i++) bitmap[i] = 0xff; } 
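The group-descriptor accessors earlier in this file all follow the same discipline: each on-disk field is little-endian and split into a low half in the classic ext2 descriptor and a high half in the ext4 extension, so reads go through le16toh()/le32toh() and writes through htole16()/htole32(). Below is a minimal, self-contained sketch of that pattern using a made-up two-field counter, not the real struct ext2_gd layout; it assumes the byte-order macros from FreeBSD's <sys/endian.h> (glibc provides the same names in <endian.h>).

#include <sys/endian.h>	/* le16toh()/htole16(); <endian.h> on glibc */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical descriptor: a 32-bit free count stored as two LE halves. */
struct toy_gd {
	uint16_t lo;		/* low 16 bits, little-endian on disk */
	uint16_t hi;		/* high 16 bits, little-endian on disk */
};

static uint32_t
toy_gd_get(const struct toy_gd *gd)
{
	return (((uint32_t)le16toh(gd->hi) << 16) | le16toh(gd->lo));
}

static void
toy_gd_set(struct toy_gd *gd, uint32_t val)
{
	gd->lo = htole16(val & 0xffff);
	gd->hi = htole16(val >> 16);
}

int
main(void)
{
	struct toy_gd gd;

	toy_gd_set(&gd, 0x12345);
	/* Read-modify-write happens in host byte order, between accessors. */
	toy_gd_set(&gd, toy_gd_get(&gd) - 1);
	printf("0x%x\n", toy_gd_get(&gd));	/* prints 0x12344 */
	return (0);
}

The fields the patch switches to directly, such as fs->e2fs_ipg, fs->e2fs_fpg and fs->e2fs_ficount, are used without conversion, so they are evidently the host-endian in-core copies held in struct m_ext2fs; the explicit le*toh()/htole*() calls remain only where the on-disk superblock (fs->e2fs) or group descriptors (fs->e2fs_gd) are touched.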
/* * Determine whether an inode can be allocated. * * Check to see if an inode is available, and if it is, * allocate it using tode in the specified cylinder group. */ static daddr_t ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode) { struct m_ext2fs *fs; struct buf *bp; struct ext2mount *ump; int error, start, len, ifree, ibytes; char *ibp, *loc; ipref--; /* to avoid a lot of (ipref -1) */ if (ipref == -1) ipref = 0; fs = ip->i_e2fs; ump = ip->i_ump; if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) == 0) return (0); EXT2_UNLOCK(ump); error = bread(ip->i_devvp, fsbtodb(fs, e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { EXT2_LOCK(ump); return (0); } if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) || EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { - if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_UNINIT) { + if (le16toh(fs->e2fs_gd[cg].ext4bgd_flags) & + EXT2_BG_INODE_UNINIT) { ibytes = fs->e2fs_ipg / 8; memset(bp->b_data, 0, ibytes - 1); ext2_fix_bitmap_tail(bp->b_data, ibytes, fs->e2fs_bsize - 1); - fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_INODE_UNINIT; + fs->e2fs_gd[cg].ext4bgd_flags = htole16(le16toh( + fs->e2fs_gd[cg].ext4bgd_flags) & + ~EXT2_BG_INODE_UNINIT); } ext2_gd_i_bitmap_csum_set(fs, cg, bp); error = ext2_zero_inode_table(ip, cg); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } } error = ext2_gd_i_bitmap_csum_verify(fs, cg, bp); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) == 0) { /* * Another thread allocated the last i-node in this * group while we were waiting for the buffer. */ brelse(bp); EXT2_LOCK(ump); return (0); } ibp = (char *)bp->b_data; if (ipref) { - ipref %= fs->e2fs->e2fs_ipg; + ipref %= fs->e2fs_ipg; if (isclr(ibp, ipref)) goto gotit; } start = ipref / NBBY; - len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY); + len = howmany(fs->e2fs_ipg - ipref, NBBY); loc = memcchr(&ibp[start], 0xff, len); if (loc == NULL) { len = start + 1; start = 0; loc = memcchr(&ibp[start], 0xff, len); if (loc == NULL) { - SDT_PROBE3(ext2fs, , alloc, ext2_nodealloccg_bmap_corrupted, - cg, ipref, fs->e2fs_fsmnt); + SDT_PROBE3(ext2fs, , alloc, + ext2_nodealloccg_bmap_corrupted, cg, ipref, + fs->e2fs_fsmnt); brelse(bp); EXT2_LOCK(ump); return (0); } } ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1; gotit: setbit(ibp, ipref); EXT2_LOCK(ump); e2fs_gd_set_nifree(&fs->e2fs_gd[cg], e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) - 1); if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) || EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { - ifree = fs->e2fs->e2fs_ipg - e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]); + ifree = fs->e2fs_ipg - e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]); if (ipref + 1 > ifree) e2fs_gd_set_i_unused(&fs->e2fs_gd[cg], - fs->e2fs->e2fs_ipg - (ipref + 1)); + fs->e2fs_ipg - (ipref + 1)); } - fs->e2fs->e2fs_ficount--; + fs->e2fs_ficount--; fs->e2fs_fmod = 1; if ((mode & IFMT) == IFDIR) { e2fs_gd_set_ndirs(&fs->e2fs_gd[cg], e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) + 1); fs->e2fs_total_dir++; } EXT2_UNLOCK(ump); ext2_gd_i_bitmap_csum_set(fs, cg, bp); bdwrite(bp); return ((uint64_t)cg * fs->e2fs_ipg + ipref + 1); } /* * Free a block or fragment. 
* */ void ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size) { struct m_ext2fs *fs; struct buf *bp; struct ext2mount *ump; int cg, error; char *bbp; fs = ip->i_e2fs; ump = ip->i_ump; cg = dtog(fs, bno); if (bno >= fs->e2fs_bcount) { - SDT_PROBE2(ext2fs, , alloc, ext2_blkfree_bad_block, ip->i_number, bno); + SDT_PROBE2(ext2fs, , alloc, ext2_blkfree_bad_block, + ip->i_number, bno); return; } error = bread(ip->i_devvp, fsbtodb(fs, e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { return; } bbp = (char *)bp->b_data; bno = dtogd(fs, bno); if (isclr(bbp, bno)) { panic("ext2_blkfree: freeing free block %lld, fs=%s", (long long)bno, fs->e2fs_fsmnt); } clrbit(bbp, bno); EXT2_LOCK(ump); ext2_clusteracct(fs, bbp, cg, bno, 1); fs->e2fs_fbcount++; e2fs_gd_set_nbfree(&fs->e2fs_gd[cg], e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) + 1); fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); ext2_gd_b_bitmap_csum_set(fs, cg, bp); bdwrite(bp); } /* * Free an inode. * */ int ext2_vfree(struct vnode *pvp, ino_t ino, int mode) { struct m_ext2fs *fs; struct inode *pip; struct buf *bp; struct ext2mount *ump; int error, cg; char *ibp; pip = VTOI(pvp); fs = pip->i_e2fs; ump = pip->i_ump; if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount) panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s", pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt); cg = ino_to_cg(fs, ino); error = bread(pip->i_devvp, fsbtodb(fs, e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { return (0); } ibp = (char *)bp->b_data; - ino = (ino - 1) % fs->e2fs->e2fs_ipg; + ino = (ino - 1) % fs->e2fs_ipg; if (isclr(ibp, ino)) { SDT_PROBE2(ext2fs, , alloc, ext2_vfree_doublefree, fs->e2fs_fsmnt, ino); if (fs->e2fs_ronly == 0) panic("ext2_vfree: freeing free inode"); } clrbit(ibp, ino); EXT2_LOCK(ump); - fs->e2fs->e2fs_ficount++; + fs->e2fs_ficount++; e2fs_gd_set_nifree(&fs->e2fs_gd[cg], e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) + 1); if ((mode & IFMT) == IFDIR) { e2fs_gd_set_ndirs(&fs->e2fs_gd[cg], e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) - 1); fs->e2fs_total_dir--; } fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); ext2_gd_i_bitmap_csum_set(fs, cg, bp); bdwrite(bp); return (0); } /* * Find a block in the specified cylinder group. * * It is a panic if a request is made to find a block if none are * available. 
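 * The bitmap is scanned with memcchr(), first from the preferred
 * fragment to the end of the group and then, if nothing is found,
 * from the start of the map up to the original starting point.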
*/ static daddr_t ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref) { char *loc; int start, len; /* * find the fragment by searching through the free block * map for an appropriate bit pattern */ if (bpref) start = dtogd(fs, bpref) / NBBY; else start = 0; - len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start; + len = howmany(fs->e2fs_fpg, NBBY) - start; loc = memcchr(&bbp[start], 0xff, len); if (loc == NULL) { len = start + 1; start = 0; loc = memcchr(&bbp[start], 0xff, len); if (loc == NULL) { - panic("ext2_mapsearch: map corrupted: start=%d, len=%d, fs=%s", - start, len, fs->e2fs_fsmnt); + panic("ext2_mapsearch: map corrupted: start=%d, len=%d," + "fs=%s", start, len, fs->e2fs_fsmnt); /* NOTREACHED */ } } return ((loc - bbp) * NBBY + ffs(~*loc) - 1); } int ext2_cg_has_sb(struct m_ext2fs *fs, int cg) { int a3, a5, a7; if (cg == 0) return (1); if (EXT2_HAS_COMPAT_FEATURE(fs, EXT2F_COMPAT_SPARSESUPER2)) { - if (cg == fs->e2fs->e4fs_backup_bgs[0] || - cg == fs->e2fs->e4fs_backup_bgs[1]) + if (cg == le32toh(fs->e2fs->e4fs_backup_bgs[0]) || + cg == le32toh(fs->e2fs->e4fs_backup_bgs[1])) return (1); return (0); } if ((cg <= 1) || !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_SPARSESUPER)) return (1); if (!(cg & 1)) return (0); for (a3 = 3, a5 = 5, a7 = 7; a3 <= cg || a5 <= cg || a7 <= cg; a3 *= 3, a5 *= 5, a7 *= 7) if (cg == a3 || cg == a5 || cg == a7) return (1); return (0); } Index: head/sys/fs/ext2fs/ext2_balloc.c =================================================================== --- head/sys/fs/ext2fs/ext2_balloc.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_balloc.c (revision 361136) @@ -1,323 +1,324 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)ffs_balloc.c 8.4 (Berkeley) 9/23/93 * $FreeBSD$ */ #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include static int ext2_ext_balloc(struct inode *ip, uint32_t lbn, int size, struct ucred *cred, struct buf **bpp, int flags) { struct m_ext2fs *fs; struct buf *bp = NULL; struct vnode *vp = ITOV(ip); daddr_t newblk; int blks, error, allocated; fs = ip->i_e2fs; blks = howmany(size, fs->e2fs_bsize); error = ext4_ext_get_blocks(ip, lbn, blks, cred, NULL, &allocated, &newblk); if (error) return (error); if (allocated) { bp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0, 0); if(!bp) return (EIO); } else { error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp); if (error) { return (error); } } bp->b_blkno = fsbtodb(fs, newblk); if (flags & BA_CLRBUF) vfs_bio_clrbuf(bp); *bpp = bp; return (error); } /* * Balloc defines the structure of filesystem storage * by allocating the physical blocks on a device given * the inode and the logical block number in a file. */ int ext2_balloc(struct inode *ip, e2fs_lbn_t lbn, int size, struct ucred *cred, struct buf **bpp, int flags) { struct m_ext2fs *fs; struct ext2mount *ump; struct buf *bp, *nbp; struct vnode *vp = ITOV(ip); struct indir indirs[EXT2_NIADDR + 2]; e4fs_daddr_t nb, newb; e2fs_daddr_t *bap, pref; int num, i, error; *bpp = NULL; if (lbn < 0) return (EFBIG); fs = ip->i_e2fs; ump = ip->i_ump; /* * check if this is a sequential block allocation. * If so, increment next_alloc fields to allow ext2_blkpref * to make a good guess */ if (lbn == ip->i_next_alloc_block + 1) { ip->i_next_alloc_block++; ip->i_next_alloc_goal++; } if (ip->i_flag & IN_E4EXTENTS) return (ext2_ext_balloc(ip, lbn, size, cred, bpp, flags)); /* * The first EXT2_NDADDR blocks are direct blocks */ if (lbn < EXT2_NDADDR) { nb = ip->i_db[lbn]; /* * no new block is to be allocated, and no need to expand * the file */ if (nb != 0) { error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp); if (error) { return (error); } bp->b_blkno = fsbtodb(fs, nb); if (ip->i_size >= (lbn + 1) * fs->e2fs_bsize) { *bpp = bp; return (0); } } else { EXT2_LOCK(ump); error = ext2_alloc(ip, lbn, ext2_blkpref(ip, lbn, (int)lbn, &ip->i_db[0], 0), fs->e2fs_bsize, cred, &newb); if (error) return (error); /* * If the newly allocated block exceeds 32-bit limit, * we can not use it in file block maps. */ if (newb > UINT_MAX) return (EFBIG); bp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0, 0); bp->b_blkno = fsbtodb(fs, newb); if (flags & BA_CLRBUF) vfs_bio_clrbuf(bp); } ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno); ip->i_flag |= IN_CHANGE | IN_UPDATE; *bpp = bp; return (0); } /* * Determine the number of levels of indirection. */ pref = 0; if ((error = ext2_getlbns(vp, lbn, indirs, &num)) != 0) return (error); #ifdef INVARIANTS if (num < 1) panic("ext2_balloc: ext2_getlbns returned indirect block"); #endif /* * Fetch the first indirect block allocating if necessary. */ --num; nb = ip->i_ib[indirs[0].in_off]; if (nb == 0) { EXT2_LOCK(ump); pref = ext2_blkpref(ip, lbn, indirs[0].in_off + EXT2_NDIR_BLOCKS, &ip->i_db[0], 0); if ((error = ext2_alloc(ip, lbn, pref, fs->e2fs_bsize, cred, &newb))) return (error); if (newb > UINT_MAX) return (EFBIG); nb = newb; bp = getblk(vp, indirs[1].in_lbn, fs->e2fs_bsize, 0, 0, 0); bp->b_blkno = fsbtodb(fs, newb); vfs_bio_clrbuf(bp); /* * Write synchronously so that indirect blocks * never point at garbage. 
*/ if ((error = bwrite(bp)) != 0) { ext2_blkfree(ip, nb, fs->e2fs_bsize); return (error); } ip->i_ib[indirs[0].in_off] = newb; ip->i_flag |= IN_CHANGE | IN_UPDATE; } /* * Fetch through the indirect blocks, allocating as necessary. */ for (i = 1;;) { error = bread(vp, indirs[i].in_lbn, (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { return (error); } bap = (e2fs_daddr_t *)bp->b_data; - nb = bap[indirs[i].in_off]; + nb = le32toh(bap[indirs[i].in_off]); if (i == num) break; i += 1; if (nb != 0) { bqrelse(bp); continue; } EXT2_LOCK(ump); if (pref == 0) pref = ext2_blkpref(ip, lbn, indirs[i].in_off, bap, bp->b_lblkno); error = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newb); if (error) { brelse(bp); return (error); } if (newb > UINT_MAX) return (EFBIG); nb = newb; nbp = getblk(vp, indirs[i].in_lbn, fs->e2fs_bsize, 0, 0, 0); nbp->b_blkno = fsbtodb(fs, nb); vfs_bio_clrbuf(nbp); /* * Write synchronously so that indirect blocks * never point at garbage. */ if ((error = bwrite(nbp)) != 0) { ext2_blkfree(ip, nb, fs->e2fs_bsize); brelse(bp); return (error); } - bap[indirs[i - 1].in_off] = nb; + bap[indirs[i - 1].in_off] = htole32(nb); /* * If required, write synchronously, otherwise use * delayed write. */ if (flags & IO_SYNC) { bwrite(bp); } else { if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; bdwrite(bp); } } /* * Get the data block, allocating if necessary. */ if (nb == 0) { EXT2_LOCK(ump); pref = ext2_blkpref(ip, lbn, indirs[i].in_off, &bap[0], bp->b_lblkno); if ((error = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newb)) != 0) { brelse(bp); return (error); } if (newb > UINT_MAX) return (EFBIG); nb = newb; nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0, 0); nbp->b_blkno = fsbtodb(fs, nb); if (flags & BA_CLRBUF) vfs_bio_clrbuf(nbp); - bap[indirs[i].in_off] = nb; + bap[indirs[i].in_off] = htole32(nb); /* * If required, write synchronously, otherwise use * delayed write. */ if (flags & IO_SYNC) { bwrite(bp); } else { if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; bdwrite(bp); } *bpp = nbp; return (0); } brelse(bp); if (flags & BA_CLRBUF) { int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT; if (seqcount && (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { error = cluster_read(vp, ip->i_size, lbn, (int)fs->e2fs_bsize, NOCRED, MAXBSIZE, seqcount, 0, &nbp); } else { error = bread(vp, lbn, (int)fs->e2fs_bsize, NOCRED, &nbp); } if (error) { brelse(nbp); return (error); } } else { nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0, 0); nbp->b_blkno = fsbtodb(fs, nb); } *bpp = nbp; return (0); } Index: head/sys/fs/ext2fs/ext2_bmap.c =================================================================== --- head/sys/fs/ext2fs/ext2_bmap.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_bmap.c (revision 361136) @@ -1,510 +1,515 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ufs_bmap.c 8.7 (Berkeley) 3/21/95 * $FreeBSD$ */ #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include /* * Bmap converts the logical block number of a file to its physical block * number on the disk. The conversion is done by using the logical block * number to index into the array of block pointers described by the dinode. */ int ext2_bmap(struct vop_bmap_args *ap) { daddr_t blkno; int error; /* * Check for underlying vnode requests and ensure that logical * to physical mapping is requested. */ if (ap->a_bop != NULL) *ap->a_bop = &VTOI(ap->a_vp)->i_devvp->v_bufobj; if (ap->a_bnp == NULL) return (0); if (VTOI(ap->a_vp)->i_flag & IN_E4EXTENTS) error = ext4_bmapext(ap->a_vp, ap->a_bn, &blkno, ap->a_runp, ap->a_runb); else error = ext2_bmaparray(ap->a_vp, ap->a_bn, &blkno, ap->a_runp, ap->a_runb); *ap->a_bnp = blkno; return (error); } /* * Convert the logical block number of a file to its physical block number * on the disk within ext4 extents. 
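 * The on-disk extent fields are little-endian: the logical start
 * (e_blk) and length (e_len) are converted with le32toh()/le16toh(),
 * and the 48-bit physical start is assembled from e_start_lo and
 * e_start_hi.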
*/ int ext4_bmapext(struct vnode *vp, int32_t bn, int64_t *bnp, int *runp, int *runb) { struct inode *ip; struct m_ext2fs *fs; struct mount *mp; struct ext2mount *ump; struct ext4_extent_header *ehp; struct ext4_extent *ep; struct ext4_extent_path *path = NULL; daddr_t lbn; int error, depth, maxrun = 0, bsize; ip = VTOI(vp); fs = ip->i_e2fs; mp = vp->v_mount; ump = VFSTOEXT2(mp); lbn = bn; ehp = (struct ext4_extent_header *)ip->i_data; - depth = ehp->eh_depth; + depth = le16toh(ehp->eh_depth); bsize = EXT2_BLOCK_SIZE(ump->um_e2fs); *bnp = -1; if (runp != NULL) { maxrun = mp->mnt_iosize_max / bsize - 1; *runp = 0; } if (runb != NULL) *runb = 0; error = ext4_ext_find_extent(ip, lbn, &path); if (error) return (error); ep = path[depth].ep_ext; if(ep) { - if (lbn < ep->e_blk) { + if (lbn < le32toh(ep->e_blk)) { if (runp != NULL) { - *runp = min(maxrun, ep->e_blk - lbn - 1); + *runp = min(maxrun, le32toh(ep->e_blk) - lbn - 1); } - } else if (ep->e_blk <= lbn && lbn < ep->e_blk + ep->e_len) { - *bnp = fsbtodb(fs, lbn - ep->e_blk + - (ep->e_start_lo | (daddr_t)ep->e_start_hi << 32)); + } else if (le32toh(ep->e_blk) <= lbn && + lbn < le32toh(ep->e_blk) + le16toh(ep->e_len)) { + *bnp = fsbtodb(fs, lbn - le32toh(ep->e_blk) + + (le32toh(ep->e_start_lo) | + (daddr_t)le16toh(ep->e_start_hi) << 32)); if (runp != NULL) { *runp = min(maxrun, - ep->e_len - (lbn - ep->e_blk) - 1); + le16toh(ep->e_len) - + (lbn - le32toh(ep->e_blk)) - 1); } if (runb != NULL) - *runb = min(maxrun, lbn - ep->e_blk); + *runb = min(maxrun, lbn - le32toh(ep->e_blk)); } else { if (runb != NULL) - *runb = min(maxrun, ep->e_blk + lbn - ep->e_len); + *runb = min(maxrun, le32toh(ep->e_blk) + lbn - + le16toh(ep->e_len)); } } ext4_ext_path_free(path); return (error); } static int readindir(struct vnode *vp, e2fs_lbn_t lbn, e2fs_daddr_t daddr, struct buf **bpp) { struct buf *bp; struct mount *mp; struct ext2mount *ump; int error; mp = vp->v_mount; ump = VFSTOEXT2(mp); bp = getblk(vp, lbn, mp->mnt_stat.f_iosize, 0, 0, 0); if ((bp->b_flags & B_CACHE) == 0) { KASSERT(daddr != 0, ("readindir: indirect block not in cache")); bp->b_blkno = blkptrtodb(ump, daddr); bp->b_iocmd = BIO_READ; bp->b_flags &= ~B_INVAL; bp->b_ioflags &= ~BIO_ERROR; vfs_busy_pages(bp, 0); bp->b_iooffset = dbtob(bp->b_blkno); bstrategy(bp); #ifdef RACCT if (racct_enable) { PROC_LOCK(curproc); racct_add_buf(curproc, bp, 0); PROC_UNLOCK(curproc); } #endif curthread->td_ru.ru_inblock++; error = bufwait(bp); if (error != 0) { brelse(bp); return (error); } } *bpp = bp; return (0); } /* * Indirect blocks are now on the vnode for the file. They are given negative * logical block numbers. Indirect blocks are addressed by the negative * address of the first data block to which they point. Double indirect blocks * are addressed by one less than the address of the first indirect block to * which they point. Triple indirect blocks are addressed by one less than * the address of the first double indirect block to which they point. * * ext2_bmaparray does the bmap conversion, and if requested returns the * array of logical blocks which must be traversed to get to a block. * Each entry contains the offset into that block that gets you to the * next block and the disk address of the block (if it is assigned). 
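 * Indirect block entries are stored little-endian on disk, so each
 * pointer read from an indirect buffer is converted with le32toh()
 * before it is used as a disk address.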
*/ int ext2_bmaparray(struct vnode *vp, daddr_t bn, daddr_t *bnp, int *runp, int *runb) { struct inode *ip; struct buf *bp; struct ext2mount *ump; struct mount *mp; struct indir a[EXT2_NIADDR + 1], *ap; daddr_t daddr; e2fs_lbn_t metalbn; int error, num, maxrun = 0, bsize; int *nump; ap = NULL; ip = VTOI(vp); mp = vp->v_mount; ump = VFSTOEXT2(mp); bsize = EXT2_BLOCK_SIZE(ump->um_e2fs); if (runp) { maxrun = mp->mnt_iosize_max / bsize - 1; *runp = 0; } if (runb) *runb = 0; ap = a; nump = # error = ext2_getlbns(vp, bn, ap, nump); if (error) return (error); num = *nump; if (num == 0) { *bnp = blkptrtodb(ump, ip->i_db[bn]); if (*bnp == 0) { *bnp = -1; } else if (runp) { daddr_t bnb = bn; for (++bn; bn < EXT2_NDADDR && *runp < maxrun && is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]); ++bn, ++*runp); bn = bnb; if (runb && (bn > 0)) { for (--bn; (bn >= 0) && (*runb < maxrun) && is_sequential(ump, ip->i_db[bn], ip->i_db[bn + 1]); --bn, ++*runb); } } return (0); } /* Get disk address out of indirect block array */ daddr = ip->i_ib[ap->in_off]; for (bp = NULL, ++ap; --num; ++ap) { /* * Exit the loop if there is no disk address assigned yet and * the indirect block isn't in the cache, or if we were * looking for an indirect block and we've found it. */ metalbn = ap->in_lbn; if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn) break; /* * If we get here, we've either got the block in the cache * or we have a disk address for it, go fetch it. */ if (bp) bqrelse(bp); error = readindir(vp, metalbn, daddr, &bp); if (error != 0) return (error); - daddr = ((e2fs_daddr_t *)bp->b_data)[ap->in_off]; + daddr = le32toh(((e2fs_daddr_t *)bp->b_data)[ap->in_off]); if (num == 1 && daddr && runp) { for (bn = ap->in_off + 1; bn < MNINDIR(ump) && *runp < maxrun && is_sequential(ump, ((e2fs_daddr_t *)bp->b_data)[bn - 1], ((e2fs_daddr_t *)bp->b_data)[bn]); ++bn, ++*runp); bn = ap->in_off; if (runb && bn) { for (--bn; bn >= 0 && *runb < maxrun && is_sequential(ump, ((e2fs_daddr_t *)bp->b_data)[bn], ((e2fs_daddr_t *)bp->b_data)[bn + 1]); --bn, ++*runb); } } } if (bp) bqrelse(bp); /* * Since this is FFS independent code, we are out of scope for the * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they * will fall in the range 1..um_seqinc, so we use that test and * return a request for a zeroed out buffer if attempts are made * to read a BLK_NOCOPY or BLK_SNAP block. 
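ext2_bmaparray() above also computes read-ahead runs by walking the indirect block's 32-bit little-endian pointers, which is why every array access now goes through le32toh(). A stripped-down sketch of that run detection, assuming plain physical adjacency rather than the kernel's is_sequential() test:

#include <stdint.h>
#include <sys/endian.h>

static int
count_contig_run(const uint32_t *bap, int start, int nptrs, int maxrun)
{
	int run = 0;

	/* A zero pointer is a hole; no run can start there. */
	if (le32toh(bap[start]) == 0)
		return (0);
	while (start + run + 1 < nptrs && run < maxrun &&
	    le32toh(bap[start + run]) + 1 == le32toh(bap[start + run + 1]))
		run++;
	return (run);
}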
*/ if ((ip->i_flags & SF_SNAPSHOT) && daddr > 0 && daddr < ump->um_seqinc) { *bnp = -1; return (0); } *bnp = blkptrtodb(ump, daddr); if (*bnp == 0) { *bnp = -1; } return (0); } static e2fs_lbn_t lbn_count(struct ext2mount *ump, int level) { e2fs_lbn_t blockcnt; for (blockcnt = 1; level > 0; level--) blockcnt *= MNINDIR(ump); return (blockcnt); } int ext2_bmap_seekdata(struct vnode *vp, off_t *offp) { struct buf *bp; struct indir a[EXT2_NIADDR + 1], *ap; struct inode *ip; struct mount *mp; struct ext2mount *ump; e2fs_daddr_t bn, daddr, nextbn; uint64_t bsize; off_t numblks; int error, num, num1, off; bp = NULL; error = 0; ip = VTOI(vp); mp = vp->v_mount; ump = VFSTOEXT2(mp); if (vp->v_type != VREG || (ip->i_flags & SF_SNAPSHOT) != 0) return (EINVAL); if (*offp < 0 || *offp >= ip->i_size) return (ENXIO); bsize = mp->mnt_stat.f_iosize; for (bn = *offp / bsize, numblks = howmany(ip->i_size, bsize); bn < numblks; bn = nextbn) { if (bn < EXT2_NDADDR) { daddr = ip->i_db[bn]; if (daddr != 0) break; nextbn = bn + 1; continue; } ap = a; error = ext2_getlbns(vp, bn, ap, &num); if (error != 0) break; MPASS(num >= 2); daddr = ip->i_ib[ap->in_off]; ap++, num--; for (nextbn = EXT2_NDADDR, num1 = num - 1; num1 > 0; num1--) nextbn += lbn_count(ump, num1); if (daddr == 0) { nextbn += lbn_count(ump, num); continue; } for (; daddr != 0 && num > 0; ap++, num--) { if (bp != NULL) bqrelse(bp); error = readindir(vp, ap->in_lbn, daddr, &bp); if (error != 0) return (error); /* * Scan the indirect block until we find a non-zero * pointer. */ off = ap->in_off; do { - daddr = ((e2fs_daddr_t *)bp->b_data)[off]; + daddr = le32toh(((e2fs_daddr_t *)bp->b_data)[off]); } while (daddr == 0 && ++off < MNINDIR(ump)); nextbn += off * lbn_count(ump, num - 1); /* * We need to recompute the LBNs of indirect * blocks, so restart with the updated block offset. */ if (off != ap->in_off) break; } if (num == 0) { /* * We found a data block. */ bn = nextbn; break; } } if (bp != NULL) bqrelse(bp); if (bn >= numblks) error = ENXIO; if (error == 0 && *offp < bn * bsize) *offp = bn * bsize; return (error); } /* * Create an array of logical block number/offset pairs which represent the * path of indirect blocks required to access a data block. The first "pair" * contains the logical block number of the appropriate single, double or * triple indirect block and the offset into the inode indirect block array. * Note, the logical block number of the inode single/double/triple indirect * block appears twice in the array, once with the offset into the i_ib and * once with the offset into the page itself. */ int ext2_getlbns(struct vnode *vp, daddr_t bn, struct indir *ap, int *nump) { long blockcnt; e2fs_lbn_t metalbn, realbn; struct ext2mount *ump; int i, numlevels, off; int64_t qblockcnt; ump = VFSTOEXT2(vp->v_mount); if (nump) *nump = 0; numlevels = 0; realbn = bn; if ((long)bn < 0) bn = -(long)bn; /* The first EXT2_NDADDR blocks are direct blocks. */ if (bn < EXT2_NDADDR) return (0); /* * Determine the number of levels of indirection. After this loop * is done, blockcnt indicates the number of data blocks possible * at the previous level of indirection, and EXT2_NIADDR - i is the * number of levels of indirection needed to locate the requested block. */ for (blockcnt = 1, i = EXT2_NIADDR, bn -= EXT2_NDADDR; ; i--, bn -= blockcnt) { if (i == 0) return (EFBIG); /* * Use int64_t's here to avoid overflow for triple indirect * blocks when longs have 32 bits and the block size is more * than 4K. 
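The indirection-level loop that this comment describes only needs the per-level block counts, so it can be illustrated on its own. A self-contained sketch, with SKETCH_NDADDR/SKETCH_NIADDR standing in for EXT2_NDADDR/EXT2_NIADDR, using 64-bit arithmetic for the same overflow reason given above:

#include <stdint.h>

#define SKETCH_NDADDR	12	/* direct pointers, as in ext2 */
#define SKETCH_NIADDR	3	/* indirect levels, as in ext2 */

static int
indir_levels(int64_t bn, int64_t nindir)
{
	int64_t blockcnt = 1;
	int level;

	if (bn < SKETCH_NDADDR)
		return (0);		/* direct block */
	bn -= SKETCH_NDADDR;
	for (level = 1; level <= SKETCH_NIADDR; level++) {
		blockcnt *= nindir;	/* blocks reachable at this level */
		if (bn < blockcnt)
			return (level);	/* 1, 2 or 3 indirections */
		bn -= blockcnt;
	}
	return (-1);			/* out of range; EFBIG in the kernel */
}

With a 4 KiB block size nindir is 1024, so for example bn = 12 + 1024 is the first block that needs double indirection.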
*/ qblockcnt = (int64_t)blockcnt * MNINDIR(ump); if (bn < qblockcnt) break; blockcnt = qblockcnt; } /* Calculate the address of the first meta-block. */ if (realbn >= 0) metalbn = -(realbn - bn + EXT2_NIADDR - i); else metalbn = -(-realbn - bn + EXT2_NIADDR - i); /* * At each iteration, off is the offset into the bap array which is * an array of disk addresses at the current level of indirection. * The logical block number and the offset in that block are stored * into the argument array. */ ap->in_lbn = metalbn; ap->in_off = off = EXT2_NIADDR - i; ap++; for (++numlevels; i <= EXT2_NIADDR; i++) { /* If searching for a meta-data block, quit when found. */ if (metalbn == realbn) break; off = (bn / blockcnt) % MNINDIR(ump); ++numlevels; ap->in_lbn = metalbn; ap->in_off = off; ++ap; metalbn -= -1 + off * blockcnt; blockcnt /= MNINDIR(ump); } if (nump) *nump = numlevels; return (0); } Index: head/sys/fs/ext2fs/ext2_csum.c =================================================================== --- head/sys/fs/ext2fs/ext2_csum.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_csum.c (revision 361136) @@ -1,779 +1,789 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017, Fedor Uporov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(ext2fs); /* * ext2fs trace probe: * arg0: verbosity. 
Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(ext2fs, , trace, csum, "int", "char*"); #define EXT2_BG_INODE_BITMAP_CSUM_HI_END \ (offsetof(struct ext2_gd, ext4bgd_i_bmap_csum_hi) + \ sizeof(uint16_t)) #define EXT2_INODE_CSUM_HI_EXTRA_END \ (offsetof(struct ext2fs_dinode, e2di_chksum_hi) + sizeof(uint16_t) - \ E2FS_REV0_INODE_SIZE) #define EXT2_BG_BLOCK_BITMAP_CSUM_HI_LOCATION \ (offsetof(struct ext2_gd, ext4bgd_b_bmap_csum_hi) + \ sizeof(uint16_t)) void ext2_sb_csum_set_seed(struct m_ext2fs *fs) { if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_CSUM_SEED)) - fs->e2fs_csum_seed = fs->e2fs->e4fs_chksum_seed; + fs->e2fs_csum_seed = le32toh(fs->e2fs->e4fs_chksum_seed); else if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { fs->e2fs_csum_seed = calculate_crc32c(~0, fs->e2fs->e2fs_uuid, sizeof(fs->e2fs->e2fs_uuid)); } else fs->e2fs_csum_seed = 0; } int ext2_sb_csum_verify(struct m_ext2fs *fs) { if (fs->e2fs->e4fs_chksum_type != EXT4_CRC32C_CHKSUM) { printf( "WARNING: mount of %s denied due bad sb csum type\n", fs->e2fs_fsmnt); return (EINVAL); } - if (fs->e2fs->e4fs_sbchksum != + if (le32toh(fs->e2fs->e4fs_sbchksum) != calculate_crc32c(~0, (const char *)fs->e2fs, offsetof(struct ext2fs, e4fs_sbchksum))) { printf( "WARNING: mount of %s denied due bad sb csum=0x%x, expected=0x%x - run fsck\n", - fs->e2fs_fsmnt, fs->e2fs->e4fs_sbchksum, calculate_crc32c(~0, - (const char *)fs->e2fs, offsetof(struct ext2fs, e4fs_sbchksum))); + fs->e2fs_fsmnt, le32toh(fs->e2fs->e4fs_sbchksum), + calculate_crc32c(~0, (const char *)fs->e2fs, + offsetof(struct ext2fs, e4fs_sbchksum))); return (EINVAL); } return (0); } void ext2_sb_csum_set(struct m_ext2fs *fs) { - fs->e2fs->e4fs_sbchksum = calculate_crc32c(~0, (const char *)fs->e2fs, - offsetof(struct ext2fs, e4fs_sbchksum)); + fs->e2fs->e4fs_sbchksum = + htole32(calculate_crc32c(~0, (const char *)fs->e2fs, + offsetof(struct ext2fs, e4fs_sbchksum))); } static uint32_t ext2_extattr_blk_csum(struct inode *ip, uint64_t facl, struct ext2fs_extattr_header *header) { struct m_ext2fs *fs; - uint32_t crc, old_crc; + uint32_t crc, dummy_crc = 0; + uint64_t facl_bn = htole64(facl); + int offset = offsetof(struct ext2fs_extattr_header, h_checksum); fs = ip->i_e2fs; - old_crc = header->h_checksum; + crc = calculate_crc32c(fs->e2fs_csum_seed, (uint8_t *)&facl_bn, + sizeof(facl_bn)); + crc = calculate_crc32c(crc, (uint8_t *)header, offset); + crc = calculate_crc32c(crc, (uint8_t *)&dummy_crc, + sizeof(dummy_crc)); + offset += sizeof(dummy_crc); + crc = calculate_crc32c(crc, (uint8_t *)header + offset, + fs->e2fs_bsize - offset); - header->h_checksum = 0; - crc = calculate_crc32c(fs->e2fs_csum_seed, (uint8_t *)&facl, sizeof(facl)); - crc = calculate_crc32c(crc, (uint8_t *)header, fs->e2fs_bsize); - header->h_checksum = old_crc; - - return (crc); + return (htole32(crc)); } int ext2_extattr_blk_csum_verify(struct inode *ip, struct buf *bp) { struct ext2fs_extattr_header *header; header = (struct ext2fs_extattr_header *)bp->b_data; if (EXT2_HAS_RO_COMPAT_FEATURE(ip->i_e2fs, EXT2F_ROCOMPAT_METADATA_CKSUM) && (header->h_checksum != ext2_extattr_blk_csum(ip, ip->i_facl, header))) { SDT_PROBE2(ext2fs, , trace, csum, 1, "bad extattr csum detected"); return (EIO); } return (0); } void ext2_extattr_blk_csum_set(struct inode *ip, struct buf *bp) { struct ext2fs_extattr_header *header; if (!EXT2_HAS_RO_COMPAT_FEATURE(ip->i_e2fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return; header = (struct ext2fs_extattr_header *)bp->b_data; 
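Several of the checksum helpers in this file follow the same pattern the rewritten ext2_extattr_blk_csum() uses: the on-disk checksum field contributes to its own checksum as zero, so the CRC is fed the bytes before the field, a zero placeholder of the field's width, and then the rest of the object. A hedged sketch of that idiom; crc32c_update() below is a slow bit-at-a-time stand-in for the kernel's calculate_crc32c(), not the real routine:

#include <stddef.h>
#include <stdint.h>

/* Bit-at-a-time CRC-32C (Castagnoli, reflected 0x82F63B78), no final xor. */
static uint32_t
crc32c_update(uint32_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int b;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return (crc);
}

/*
 * Checksum an object whose checksum field must be treated as zero.
 * csum_len is assumed to be at most sizeof(zeroes).
 */
static uint32_t
csum_skip_field(uint32_t seed, const uint8_t *obj, size_t objsize,
    size_t csum_off, size_t csum_len)
{
	static const uint8_t zeroes[8];
	uint32_t crc;

	crc = crc32c_update(seed, obj, csum_off);
	crc = crc32c_update(crc, zeroes, csum_len);
	crc = crc32c_update(crc, obj + csum_off + csum_len,
	    objsize - csum_off - csum_len);
	return (crc);
}

ext2_sb_csum_set() is the degenerate case of the same idea: the field sits at the very end of the checked area, so only the leading bytes up to its offset are hashed.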
header->h_checksum = ext2_extattr_blk_csum(ip, ip->i_facl, header); } void ext2_init_dirent_tail(struct ext2fs_direct_tail *tp) { memset(tp, 0, sizeof(struct ext2fs_direct_tail)); - tp->e2dt_rec_len = sizeof(struct ext2fs_direct_tail); + tp->e2dt_rec_len = le16toh(sizeof(struct ext2fs_direct_tail)); tp->e2dt_reserved_ft = EXT2_FT_DIR_CSUM; } int ext2_is_dirent_tail(struct inode *ip, struct ext2fs_direct_2 *ep) { struct m_ext2fs *fs; struct ext2fs_direct_tail *tp; fs = ip->i_e2fs; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return (0); tp = (struct ext2fs_direct_tail *)ep; if (tp->e2dt_reserved_zero1 == 0 && - tp->e2dt_rec_len == sizeof(struct ext2fs_direct_tail) && + le16toh(tp->e2dt_rec_len) == sizeof(struct ext2fs_direct_tail) && tp->e2dt_reserved_zero2 == 0 && tp->e2dt_reserved_ft == EXT2_FT_DIR_CSUM) return (1); return (0); } struct ext2fs_direct_tail * ext2_dirent_get_tail(struct inode *ip, struct ext2fs_direct_2 *ep) { struct ext2fs_direct_2 *dep; void *top; unsigned int rec_len; dep = ep; top = EXT2_DIRENT_TAIL(ep, ip->i_e2fs->e2fs_bsize); - rec_len = dep->e2d_reclen; + rec_len = le16toh(dep->e2d_reclen); while (rec_len && !(rec_len & 0x3)) { dep = (struct ext2fs_direct_2 *)(((char *)dep) + rec_len); if ((void *)dep >= top) break; - rec_len = dep->e2d_reclen; + rec_len = le16toh(dep->e2d_reclen); } if (dep != top) return (NULL); if (ext2_is_dirent_tail(ip, dep)) return ((struct ext2fs_direct_tail *)dep); return (NULL); } static uint32_t ext2_dirent_csum(struct inode *ip, struct ext2fs_direct_2 *ep, int size) { struct m_ext2fs *fs; char *buf; uint32_t inum, gen, crc; fs = ip->i_e2fs; buf = (char *)ep; - inum = ip->i_number; - gen = ip->i_gen; + inum = htole32(ip->i_number); + gen = htole32(ip->i_gen); crc = calculate_crc32c(fs->e2fs_csum_seed, (uint8_t *)&inum, sizeof(inum)); crc = calculate_crc32c(crc, (uint8_t *)&gen, sizeof(gen)); crc = calculate_crc32c(crc, (uint8_t *)buf, size); return (crc); } int ext2_dirent_csum_verify(struct inode *ip, struct ext2fs_direct_2 *ep) { uint32_t calculated; struct ext2fs_direct_tail *tp; tp = ext2_dirent_get_tail(ip, ep); if (tp == NULL) return (0); calculated = ext2_dirent_csum(ip, ep, (char *)tp - (char *)ep); - if (calculated != tp->e2dt_checksum) + if (calculated != le32toh(tp->e2dt_checksum)) return (EIO); return (0); } static struct ext2fs_htree_count * ext2_get_dx_count(struct inode *ip, struct ext2fs_direct_2 *ep, int *offset) { struct ext2fs_direct_2 *dp; struct ext2fs_htree_root_info *root; int count_offset; - if (ep->e2d_reclen == EXT2_BLOCK_SIZE(ip->i_e2fs)) + if (le16toh(ep->e2d_reclen) == EXT2_BLOCK_SIZE(ip->i_e2fs)) count_offset = 8; - else if (ep->e2d_reclen == 12) { + else if (le16toh(ep->e2d_reclen) == 12) { dp = (struct ext2fs_direct_2 *)(((char *)ep) + 12); - if (dp->e2d_reclen != EXT2_BLOCK_SIZE(ip->i_e2fs) - 12) + if (le16toh(dp->e2d_reclen) != EXT2_BLOCK_SIZE(ip->i_e2fs) - 12) return (NULL); root = (struct ext2fs_htree_root_info *)(((char *)dp + 12)); if (root->h_reserved1 || root->h_info_len != sizeof(struct ext2fs_htree_root_info)) return (NULL); count_offset = 32; } else return (NULL); if (offset) *offset = count_offset; return ((struct ext2fs_htree_count *)(((char *)ep) + count_offset)); } static uint32_t ext2_dx_csum(struct inode *ip, struct ext2fs_direct_2 *ep, int count_offset, int count, struct ext2fs_htree_tail *tp) { struct m_ext2fs *fs; char *buf; int size; uint32_t inum, old_csum, gen, crc; fs = ip->i_e2fs; buf = (char *)ep; size = count_offset + (count * sizeof(struct 
ext2fs_htree_entry)); old_csum = tp->ht_checksum; tp->ht_checksum = 0; - inum = ip->i_number; - gen = ip->i_gen; + inum = htole32(ip->i_number); + gen = htole32(ip->i_gen); crc = calculate_crc32c(fs->e2fs_csum_seed, (uint8_t *)&inum, sizeof(inum)); crc = calculate_crc32c(crc, (uint8_t *)&gen, sizeof(gen)); crc = calculate_crc32c(crc, (uint8_t *)buf, size); crc = calculate_crc32c(crc, (uint8_t *)tp, sizeof(struct ext2fs_htree_tail)); tp->ht_checksum = old_csum; - return (crc); + return htole32(crc); } int ext2_dx_csum_verify(struct inode *ip, struct ext2fs_direct_2 *ep) { uint32_t calculated; struct ext2fs_htree_count *cp; struct ext2fs_htree_tail *tp; int count_offset, limit, count; cp = ext2_get_dx_count(ip, ep, &count_offset); if (cp == NULL) return (0); - limit = cp->h_entries_max; - count = cp->h_entries_num; + limit = le16toh(cp->h_entries_max); + count = le16toh(cp->h_entries_num); if (count_offset + (limit * sizeof(struct ext2fs_htree_entry)) > ip->i_e2fs->e2fs_bsize - sizeof(struct ext2fs_htree_tail)) return (EIO); tp = (struct ext2fs_htree_tail *)(((struct ext2fs_htree_entry *)cp) + limit); calculated = ext2_dx_csum(ip, ep, count_offset, count, tp); if (tp->ht_checksum != calculated) return (EIO); return (0); } int ext2_dir_blk_csum_verify(struct inode *ip, struct buf *bp) { struct m_ext2fs *fs; struct ext2fs_direct_2 *ep; int error = 0; fs = ip->i_e2fs; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return (error); ep = (struct ext2fs_direct_2 *)bp->b_data; if (ext2_dirent_get_tail(ip, ep) != NULL) error = ext2_dirent_csum_verify(ip, ep); else if (ext2_get_dx_count(ip, ep, NULL) != NULL) error = ext2_dx_csum_verify(ip, ep); if (error) SDT_PROBE2(ext2fs, , trace, csum, 1, "bad directory csum detected"); return (error); } void ext2_dirent_csum_set(struct inode *ip, struct ext2fs_direct_2 *ep) { struct m_ext2fs *fs; struct ext2fs_direct_tail *tp; fs = ip->i_e2fs; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return; tp = ext2_dirent_get_tail(ip, ep); if (tp == NULL) return; tp->e2dt_checksum = - ext2_dirent_csum(ip, ep, (char *)tp - (char *)ep); + htole32(ext2_dirent_csum(ip, ep, (char *)tp - (char *)ep)); } void ext2_dx_csum_set(struct inode *ip, struct ext2fs_direct_2 *ep) { struct m_ext2fs *fs; struct ext2fs_htree_count *cp; struct ext2fs_htree_tail *tp; int count_offset, limit, count; fs = ip->i_e2fs; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return; cp = ext2_get_dx_count(ip, ep, &count_offset); if (cp == NULL) return; - limit = cp->h_entries_max; - count = cp->h_entries_num; + limit = le16toh(cp->h_entries_max); + count = le16toh(cp->h_entries_num); if (count_offset + (limit * sizeof(struct ext2fs_htree_entry)) > ip->i_e2fs->e2fs_bsize - sizeof(struct ext2fs_htree_tail)) return; tp = (struct ext2fs_htree_tail *)(((struct ext2fs_htree_entry *)cp) + limit); tp->ht_checksum = ext2_dx_csum(ip, ep, count_offset, count, tp); } static uint32_t ext2_extent_blk_csum(struct inode *ip, struct ext4_extent_header *ehp) { struct m_ext2fs *fs; size_t size; uint32_t inum, gen, crc; fs = ip->i_e2fs; size = EXT4_EXTENT_TAIL_OFFSET(ehp) + offsetof(struct ext4_extent_tail, et_checksum); - inum = ip->i_number; - gen = ip->i_gen; + inum = htole32(ip->i_number); + gen = htole32(ip->i_gen); crc = calculate_crc32c(fs->e2fs_csum_seed, (uint8_t *)&inum, sizeof(inum)); crc = calculate_crc32c(crc, (uint8_t *)&gen, sizeof(gen)); crc = calculate_crc32c(crc, (uint8_t *)ehp, size); return (crc); } int ext2_extent_blk_csum_verify(struct inode 
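The directory, htree and extent checksums above are all seeded the same way: the inode number and generation are converted to their on-disk little-endian representation with htole32() before being fed to the CRC, so a big-endian host computes the same value a little-endian host would. A minimal sketch of just that seeding step; crc32c() is declared only as a stand-in for the kernel's calculate_crc32c():

#include <stddef.h>
#include <stdint.h>
#include <sys/endian.h>

uint32_t crc32c(uint32_t crc, const uint8_t *buf, size_t len);

static uint32_t
inode_csum_seed(uint32_t fs_seed, uint32_t inum, uint32_t gen)
{
	/* Lay both values out little-endian before hashing any bytes. */
	uint32_t le_inum = htole32(inum);
	uint32_t le_gen = htole32(gen);
	uint32_t crc;

	crc = crc32c(fs_seed, (const uint8_t *)&le_inum, sizeof(le_inum));
	crc = crc32c(crc, (const uint8_t *)&le_gen, sizeof(le_gen));
	return (crc);
}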
*ip, void *data) { struct m_ext2fs *fs; struct ext4_extent_header *ehp; struct ext4_extent_tail *etp; uint32_t provided, calculated; fs = ip->i_e2fs; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return (0); ehp = (struct ext4_extent_header *)data; etp = (struct ext4_extent_tail *)(((char *)ehp) + EXT4_EXTENT_TAIL_OFFSET(ehp)); - provided = etp->et_checksum; + provided = le32toh(etp->et_checksum); calculated = ext2_extent_blk_csum(ip, ehp); if (provided != calculated) { SDT_PROBE2(ext2fs, , trace, csum, 1, "bad extent csum detected"); return (EIO); } return (0); } void ext2_extent_blk_csum_set(struct inode *ip, void *data) { struct m_ext2fs *fs; struct ext4_extent_header *ehp; struct ext4_extent_tail *etp; fs = ip->i_e2fs; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return; ehp = (struct ext4_extent_header *)data; etp = (struct ext4_extent_tail *)(((char *)data) + EXT4_EXTENT_TAIL_OFFSET(ehp)); - etp->et_checksum = ext2_extent_blk_csum(ip, - (struct ext4_extent_header *)data); + etp->et_checksum = htole32(ext2_extent_blk_csum(ip, + (struct ext4_extent_header *)data)); } int ext2_gd_i_bitmap_csum_verify(struct m_ext2fs *fs, int cg, struct buf *bp) { uint32_t hi, provided, calculated; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return (0); - provided = fs->e2fs_gd[cg].ext4bgd_i_bmap_csum; + provided = le16toh(fs->e2fs_gd[cg].ext4bgd_i_bmap_csum); calculated = calculate_crc32c(fs->e2fs_csum_seed, bp->b_data, - fs->e2fs->e2fs_ipg / 8); - if (fs->e2fs->e3fs_desc_size >= EXT2_BG_INODE_BITMAP_CSUM_HI_END) { - hi = fs->e2fs_gd[cg].ext4bgd_i_bmap_csum_hi; + fs->e2fs_ipg / 8); + if (le16toh(fs->e2fs->e3fs_desc_size) >= + EXT2_BG_INODE_BITMAP_CSUM_HI_END) { + hi = le16toh(fs->e2fs_gd[cg].ext4bgd_i_bmap_csum_hi); provided |= (hi << 16); } else calculated &= 0xFFFF; if (provided != calculated) { SDT_PROBE2(ext2fs, , trace, csum, 1, "bad inode bitmap csum detected"); return (EIO); } return (0); } void ext2_gd_i_bitmap_csum_set(struct m_ext2fs *fs, int cg, struct buf *bp) { uint32_t csum; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return; csum = calculate_crc32c(fs->e2fs_csum_seed, bp->b_data, - fs->e2fs->e2fs_ipg / 8); - fs->e2fs_gd[cg].ext4bgd_i_bmap_csum = csum & 0xFFFF; - if (fs->e2fs->e3fs_desc_size >= EXT2_BG_INODE_BITMAP_CSUM_HI_END) - fs->e2fs_gd[cg].ext4bgd_i_bmap_csum_hi = csum >> 16; + fs->e2fs_ipg / 8); + fs->e2fs_gd[cg].ext4bgd_i_bmap_csum = htole16(csum & 0xFFFF); + if (le16toh(fs->e2fs->e3fs_desc_size) >= EXT2_BG_INODE_BITMAP_CSUM_HI_END) + fs->e2fs_gd[cg].ext4bgd_i_bmap_csum_hi = htole16(csum >> 16); } int ext2_gd_b_bitmap_csum_verify(struct m_ext2fs *fs, int cg, struct buf *bp) { uint32_t hi, provided, calculated, size; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return (0); size = fs->e2fs_fpg / 8; - provided = fs->e2fs_gd[cg].ext4bgd_b_bmap_csum; + provided = le16toh(fs->e2fs_gd[cg].ext4bgd_b_bmap_csum); calculated = calculate_crc32c(fs->e2fs_csum_seed, bp->b_data, size); - if (fs->e2fs->e3fs_desc_size >= EXT2_BG_BLOCK_BITMAP_CSUM_HI_LOCATION) { - hi = fs->e2fs_gd[cg].ext4bgd_b_bmap_csum_hi; + if (le16toh(fs->e2fs->e3fs_desc_size) >= + EXT2_BG_BLOCK_BITMAP_CSUM_HI_LOCATION) { + hi = le16toh(fs->e2fs_gd[cg].ext4bgd_b_bmap_csum_hi); provided |= (hi << 16); } else calculated &= 0xFFFF; if (provided != calculated) { SDT_PROBE2(ext2fs, , trace, csum, 1, "bad block bitmap csum detected"); return (EIO); } return (0); } void ext2_gd_b_bitmap_csum_set(struct m_ext2fs *fs, int 
cg, struct buf *bp) { uint32_t csum, size; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return; size = fs->e2fs_fpg / 8; csum = calculate_crc32c(fs->e2fs_csum_seed, bp->b_data, size); - fs->e2fs_gd[cg].ext4bgd_b_bmap_csum = csum & 0xFFFF; - if (fs->e2fs->e3fs_desc_size >= EXT2_BG_BLOCK_BITMAP_CSUM_HI_LOCATION) - fs->e2fs_gd[cg].ext4bgd_b_bmap_csum_hi = csum >> 16; + fs->e2fs_gd[cg].ext4bgd_b_bmap_csum = htole16(csum & 0xFFFF); + if (le16toh(fs->e2fs->e3fs_desc_size) >= EXT2_BG_BLOCK_BITMAP_CSUM_HI_LOCATION) + fs->e2fs_gd[cg].ext4bgd_b_bmap_csum_hi = htole16(csum >> 16); } static uint32_t ext2_ei_csum(struct inode *ip, struct ext2fs_dinode *ei) { struct m_ext2fs *fs; uint32_t inode_csum_seed, inum, gen, crc; uint16_t dummy_csum = 0; unsigned int offset, csum_size; fs = ip->i_e2fs; offset = offsetof(struct ext2fs_dinode, e2di_chksum_lo); csum_size = sizeof(dummy_csum); - inum = ip->i_number; + inum = htole32(ip->i_number); crc = calculate_crc32c(fs->e2fs_csum_seed, (uint8_t *)&inum, sizeof(inum)); - gen = ip->i_gen; + gen = htole32(ip->i_gen); inode_csum_seed = calculate_crc32c(crc, (uint8_t *)&gen, sizeof(gen)); crc = calculate_crc32c(inode_csum_seed, (uint8_t *)ei, offset); crc = calculate_crc32c(crc, (uint8_t *)&dummy_csum, csum_size); offset += csum_size; crc = calculate_crc32c(crc, (uint8_t *)ei + offset, E2FS_REV0_INODE_SIZE - offset); if (EXT2_INODE_SIZE(fs) > E2FS_REV0_INODE_SIZE) { offset = offsetof(struct ext2fs_dinode, e2di_chksum_hi); crc = calculate_crc32c(crc, (uint8_t *)ei + E2FS_REV0_INODE_SIZE, offset - E2FS_REV0_INODE_SIZE); if ((EXT2_INODE_SIZE(ip->i_e2fs) > E2FS_REV0_INODE_SIZE && - ei->e2di_extra_isize >= EXT2_INODE_CSUM_HI_EXTRA_END)) { + le16toh(ei->e2di_extra_isize) >= + EXT2_INODE_CSUM_HI_EXTRA_END)) { crc = calculate_crc32c(crc, (uint8_t *)&dummy_csum, csum_size); offset += csum_size; } crc = calculate_crc32c(crc, (uint8_t *)ei + offset, EXT2_INODE_SIZE(fs) - offset); } return (crc); } int ext2_ei_csum_verify(struct inode *ip, struct ext2fs_dinode *ei) { struct m_ext2fs *fs; const static struct ext2fs_dinode ei_zero; uint32_t hi, provided, calculated; fs = ip->i_e2fs; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return (0); - provided = ei->e2di_chksum_lo; + provided = le16toh(ei->e2di_chksum_lo); calculated = ext2_ei_csum(ip, ei); if ((EXT2_INODE_SIZE(fs) > E2FS_REV0_INODE_SIZE && - ei->e2di_extra_isize >= EXT2_INODE_CSUM_HI_EXTRA_END)) { - hi = ei->e2di_chksum_hi; + le16toh(ei->e2di_extra_isize) >= EXT2_INODE_CSUM_HI_EXTRA_END)) { + hi = le16toh(ei->e2di_chksum_hi); provided |= hi << 16; } else calculated &= 0xFFFF; if (provided != calculated) { /* * If it is first time used dinode, * it is expected that it will be zeroed * and we will not return checksum error in this case. 
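The bitmap and inode checksums above also share a lo/hi storage convention: the 32-bit CRC is split across two little-endian 16-bit fields, and the high half is only stored when the descriptor (or inode) is large enough to hold it. A small sketch of the split and the matching recombination, with has_hi standing in for the e3fs_desc_size / e2di_extra_isize checks:

#include <stdint.h>
#include <sys/endian.h>

struct csum_lo_hi {
	uint16_t lo;	/* stored little-endian on disk */
	uint16_t hi;	/* only valid when the structure is big enough */
};

static void
csum_store(struct csum_lo_hi *c, uint32_t crc, int has_hi)
{
	c->lo = htole16((uint16_t)(crc & 0xFFFF));
	if (has_hi)
		c->hi = htole16((uint16_t)(crc >> 16));
}

static int
csum_check(const struct csum_lo_hi *c, uint32_t crc, int has_hi)
{
	uint32_t provided = le16toh(c->lo);

	if (has_hi)
		provided |= (uint32_t)le16toh(c->hi) << 16;
	else
		crc &= 0xFFFF;	/* only the low half is stored */
	return (provided == crc);
}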
*/ if (!memcmp(ei, &ei_zero, sizeof(struct ext2fs_dinode))) return (0); SDT_PROBE2(ext2fs, , trace, csum, 1, "bad inode csum"); return (EIO); } return (0); } void ext2_ei_csum_set(struct inode *ip, struct ext2fs_dinode *ei) { struct m_ext2fs *fs; uint32_t crc; fs = ip->i_e2fs; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) return; crc = ext2_ei_csum(ip, ei); - ei->e2di_chksum_lo = crc & 0xFFFF; + ei->e2di_chksum_lo = htole16(crc & 0xFFFF); if ((EXT2_INODE_SIZE(fs) > E2FS_REV0_INODE_SIZE && - ei->e2di_extra_isize >= EXT2_INODE_CSUM_HI_EXTRA_END)) - ei->e2di_chksum_hi = crc >> 16; + le16toh(ei->e2di_extra_isize) >= EXT2_INODE_CSUM_HI_EXTRA_END)) + ei->e2di_chksum_hi = htole16(crc >> 16); } static uint16_t ext2_crc16(uint16_t crc, const void *buffer, unsigned int len) { const unsigned char *cp = buffer; /* CRC table for the CRC-16. The poly is 0x8005 (x16 + x15 + x2 + 1). */ static uint16_t const crc16_table[256] = { 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440, 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841, 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40, 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41, 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641, 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040, 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240, 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441, 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41, 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41, 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40, 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041, 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240, 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441, 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41, 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41, 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640, 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241, 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440, 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841, 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40, 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41, 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 }; while (len--) crc = (((crc >> 8) & 0xffU) ^ crc16_table[(crc ^ *cp++) & 0xffU]) & 0x0000ffffU; return crc; } static uint16_t ext2_gd_csum(struct m_ext2fs *fs, uint32_t block_group, struct ext2_gd *gd) { size_t offset; uint32_t csum32; uint16_t crc, dummy_csum; offset = offsetof(struct ext2_gd, ext4bgd_csum); + block_group = htole32(block_group); + if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { csum32 = calculate_crc32c(fs->e2fs_csum_seed, (uint8_t *)&block_group, sizeof(block_group)); csum32 = calculate_crc32c(csum32, (uint8_t 
*)gd, offset); dummy_csum = 0; csum32 = calculate_crc32c(csum32, (uint8_t *)&dummy_csum, sizeof(dummy_csum)); offset += sizeof(dummy_csum); - if (offset < fs->e2fs->e3fs_desc_size) + if (offset < le16toh(fs->e2fs->e3fs_desc_size)) csum32 = calculate_crc32c(csum32, (uint8_t *)gd + offset, - fs->e2fs->e3fs_desc_size - offset); + le16toh(fs->e2fs->e3fs_desc_size) - offset); crc = csum32 & 0xFFFF; - return (crc); + return (htole16(crc)); } else if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) { crc = ext2_crc16(~0, fs->e2fs->e2fs_uuid, sizeof(fs->e2fs->e2fs_uuid)); crc = ext2_crc16(crc, (uint8_t *)&block_group, sizeof(block_group)); crc = ext2_crc16(crc, (uint8_t *)gd, offset); offset += sizeof(gd->ext4bgd_csum); /* skip checksum */ if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT) && - offset < fs->e2fs->e3fs_desc_size) + offset < le16toh(fs->e2fs->e3fs_desc_size)) crc = ext2_crc16(crc, (uint8_t *)gd + offset, - fs->e2fs->e3fs_desc_size - offset); - return (crc); + le16toh(fs->e2fs->e3fs_desc_size) - offset); + return (htole16(crc)); } return (0); } int ext2_gd_csum_verify(struct m_ext2fs *fs, struct cdev *dev) { unsigned int i; int error = 0; for (i = 0; i < fs->e2fs_gcount; i++) { if (fs->e2fs_gd[i].ext4bgd_csum != ext2_gd_csum(fs, i, &fs->e2fs_gd[i])) { printf( "WARNING: mount of %s denied due bad gd=%d csum=0x%x, expected=0x%x - run fsck\n", devtoname(dev), i, fs->e2fs_gd[i].ext4bgd_csum, ext2_gd_csum(fs, i, &fs->e2fs_gd[i])); error = EIO; break; } } return (error); } void ext2_gd_csum_set(struct m_ext2fs *fs) { unsigned int i; for (i = 0; i < fs->e2fs_gcount; i++) - fs->e2fs_gd[i].ext4bgd_csum = - ext2_gd_csum(fs, i, &fs->e2fs_gd[i]); + fs->e2fs_gd[i].ext4bgd_csum = ext2_gd_csum(fs, i, &fs->e2fs_gd[i]); } Index: head/sys/fs/ext2fs/ext2_extattr.c =================================================================== --- head/sys/fs/ext2fs/ext2_extattr.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_extattr.c (revision 361136) @@ -1,1250 +1,1263 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017, Fedor Uporov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
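The CRC-16 table in ext2_crc16() above implements the reflected 0x8005 polynomial used for the older GDT_CSUM group-descriptor checksums. For reference, the table-driven update is equivalent to this bit-at-a-time form (a sketch for clarity, not a replacement for the table version):

#include <stddef.h>
#include <stdint.h>

/* Reflected CRC-16, polynomial 0x8005 (0xA001 in reflected form). */
static uint16_t
crc16_bitwise(uint16_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int b;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (b = 0; b < 8; b++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return (crc);
}

Feeding the single byte 0x01 with an initial value of 0 yields 0xC0C1, which matches the second entry of the table above.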
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(ext2fs); /* * ext2fs trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(ext2fs, , trace, extattr, "int", "char*"); static int ext2_extattr_attrnamespace_to_bsd(int attrnamespace) { switch (attrnamespace) { case EXT4_XATTR_INDEX_SYSTEM: return (EXTATTR_NAMESPACE_SYSTEM); case EXT4_XATTR_INDEX_USER: return (EXTATTR_NAMESPACE_USER); case EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT: return (POSIX1E_ACL_DEFAULT_EXTATTR_NAMESPACE); case EXT4_XATTR_INDEX_POSIX_ACL_ACCESS: return (POSIX1E_ACL_ACCESS_EXTATTR_NAMESPACE); } return (EXTATTR_NAMESPACE_EMPTY); } static const char * ext2_extattr_name_to_bsd(int attrnamespace, const char *name, int* name_len) { if (attrnamespace == EXT4_XATTR_INDEX_SYSTEM) return (name); else if (attrnamespace == EXT4_XATTR_INDEX_USER) return (name); else if (attrnamespace == EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT) { *name_len = strlen(POSIX1E_ACL_DEFAULT_EXTATTR_NAME); return (POSIX1E_ACL_DEFAULT_EXTATTR_NAME); } else if (attrnamespace == EXT4_XATTR_INDEX_POSIX_ACL_ACCESS) { *name_len = strlen(POSIX1E_ACL_ACCESS_EXTATTR_NAME); return (POSIX1E_ACL_ACCESS_EXTATTR_NAME); } /* * XXX: Not all linux namespaces are mapped to bsd for now, * return NULL, which will be converted to ENOTSUP on upper layer. */ SDT_PROBE2(ext2fs, , trace, extattr, 1, "can not convert ext2fs name to bsd namespace"); return (NULL); } static int ext2_extattr_attrnamespace_to_linux(int attrnamespace, const char *name) { if (attrnamespace == POSIX1E_ACL_DEFAULT_EXTATTR_NAMESPACE && !strcmp(name, POSIX1E_ACL_DEFAULT_EXTATTR_NAME)) return (EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT); if (attrnamespace == POSIX1E_ACL_ACCESS_EXTATTR_NAMESPACE && !strcmp(name, POSIX1E_ACL_ACCESS_EXTATTR_NAME)) return (EXT4_XATTR_INDEX_POSIX_ACL_ACCESS); switch (attrnamespace) { case EXTATTR_NAMESPACE_SYSTEM: return (EXT4_XATTR_INDEX_SYSTEM); case EXTATTR_NAMESPACE_USER: return (EXT4_XATTR_INDEX_USER); } /* * In this case namespace conversion should be unique, * so this point is unreachable. 
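Several routines in this file locate the in-inode attribute area by skipping the fixed revision-0 inode plus the on-disk e2di_extra_isize and then checking a little-endian magic word, as shown in the hunks that follow. A hedged sketch of just that location step, operating on a raw inode image; rev0_size and magic are passed in rather than restated (they stand for E2FS_REV0_INODE_SIZE and EXTATTR_MAGIC):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/endian.h>

static const uint8_t *
ibody_xattr_header(const uint8_t *raw_inode, size_t inode_size,
    size_t rev0_size, uint16_t extra_isize_le, uint32_t magic)
{
	size_t off = rev0_size + le16toh(extra_isize_le);
	uint32_t word;

	if (off + sizeof(word) > inode_size)
		return (NULL);		/* no room for an in-inode EA area */
	memcpy(&word, raw_inode + off, sizeof(word));	/* h_magic, LE */
	return (le32toh(word) == magic ? raw_inode + off : NULL);
}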
*/ return (-1); } static const char * ext2_extattr_name_to_linux(int attrnamespace, const char *name) { if (attrnamespace == POSIX1E_ACL_DEFAULT_EXTATTR_NAMESPACE || attrnamespace == POSIX1E_ACL_ACCESS_EXTATTR_NAMESPACE) return (""); else return (name); } int ext2_extattr_valid_attrname(int attrnamespace, const char *attrname) { if (attrnamespace == EXTATTR_NAMESPACE_EMPTY) return (EINVAL); if (strlen(attrname) == 0) return (EINVAL); if (strlen(attrname) + 1 > EXT2_EXTATTR_NAMELEN_MAX) return (ENAMETOOLONG); return (0); } static int ext2_extattr_check(struct ext2fs_extattr_entry *entry, char *end) { struct ext2fs_extattr_entry *next; while (!EXT2_IS_LAST_ENTRY(entry)) { next = EXT2_EXTATTR_NEXT(entry); if ((char *)next >= end) return (EIO); entry = next; } return (0); } static int ext2_extattr_block_check(struct inode *ip, struct buf *bp) { struct ext2fs_extattr_header *header; int error; header = (struct ext2fs_extattr_header *)bp->b_data; error = ext2_extattr_check(EXT2_IFIRST(header), bp->b_data + bp->b_bufsize); if (error) return (error); return (ext2_extattr_blk_csum_verify(ip, bp)); } int ext2_extattr_inode_list(struct inode *ip, int attrnamespace, struct uio *uio, size_t *size) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_dinode_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } struct ext2fs_dinode *dinode = (struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)); /* Check attributes magic value */ header = (struct ext2fs_extattr_dinode_header *)((char *)dinode + - E2FS_REV0_INODE_SIZE + dinode->e2di_extra_isize); + E2FS_REV0_INODE_SIZE + le16toh(dinode->e2di_extra_isize)); - if (header->h_magic != EXTATTR_MAGIC) { + if (le32toh(header->h_magic) != EXTATTR_MAGIC) { brelse(bp); return (0); } error = ext2_extattr_check(EXT2_IFIRST(header), (char *)dinode + EXT2_INODE_SIZE(fs)); if (error) { brelse(bp); return (error); } for (entry = EXT2_IFIRST(header); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (size != NULL) *size += name_len + 1; if (uio != NULL) { char *name = malloc(name_len + 1, M_TEMP, M_WAITOK); name[0] = name_len; memcpy(&name[1], attr_name, name_len); error = uiomove(name, name_len + 1, uio); free(name, M_TEMP); if (error) break; } } brelse(bp); return (error); } int ext2_extattr_block_list(struct inode *ip, int attrnamespace, struct uio *uio, size_t *size) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); - if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { + if (le32toh(header->h_magic) != EXTATTR_MAGIC || + le32toh(header->h_blocks) != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_block_check(ip, bp); if (error) { brelse(bp); return (error); } for (entry = EXT2_FIRST_ENTRY(bp); !EXT2_IS_LAST_ENTRY(entry); entry = 
EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (size != NULL) *size += name_len + 1; if (uio != NULL) { char *name = malloc(name_len + 1, M_TEMP, M_WAITOK); name[0] = name_len; memcpy(&name[1], attr_name, name_len); error = uiomove(name, name_len + 1, uio); free(name, M_TEMP); if (error) break; } } brelse(bp); return (error); } int ext2_extattr_inode_get(struct inode *ip, int attrnamespace, const char *name, struct uio *uio, size_t *size) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_dinode_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } struct ext2fs_dinode *dinode = (struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)); /* Check attributes magic value */ header = (struct ext2fs_extattr_dinode_header *)((char *)dinode + - E2FS_REV0_INODE_SIZE + dinode->e2di_extra_isize); + E2FS_REV0_INODE_SIZE + le16toh(dinode->e2di_extra_isize)); - if (header->h_magic != EXTATTR_MAGIC) { + if (le32toh(header->h_magic) != EXTATTR_MAGIC) { brelse(bp); return (ENOATTR); } error = ext2_extattr_check(EXT2_IFIRST(header), (char *)dinode + EXT2_INODE_SIZE(fs)); if (error) { brelse(bp); return (error); } for (entry = EXT2_IFIRST(header); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { if (size != NULL) - *size += entry->e_value_size; + *size += le32toh(entry->e_value_size); if (uio != NULL) error = uiomove(((char *)EXT2_IFIRST(header)) + - entry->e_value_offs, entry->e_value_size, uio); + le16toh(entry->e_value_offs), + le32toh(entry->e_value_size), uio); brelse(bp); return (error); } } brelse(bp); return (ENOATTR); } int ext2_extattr_block_get(struct inode *ip, int attrnamespace, const char *name, struct uio *uio, size_t *size) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); - if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { + if (le32toh(header->h_magic) != EXTATTR_MAGIC || + le32toh(header->h_blocks) != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_block_check(ip, bp); if (error) { brelse(bp); return (error); } for (entry = EXT2_FIRST_ENTRY(bp); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { if (size != NULL) 
- *size += entry->e_value_size; + *size += le32toh(entry->e_value_size); if (uio != NULL) - error = uiomove(bp->b_data + entry->e_value_offs, - entry->e_value_size, uio); + error = uiomove(bp->b_data + + le16toh(entry->e_value_offs), + le32toh(entry->e_value_size), uio); brelse(bp); return (error); } } brelse(bp); return (ENOATTR); } static uint16_t ext2_extattr_delete_value(char *off, struct ext2fs_extattr_entry *first_entry, struct ext2fs_extattr_entry *entry, char *end) { uint16_t min_offs; struct ext2fs_extattr_entry *next; min_offs = end - off; next = first_entry; while (!EXT2_IS_LAST_ENTRY(next)) { - if (min_offs > next->e_value_offs && next->e_value_offs > 0) - min_offs = next->e_value_offs; + if (min_offs > le16toh(next->e_value_offs) && + le16toh(next->e_value_offs) > 0) + min_offs = le16toh(next->e_value_offs); next = EXT2_EXTATTR_NEXT(next); } if (entry->e_value_size == 0) return (min_offs); - memmove(off + min_offs + EXT2_EXTATTR_SIZE(entry->e_value_size), - off + min_offs, entry->e_value_offs - min_offs); + memmove(off + min_offs + EXT2_EXTATTR_SIZE(le32toh(entry->e_value_size)), + off + min_offs, le16toh(entry->e_value_offs) - min_offs); /* Adjust all value offsets */ next = first_entry; while (!EXT2_IS_LAST_ENTRY(next)) { - if (next->e_value_offs > 0 && - next->e_value_offs < entry->e_value_offs) - next->e_value_offs += - EXT2_EXTATTR_SIZE(entry->e_value_size); + if (le16toh(next->e_value_offs) > 0 && + le16toh(next->e_value_offs) < le16toh(entry->e_value_offs)) + next->e_value_offs = htole16(le16toh(next->e_value_offs) + + EXT2_EXTATTR_SIZE(le32toh(entry->e_value_size))); next = EXT2_EXTATTR_NEXT(next); } - min_offs += EXT2_EXTATTR_SIZE(entry->e_value_size); + min_offs += EXT2_EXTATTR_SIZE(le32toh(entry->e_value_size)); return (min_offs); } static void ext2_extattr_delete_entry(char *off, struct ext2fs_extattr_entry *first_entry, struct ext2fs_extattr_entry *entry, char *end) { char *pad; struct ext2fs_extattr_entry *next; /* Clean entry value */ ext2_extattr_delete_value(off, first_entry, entry, end); /* Clean the entry */ next = first_entry; while (!EXT2_IS_LAST_ENTRY(next)) next = EXT2_EXTATTR_NEXT(next); pad = (char*)next + sizeof(uint32_t); memmove(entry, (char *)entry + EXT2_EXTATTR_LEN(entry->e_name_len), pad - ((char *)entry + EXT2_EXTATTR_LEN(entry->e_name_len))); } int ext2_extattr_inode_delete(struct inode *ip, int attrnamespace, const char *name) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_dinode_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } struct ext2fs_dinode *dinode = (struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)); /* Check attributes magic value */ header = (struct ext2fs_extattr_dinode_header *)((char *)dinode + - E2FS_REV0_INODE_SIZE + dinode->e2di_extra_isize); + E2FS_REV0_INODE_SIZE + le16toh(dinode->e2di_extra_isize)); - if (header->h_magic != EXTATTR_MAGIC) { + if (le32toh(header->h_magic) != EXTATTR_MAGIC) { brelse(bp); return (ENOATTR); } error = ext2_extattr_check(EXT2_IFIRST(header), (char *)dinode + EXT2_INODE_SIZE(fs)); if (error) { brelse(bp); return (error); } /* If I am last entry, just make magic zero */ entry = EXT2_IFIRST(header); if ((EXT2_IS_LAST_ENTRY(EXT2_EXTATTR_NEXT(entry))) && (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) == 
attrnamespace)) { name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { memset(header, 0, sizeof(struct ext2fs_extattr_dinode_header)); return (bwrite(bp)); } } for (entry = EXT2_IFIRST(header); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { ext2_extattr_delete_entry((char *)EXT2_IFIRST(header), EXT2_IFIRST(header), entry, (char *)dinode + EXT2_INODE_SIZE(fs)); return (bwrite(bp)); } } brelse(bp); return (ENOATTR); } static int ext2_extattr_block_clone(struct inode *ip, struct buf **bpp) { struct m_ext2fs *fs; struct buf *sbp; struct buf *cbp; struct ext2fs_extattr_header *header; uint64_t facl; fs = ip->i_e2fs; sbp = *bpp; header = EXT2_HDR(sbp); - if (header->h_magic != EXTATTR_MAGIC || header->h_refcount == 1) + if (le32toh(header->h_magic) != EXTATTR_MAGIC || + le32toh(header->h_refcount) == 1) return (EINVAL); facl = ext2_alloc_meta(ip); if (!facl) return (ENOSPC); cbp = getblk(ip->i_devvp, fsbtodb(fs, facl), fs->e2fs_bsize, 0, 0, 0); if (!cbp) { ext2_blkfree(ip, facl, fs->e2fs_bsize); return (EIO); } memcpy(cbp->b_data, sbp->b_data, fs->e2fs_bsize); - header->h_refcount--; + header->h_refcount = htole32(le32toh(header->h_refcount) - 1); bwrite(sbp); ip->i_facl = facl; ext2_update(ip->i_vnode, 1); header = EXT2_HDR(cbp); - header->h_refcount = 1; + header->h_refcount = htole32(1); *bpp = cbp; return (0); } int ext2_extattr_block_delete(struct inode *ip, int attrnamespace, const char *name) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); - if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { + if (le32toh(header->h_magic) != EXTATTR_MAGIC || + le32toh(header->h_blocks) != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_block_check(ip, bp); if (error) { brelse(bp); return (error); } - if (header->h_refcount > 1) { + if (le32toh(header->h_refcount) > 1) { error = ext2_extattr_block_clone(ip, &bp); if (error) { brelse(bp); return (error); } } /* If I am last entry, clean me and free the block */ entry = EXT2_FIRST_ENTRY(bp); if (EXT2_IS_LAST_ENTRY(EXT2_EXTATTR_NEXT(entry)) && (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) == attrnamespace)) { name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { ip->i_blocks -= btodb(fs->e2fs_bsize); ext2_blkfree(ip, ip->i_facl, fs->e2fs_bsize); ip->i_facl = 0; error = ext2_update(ip->i_vnode, 1); brelse(bp); return (error); } } for (entry = EXT2_FIRST_ENTRY(bp); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = 
entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { ext2_extattr_delete_entry(bp->b_data, EXT2_FIRST_ENTRY(bp), entry, bp->b_data + bp->b_bufsize); return (bwrite(bp)); } } brelse(bp); return (ENOATTR); } static struct ext2fs_extattr_entry * allocate_entry(const char *name, int attrnamespace, uint16_t offs, uint32_t size, uint32_t hash) { const char *attr_name; int name_len; struct ext2fs_extattr_entry *entry; attr_name = ext2_extattr_name_to_linux(attrnamespace, name); name_len = strlen(attr_name); entry = malloc(sizeof(struct ext2fs_extattr_entry) + name_len, M_TEMP, M_WAITOK); entry->e_name_len = name_len; entry->e_name_index = ext2_extattr_attrnamespace_to_linux(attrnamespace, name); - entry->e_value_offs = offs; + entry->e_value_offs = htole16(offs); entry->e_value_block = 0; - entry->e_value_size = size; - entry->e_hash = hash; + entry->e_value_size = htole32(size); + entry->e_hash = htole32(hash); memcpy(entry->e_name, name, name_len); return (entry); } static void free_entry(struct ext2fs_extattr_entry *entry) { free(entry, M_TEMP); } static int ext2_extattr_get_size(struct ext2fs_extattr_entry *first_entry, struct ext2fs_extattr_entry *exist_entry, int header_size, int name_len, int new_size) { struct ext2fs_extattr_entry *entry; int size; size = header_size; size += sizeof(uint32_t); if (NULL == exist_entry) { size += EXT2_EXTATTR_LEN(name_len); size += EXT2_EXTATTR_SIZE(new_size); } if (first_entry) for (entry = first_entry; !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (entry != exist_entry) size += EXT2_EXTATTR_LEN(entry->e_name_len) + - EXT2_EXTATTR_SIZE(entry->e_value_size); + EXT2_EXTATTR_SIZE(le32toh(entry->e_value_size)); else size += EXT2_EXTATTR_LEN(entry->e_name_len) + EXT2_EXTATTR_SIZE(new_size); } return (size); } static void ext2_extattr_set_exist_entry(char *off, struct ext2fs_extattr_entry *first_entry, struct ext2fs_extattr_entry *entry, char *end, struct uio *uio) { uint16_t min_offs; min_offs = ext2_extattr_delete_value(off, first_entry, entry, end); - entry->e_value_size = uio->uio_resid; - if (entry->e_value_size) - entry->e_value_offs = min_offs - - EXT2_EXTATTR_SIZE(uio->uio_resid); + entry->e_value_size = htole32(uio->uio_resid); + if (le32toh(entry->e_value_size)) + entry->e_value_offs = htole16(min_offs - + EXT2_EXTATTR_SIZE(uio->uio_resid)); else entry->e_value_offs = 0; - uiomove(off + entry->e_value_offs, entry->e_value_size, uio); + uiomove(off + le16toh(entry->e_value_offs), + le32toh(entry->e_value_size), uio); } static struct ext2fs_extattr_entry * ext2_extattr_set_new_entry(char *off, struct ext2fs_extattr_entry *first_entry, const char *name, int attrnamespace, char *end, struct uio *uio) { int name_len; char *pad; uint16_t min_offs; struct ext2fs_extattr_entry *entry; struct ext2fs_extattr_entry *new_entry; /* Find pad's */ min_offs = end - off; entry = first_entry; while (!EXT2_IS_LAST_ENTRY(entry)) { - if (min_offs > entry->e_value_offs && entry->e_value_offs > 0) - min_offs = entry->e_value_offs; + if (min_offs > le16toh(entry->e_value_offs) && + le16toh(entry->e_value_offs) > 0) + min_offs = le16toh(entry->e_value_offs); entry = EXT2_EXTATTR_NEXT(entry); } pad = (char*)entry + sizeof(uint32_t); /* Find entry insert position */ name_len = strlen(name); entry = first_entry; while (!EXT2_IS_LAST_ENTRY(entry)) { if (!(attrnamespace - 
entry->e_name_index) && !(name_len - entry->e_name_len)) if (memcmp(name, entry->e_name, name_len) <= 0) break; entry = EXT2_EXTATTR_NEXT(entry); } /* Create new entry and insert it */ new_entry = allocate_entry(name, attrnamespace, 0, uio->uio_resid, 0); memmove((char *)entry + EXT2_EXTATTR_LEN(new_entry->e_name_len), entry, pad - (char*)entry); memcpy(entry, new_entry, EXT2_EXTATTR_LEN(new_entry->e_name_len)); free_entry(new_entry); new_entry = entry; - if (new_entry->e_value_size > 0) - new_entry->e_value_offs = min_offs - - EXT2_EXTATTR_SIZE(new_entry->e_value_size); + if (le32toh(new_entry->e_value_size) > 0) + new_entry->e_value_offs = htole16(min_offs - + EXT2_EXTATTR_SIZE(le32toh(new_entry->e_value_size))); - uiomove(off + new_entry->e_value_offs, new_entry->e_value_size, uio); + uiomove(off + le16toh(new_entry->e_value_offs), + le32toh(new_entry->e_value_size), uio); return (new_entry); } int ext2_extattr_inode_set(struct inode *ip, int attrnamespace, const char *name, struct uio *uio) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_dinode_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; size_t size = 0, max_size; int error; fs = ip->i_e2fs; if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } struct ext2fs_dinode *dinode = (struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)); /* Check attributes magic value */ header = (struct ext2fs_extattr_dinode_header *)((char *)dinode + - E2FS_REV0_INODE_SIZE + dinode->e2di_extra_isize); + E2FS_REV0_INODE_SIZE + le16toh(dinode->e2di_extra_isize)); - if (header->h_magic != EXTATTR_MAGIC) { + if (le32toh(header->h_magic) != EXTATTR_MAGIC) { brelse(bp); return (ENOSPC); } error = ext2_extattr_check(EXT2_IFIRST(header), (char *)dinode + EXT2_INODE_SIZE(fs)); if (error) { brelse(bp); return (error); } /* Find if entry exist */ for (entry = EXT2_IFIRST(header); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) break; } max_size = EXT2_INODE_SIZE(fs) - E2FS_REV0_INODE_SIZE - - dinode->e2di_extra_isize; + le16toh(dinode->e2di_extra_isize); if (!EXT2_IS_LAST_ENTRY(entry)) { size = ext2_extattr_get_size(EXT2_IFIRST(header), entry, sizeof(struct ext2fs_extattr_dinode_header), entry->e_name_len, uio->uio_resid); if (size > max_size) { brelse(bp); return (ENOSPC); } ext2_extattr_set_exist_entry((char *)EXT2_IFIRST(header), EXT2_IFIRST(header), entry, (char *)header + max_size, uio); } else { /* Ensure that the same entry does not exist in the block */ if (ip->i_facl) { error = ext2_extattr_block_get(ip, attrnamespace, name, NULL, &size); if (error != ENOATTR || size > 0) { brelse(bp); if (size > 0) error = ENOSPC; return (error); } } size = ext2_extattr_get_size(EXT2_IFIRST(header), NULL, sizeof(struct ext2fs_extattr_dinode_header), entry->e_name_len, uio->uio_resid); if (size > max_size) { brelse(bp); return (ENOSPC); } ext2_extattr_set_new_entry((char *)EXT2_IFIRST(header), EXT2_IFIRST(header), name, attrnamespace, (char *)header + max_size, uio); } return (bwrite(bp)); } static void ext2_extattr_hash_entry(struct 
ext2fs_extattr_header *header, struct ext2fs_extattr_entry *entry) { uint32_t hash = 0; char *name = entry->e_name; int n; for (n=0; n < entry->e_name_len; n++) { hash = (hash << EXT2_EXTATTR_NAME_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - EXT2_EXTATTR_NAME_HASH_SHIFT)) ^ (*name++); } if (entry->e_value_block == 0 && entry->e_value_size != 0) { - uint32_t *value = (uint32_t *)((char *)header + entry->e_value_offs); - for (n = (entry->e_value_size + + uint32_t *value = (uint32_t *)((char *)header + + le16toh(entry->e_value_offs)); + for (n = (le32toh(entry->e_value_size) + EXT2_EXTATTR_ROUND) >> EXT2_EXTATTR_PAD_BITS; n; n--) { hash = (hash << EXT2_EXTATTR_VALUE_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - EXT2_EXTATTR_VALUE_HASH_SHIFT)) ^ - (*value++); + le32toh(*value++); } } - entry->e_hash = hash; + entry->e_hash = htole32(hash); } static void ext2_extattr_rehash(struct ext2fs_extattr_header *header, struct ext2fs_extattr_entry *entry) { struct ext2fs_extattr_entry *here; uint32_t hash = 0; ext2_extattr_hash_entry(header, entry); here = EXT2_ENTRY(header+1); while (!EXT2_IS_LAST_ENTRY(here)) { - if (!here->e_hash) { + if (here->e_hash == 0) { /* Block is not shared if an entry's hash value == 0 */ hash = 0; break; } hash = (hash << EXT2_EXTATTR_BLOCK_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - EXT2_EXTATTR_BLOCK_HASH_SHIFT)) ^ - here->e_hash; + le32toh(here->e_hash); here = EXT2_EXTATTR_NEXT(here); } - header->h_hash = hash; + header->h_hash = htole32(hash); } int ext2_extattr_block_set(struct inode *ip, int attrnamespace, const char *name, struct uio *uio) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; size_t size; int error; fs = ip->i_e2fs; if (ip->i_facl) { error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); - if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { + if (le32toh(header->h_magic) != EXTATTR_MAGIC || + le32toh(header->h_blocks) != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_block_check(ip, bp); if (error) { brelse(bp); return (error); } - if (header->h_refcount > 1) { + if (le32toh(header->h_refcount) > 1) { error = ext2_extattr_block_clone(ip, &bp); if (error) { brelse(bp); return (error); } header = EXT2_HDR(bp); } /* Find if entry exist */ for (entry = EXT2_FIRST_ENTRY(bp); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) break; } if (!EXT2_IS_LAST_ENTRY(entry)) { size = ext2_extattr_get_size(EXT2_FIRST_ENTRY(bp), entry, sizeof(struct ext2fs_extattr_header), entry->e_name_len, uio->uio_resid); if (size > bp->b_bufsize) { brelse(bp); return (ENOSPC); } ext2_extattr_set_exist_entry(bp->b_data, EXT2_FIRST_ENTRY(bp), entry, bp->b_data + bp->b_bufsize, uio); } else { size = ext2_extattr_get_size(EXT2_FIRST_ENTRY(bp), NULL, sizeof(struct ext2fs_extattr_header), strlen(name), uio->uio_resid); if (size > bp->b_bufsize) { brelse(bp); return (ENOSPC); } entry = ext2_extattr_set_new_entry(bp->b_data, EXT2_FIRST_ENTRY(bp), name, attrnamespace, bp->b_data + bp->b_bufsize, uio); /* Clean the same entry in the inode 
*/ error = ext2_extattr_inode_delete(ip, attrnamespace, name); if (error && error != ENOATTR) { brelse(bp); return (error); } } ext2_extattr_rehash(header, entry); ext2_extattr_blk_csum_set(ip, bp); return (bwrite(bp)); } size = ext2_extattr_get_size(NULL, NULL, sizeof(struct ext2fs_extattr_header), strlen(ext2_extattr_name_to_linux(attrnamespace, name)), uio->uio_resid); if (size > fs->e2fs_bsize) return (ENOSPC); /* Allocate block, fill EA header and insert entry */ ip->i_facl = ext2_alloc_meta(ip); if (0 == ip->i_facl) return (ENOSPC); ip->i_blocks += btodb(fs->e2fs_bsize); ext2_update(ip->i_vnode, 1); bp = getblk(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, 0, 0, 0); if (!bp) { ext2_blkfree(ip, ip->i_facl, fs->e2fs_bsize); ip->i_blocks -= btodb(fs->e2fs_bsize); ip->i_facl = 0; ext2_update(ip->i_vnode, 1); return (EIO); } header = EXT2_HDR(bp); - header->h_magic = EXTATTR_MAGIC; - header->h_refcount = 1; - header->h_blocks = 1; + header->h_magic = htole32(EXTATTR_MAGIC); + header->h_refcount = htole32(1); + header->h_blocks = htole32(1); header->h_hash = 0; memset(header->h_reserved, 0, sizeof(header->h_reserved)); memcpy(bp->b_data, header, sizeof(struct ext2fs_extattr_header)); memset(EXT2_FIRST_ENTRY(bp), 0, sizeof(uint32_t)); entry = ext2_extattr_set_new_entry(bp->b_data, EXT2_FIRST_ENTRY(bp), name, attrnamespace, bp->b_data + bp->b_bufsize, uio); /* Clean the same entry in the inode */ error = ext2_extattr_inode_delete(ip, attrnamespace, name); if (error && error != ENOATTR) { brelse(bp); return (error); } ext2_extattr_rehash(header, entry); ext2_extattr_blk_csum_set(ip, bp); return (bwrite(bp)); } int ext2_extattr_free(struct inode *ip) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; int error; fs = ip->i_e2fs; if (!ip->i_facl) return (0); error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); - if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { + if (le32toh(header->h_magic) != EXTATTR_MAGIC || + le32toh(header->h_blocks) != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_check(EXT2_FIRST_ENTRY(bp), bp->b_data + bp->b_bufsize); if (error) { brelse(bp); return (error); } - if (header->h_refcount > 1) { - header->h_refcount--; + if (le32toh(header->h_refcount) > 1) { + header->h_refcount = htole32(le32toh(header->h_refcount) - 1); bwrite(bp); } else { ext2_blkfree(ip, ip->i_facl, ip->i_e2fs->e2fs_bsize); brelse(bp); } ip->i_blocks -= btodb(ip->i_e2fs->e2fs_bsize); ip->i_facl = 0; ext2_update(ip->i_vnode, 1); return (0); } Index: head/sys/fs/ext2fs/ext2_extents.c =================================================================== --- head/sys/fs/ext2fs/ext2_extents.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_extents.c (revision 361136) @@ -1,1581 +1,1593 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Zheng Liu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(ext2fs); /* * ext2fs trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(ext2fs, , trace, extents, "int", "char*"); static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents"); #ifdef EXT2FS_PRINT_EXTENTS static void ext4_ext_print_extent(struct ext4_extent *ep) { printf(" ext %p => (blk %u len %u start %ju)\n", - ep, ep->e_blk, ep->e_len, - (uint64_t)ep->e_start_hi << 32 | ep->e_start_lo); + ep, le32toh(ep->e_blk), le16toh(ep->e_len), + (uint64_t)le16toh(ep->e_start_hi) << 32 | le32toh(ep->e_start_lo)); } static void ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp); static void ext4_ext_print_index(struct inode *ip, struct ext4_extent_index *ex, int do_walk) { struct m_ext2fs *fs; struct buf *bp; int error; fs = ip->i_e2fs; printf(" index %p => (blk %u pblk %ju)\n", - ex, ex->ei_blk, (uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo); + ex, le32toh(ex->ei_blk), (uint64_t)le16toh(ex->ei_leaf_hi) << 32 | + le32toh(ex->ei_leaf_lo)); if(!do_walk) return; if ((error = bread(ip->i_devvp, - fsbtodb(fs, ((uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo)), - (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { + fsbtodb(fs, ((uint64_t)le16toh(ex->ei_leaf_hi) << 32 | + le32toh(ex->ei_leaf_lo))), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return; } ext4_ext_print_header(ip, (struct ext4_extent_header *)bp->b_data); brelse(bp); } static void ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp) { int i; printf("header %p => (magic 0x%x entries %d max %d depth %d gen %d)\n", - ehp, ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth, - ehp->eh_gen); + ehp, le16toh(ehp->eh_magic), le16toh(ehp->eh_ecount), + le16toh(ehp->eh_max), le16toh(ehp->eh_depth), le32toh(ehp->eh_gen)); - for (i = 0; i < ehp->eh_ecount; i++) + for (i = 0; i < le16toh(ehp->eh_ecount); i++) if (ehp->eh_depth != 0) ext4_ext_print_index(ip, (struct ext4_extent_index *)(ehp + 1 + i), 1); else ext4_ext_print_extent((struct ext4_extent *)(ehp + 1 + i)); } static void ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path) { int k, l; l = path->ep_depth; printf("ip=%ju, Path:\n", ip->i_number); for (k = 0; k <= l; k++, path++) { if (path->ep_index) { ext4_ext_print_index(ip, path->ep_index, 0); } else if (path->ep_ext) { ext4_ext_print_extent(path->ep_ext); } } } void ext4_ext_print_extent_tree_status(struct inode *ip) { struct ext4_extent_header *ehp; ehp = (struct ext4_extent_header *)(char *)ip->i_db; printf("Extent status:ip=%ju\n", ip->i_number); if 
(!(ip->i_flag & IN_E4EXTENTS)) return; ext4_ext_print_header(ip, ehp); return; } #endif static inline struct ext4_extent_header * ext4_ext_inode_header(struct inode *ip) { return ((struct ext4_extent_header *)ip->i_db); } static inline struct ext4_extent_header * ext4_ext_block_header(char *bdata) { return ((struct ext4_extent_header *)bdata); } static inline unsigned short ext4_ext_inode_depth(struct inode *ip) { struct ext4_extent_header *ehp; ehp = (struct ext4_extent_header *)ip->i_data; - return (ehp->eh_depth); + return (le16toh(ehp->eh_depth)); } static inline e4fs_daddr_t ext4_ext_index_pblock(struct ext4_extent_index *index) { e4fs_daddr_t blk; - blk = index->ei_leaf_lo; - blk |= (e4fs_daddr_t)index->ei_leaf_hi << 32; + blk = le32toh(index->ei_leaf_lo); + blk |= (e4fs_daddr_t)le16toh(index->ei_leaf_hi) << 32; return (blk); } static inline void ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb) { - index->ei_leaf_lo = pb & 0xffffffff; - index->ei_leaf_hi = (pb >> 32) & 0xffff; + index->ei_leaf_lo = htole32(pb & 0xffffffff); + index->ei_leaf_hi = htole16((pb >> 32) & 0xffff); } static inline e4fs_daddr_t ext4_ext_extent_pblock(struct ext4_extent *extent) { e4fs_daddr_t blk; - blk = extent->e_start_lo; - blk |= (e4fs_daddr_t)extent->e_start_hi << 32; + blk = le32toh(extent->e_start_lo); + blk |= (e4fs_daddr_t)le16toh(extent->e_start_hi) << 32; return (blk); } static inline void ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb) { - ex->e_start_lo = pb & 0xffffffff; - ex->e_start_hi = (pb >> 32) & 0xffff; + ex->e_start_lo = htole32(pb & 0xffffffff); + ex->e_start_hi = htole16((pb >> 32) & 0xffff); } int ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep) { struct ext4_extent_cache *ecp; int ret = EXT4_EXT_CACHE_NO; ecp = &ip->i_ext_cache; if (ecp->ec_type == EXT4_EXT_CACHE_NO) return (ret); if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) { - ep->e_blk = ecp->ec_blk; - ep->e_start_lo = ecp->ec_start & 0xffffffff; - ep->e_start_hi = ecp->ec_start >> 32 & 0xffff; - ep->e_len = ecp->ec_len; + ep->e_blk = htole32(ecp->ec_blk); + ep->e_start_lo = htole32(ecp->ec_start & 0xffffffff); + ep->e_start_hi = htole16(ecp->ec_start >> 32 & 0xffff); + ep->e_len = htole16(ecp->ec_len); ret = ecp->ec_type; } return (ret); } static int ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh) { struct m_ext2fs *fs; char *error_msg; fs = ip->i_e2fs; - if (eh->eh_magic != EXT4_EXT_MAGIC) { + if (le16toh(eh->eh_magic) != EXT4_EXT_MAGIC) { error_msg = "header: invalid magic"; goto corrupted; } if (eh->eh_max == 0) { error_msg = "header: invalid eh_max"; goto corrupted; } - if (eh->eh_ecount > eh->eh_max) { + if (le16toh(eh->eh_ecount) > le16toh(eh->eh_max)) { error_msg = "header: invalid eh_entries"; goto corrupted; } return (0); corrupted: SDT_PROBE2(ext2fs, , trace, extents, 1, error_msg); return (EIO); } static void ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk) { struct ext4_extent_header *eh; struct ext4_extent_index *r, *l, *m; eh = path->ep_header; - KASSERT(eh->eh_ecount <= eh->eh_max && eh->eh_ecount > 0, + KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max) && + le16toh(eh->eh_ecount) > 0, ("ext4_ext_binsearch_index: bad args")); l = EXT_FIRST_INDEX(eh) + 1; - r = EXT_FIRST_INDEX(eh) + eh->eh_ecount - 1; + r = EXT_FIRST_INDEX(eh) + le16toh(eh->eh_ecount) - 1; while (l <= r) { m = l + (r - l) / 2; - if (blk < m->ei_blk) + if (blk < le32toh(m->ei_blk)) r = m - 1; else l = m + 1; } path->ep_index = l - 
1; } static void ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk) { struct ext4_extent_header *eh; struct ext4_extent *r, *l, *m; eh = path->ep_header; - KASSERT(eh->eh_ecount <= eh->eh_max, + KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max), ("ext4_ext_binsearch_ext: bad args")); if (eh->eh_ecount == 0) return; l = EXT_FIRST_EXTENT(eh) + 1; - r = EXT_FIRST_EXTENT(eh) + eh->eh_ecount - 1; + r = EXT_FIRST_EXTENT(eh) + le16toh(eh->eh_ecount) - 1; while (l <= r) { m = l + (r - l) / 2; - if (blk < m->e_blk) + if (blk < le32toh(m->e_blk)) r = m - 1; else l = m + 1; } path->ep_ext = l - 1; } static int ext4_ext_fill_path_bdata(struct ext4_extent_path *path, struct buf *bp, uint64_t blk) { KASSERT(path->ep_data == NULL, ("ext4_ext_fill_path_bdata: bad ep_data")); path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK); if (!path->ep_data) return (ENOMEM); memcpy(path->ep_data, bp->b_data, bp->b_bufsize); path->ep_blk = blk; return (0); } static void ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp) { KASSERT(path->ep_data != NULL, ("ext4_ext_fill_path_buf: bad ep_data")); memcpy(bp->b_data, path->ep_data, bp->b_bufsize); } static void ext4_ext_drop_refs(struct ext4_extent_path *path) { int depth, i; if (!path) return; depth = path->ep_depth; for (i = 0; i <= depth; i++, path++) if (path->ep_data) { free(path->ep_data, M_EXT2EXTENTS); path->ep_data = NULL; } } void ext4_ext_path_free(struct ext4_extent_path *path) { if (!path) return; ext4_ext_drop_refs(path); free(path, M_EXT2EXTENTS); } int ext4_ext_find_extent(struct inode *ip, daddr_t block, struct ext4_extent_path **ppath) { struct m_ext2fs *fs; struct ext4_extent_header *eh; struct ext4_extent_path *path; struct buf *bp; uint64_t blk; int error, depth, i, ppos, alloc; fs = ip->i_e2fs; eh = ext4_ext_inode_header(ip); depth = ext4_ext_inode_depth(ip); ppos = 0; alloc = 0; error = ext4_ext_check_header(ip, eh); if (error) return (error); if (ppath == NULL) return (EINVAL); path = *ppath; if (path == NULL) { path = malloc(EXT4_EXT_DEPTH_MAX * sizeof(struct ext4_extent_path), M_EXT2EXTENTS, M_WAITOK | M_ZERO); if (!path) return (ENOMEM); *ppath = path; alloc = 1; } path[0].ep_header = eh; path[0].ep_data = NULL; /* Walk through the tree. */ i = depth; while (i) { ext4_ext_binsearch_index(&path[ppos], block); blk = ext4_ext_index_pblock(path[ppos].ep_index); path[ppos].ep_depth = i; path[ppos].ep_ext = NULL; error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk), ip->i_e2fs->e2fs_bsize, NOCRED, &bp); if (error) { goto error; } ppos++; if (ppos > depth) { SDT_PROBE2(ext2fs, , trace, extents, 1, "ppos > depth => extent corrupted"); error = EIO; brelse(bp); goto error; } ext4_ext_fill_path_bdata(&path[ppos], bp, blk); bqrelse(bp); eh = ext4_ext_block_header(path[ppos].ep_data); if (ext4_ext_check_header(ip, eh) || ext2_extent_blk_csum_verify(ip, path[ppos].ep_data)) { error = EIO; goto error; } path[ppos].ep_header = eh; i--; } error = ext4_ext_check_header(ip, eh); if (error) goto error; /* Find extent. 
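The tree walk in ext4_ext_find_extent() above follows index nodes through ext4_ext_index_pblock(), which combines the two little-endian halves of the leaf pointer into one 48-bit physical block number only after converting each half to host order. A self-contained sketch of that packing and unpacking, with an illustrative struct standing in for the lo/hi pair of struct ext4_extent_index:

#include <sys/endian.h>
#include <stdint.h>

struct leaf_ptr {
	uint32_t lo;	/* low 32 bits of the physical block, little-endian on disk */
	uint16_t hi;	/* high 16 bits, little-endian on disk */
};

static uint64_t
leaf_ptr_get(const struct leaf_ptr *p)
{
	return (le32toh(p->lo) | (uint64_t)le16toh(p->hi) << 32);
}

static void
leaf_ptr_set(struct leaf_ptr *p, uint64_t blk)
{
	p->lo = htole32(blk & 0xffffffff);
	p->hi = htole16((blk >> 32) & 0xffff);
}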
*/ path[ppos].ep_depth = i; path[ppos].ep_header = eh; path[ppos].ep_ext = NULL; path[ppos].ep_index = NULL; ext4_ext_binsearch_ext(&path[ppos], block); return (0); error: ext4_ext_drop_refs(path); if (alloc) free(path, M_EXT2EXTENTS); *ppath = NULL; return (error); } static inline int ext4_ext_space_root(struct inode *ip) { int size; size = sizeof(ip->i_data); size -= sizeof(struct ext4_extent_header); size /= sizeof(struct ext4_extent); return (size); } static inline int ext4_ext_space_block(struct inode *ip) { struct m_ext2fs *fs; int size; fs = ip->i_e2fs; size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent); return (size); } static inline int ext4_ext_space_block_index(struct inode *ip) { struct m_ext2fs *fs; int size; fs = ip->i_e2fs; size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent_index); return (size); } void ext4_ext_tree_init(struct inode *ip) { struct ext4_extent_header *ehp; ip->i_flag |= IN_E4EXTENTS; memset(ip->i_data, 0, EXT2_NDADDR + EXT2_NIADDR); ehp = (struct ext4_extent_header *)ip->i_data; - ehp->eh_magic = EXT4_EXT_MAGIC; - ehp->eh_max = ext4_ext_space_root(ip); + ehp->eh_magic = htole16(EXT4_EXT_MAGIC); + ehp->eh_max = htole16(ext4_ext_space_root(ip)); ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO; ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_update(ip->i_vnode, 1); } static inline void ext4_ext_put_in_cache(struct inode *ip, uint32_t blk, uint32_t len, uint32_t start, int type) { KASSERT(len != 0, ("ext4_ext_put_in_cache: bad input")); ip->i_ext_cache.ec_type = type; ip->i_ext_cache.ec_blk = blk; ip->i_ext_cache.ec_len = len; ip->i_ext_cache.ec_start = start; } static e4fs_daddr_t ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path, e4fs_daddr_t block) { struct m_ext2fs *fs; struct ext4_extent *ex; e4fs_daddr_t bg_start; int depth; fs = ip->i_e2fs; if (path) { depth = path->ep_depth; ex = path[depth].ep_ext; if (ex) { e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex); - e2fs_daddr_t blk = ex->e_blk; + e2fs_daddr_t blk = le32toh(ex->e_blk); if (block > blk) return (pblk + (block - blk)); else return (pblk - (blk - block)); } /* Try to get block from index itself. */ if (path[depth].ep_data) return (path[depth].ep_blk); } /* Use inode's group. */ bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) + - fs->e2fs->e2fs_first_dblock; + le32toh(fs->e2fs->e2fs_first_dblock); return (bg_start + block); } static int inline ext4_can_extents_be_merged(struct ext4_extent *ex1, struct ext4_extent *ex2) { - if (ex1->e_blk + ex1->e_len != ex2->e_blk) + if (le32toh(ex1->e_blk) + le16toh(ex1->e_len) != le32toh(ex2->e_blk)) return (0); - if (ex1->e_len + ex2->e_len > EXT4_MAX_LEN) + if (le16toh(ex1->e_len) + le16toh(ex2->e_len) > EXT4_MAX_LEN) return (0); - if (ext4_ext_extent_pblock(ex1) + ex1->e_len == + if (ext4_ext_extent_pblock(ex1) + le16toh(ex1->e_len) == ext4_ext_extent_pblock(ex2)) return (1); return (0); } static unsigned ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path) { int depth = path->ep_depth; /* Empty tree */ if (depth == 0) return (EXT4_MAX_BLOCKS); /* Go to indexes. 
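ext4_can_extents_be_merged() above brings every operand to host order before doing arithmetic on it; comparing the raw little-endian fields would give wrong answers on big-endian machines. A condensed sketch of the adjacency test over already-converted, host-order values (the parameter names and the max_len argument, which stands in for EXT4_MAX_LEN, are illustrative):

#include <stdint.h>

static int
extents_adjacent(uint32_t blk1, uint16_t len1, uint64_t start1,
    uint32_t blk2, uint16_t len2, uint64_t start2, uint32_t max_len)
{
	if ((uint64_t)blk1 + len1 != blk2)	/* logically contiguous? */
		return (0);
	if ((uint32_t)len1 + len2 > max_len)	/* combined length must still fit the field */
		return (0);
	return (start1 + len1 == start2);	/* physically contiguous? */
}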
*/ depth--; while (depth >= 0) { if (path[depth].ep_index != EXT_LAST_INDEX(path[depth].ep_header)) - return (path[depth].ep_index[1].ei_blk); + return (le32toh(path[depth].ep_index[1].ei_blk)); depth--; } return (EXT4_MAX_BLOCKS); } static int ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path) { struct m_ext2fs *fs; struct buf *bp; uint64_t blk; int error; fs = ip->i_e2fs; if (!path) return (EINVAL); if (path->ep_data) { blk = path->ep_blk; bp = getblk(ip->i_devvp, fsbtodb(fs, blk), fs->e2fs_bsize, 0, 0, 0); if (!bp) return (EIO); ext4_ext_fill_path_buf(path, bp); ext2_extent_blk_csum_set(ip, bp->b_data); error = bwrite(bp); } else { ip->i_flag |= IN_CHANGE | IN_UPDATE; error = ext2_update(ip->i_vnode, 1); } return (error); } static int ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path, uint32_t lblk, e4fs_daddr_t blk) { struct m_ext2fs *fs; struct ext4_extent_index *idx; int len; fs = ip->i_e2fs; - if (lblk == path->ep_index->ei_blk) { + if (lblk == le32toh(path->ep_index->ei_blk)) { SDT_PROBE2(ext2fs, , trace, extents, 1, "lblk == index blk => extent corrupted"); return (EIO); } - if (path->ep_header->eh_ecount >= path->ep_header->eh_max) { + if (le16toh(path->ep_header->eh_ecount) >= + le16toh(path->ep_header->eh_max)) { SDT_PROBE2(ext2fs, , trace, extents, 1, "ecout > maxcount => extent corrupted"); return (EIO); } - if (lblk > path->ep_index->ei_blk) { + if (lblk > le32toh(path->ep_index->ei_blk)) { /* Insert after. */ idx = path->ep_index + 1; } else { /* Insert before. */ idx = path->ep_index; } len = EXT_LAST_INDEX(path->ep_header) - idx + 1; if (len > 0) memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index)); if (idx > EXT_MAX_INDEX(path->ep_header)) { SDT_PROBE2(ext2fs, , trace, extents, 1, "index is out of range => extent corrupted"); return (EIO); } - idx->ei_blk = lblk; + idx->ei_blk = htole32(lblk); ext4_index_store_pblock(idx, blk); - path->ep_header->eh_ecount++; + path->ep_header->eh_ecount = + htole16(le16toh(path->ep_header->eh_ecount) + 1); return (ext4_ext_dirty(ip, path)); } static e4fs_daddr_t ext4_ext_alloc_meta(struct inode *ip) { e4fs_daddr_t blk = ext2_alloc_meta(ip); if (blk) { ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize); ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_update(ip->i_vnode, 1); } return (blk); } static void ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags) { struct m_ext2fs *fs; int i, blocksreleased; fs = ip->i_e2fs; blocksreleased = count; for(i = 0; i < count; i++) ext2_blkfree(ip, blk + i, fs->e2fs_bsize); if (ip->i_blocks >= blocksreleased) ip->i_blocks -= (btodb(fs->e2fs_bsize)*blocksreleased); else ip->i_blocks = 0; ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_update(ip->i_vnode, 1); } static int ext4_ext_split(struct inode *ip, struct ext4_extent_path *path, struct ext4_extent *newext, int at) { struct m_ext2fs *fs; struct buf *bp; int depth = ext4_ext_inode_depth(ip); struct ext4_extent_header *neh; struct ext4_extent_index *fidx; struct ext4_extent *ex; int i = at, k, m, a; e4fs_daddr_t newblk, oldblk; uint32_t border; e4fs_daddr_t *ablks = NULL; int error = 0; fs = ip->i_e2fs; bp = NULL; /* * We will split at current extent for now. 
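The entry-count updates in this file all follow the same read-modify-write shape: convert the on-disk value to host order, adjust it, then convert it back before storing, as in the eh_ecount increment in ext4_ext_insert_index() above. That pattern, expressed as a small helper (the helper itself is illustrative; the driver writes the conversion out inline):

#include <sys/endian.h>
#include <stdint.h>

/* Add 'delta' (possibly negative) to a 16-bit counter kept little-endian
 * on disk, such as eh_ecount in struct ext4_extent_header. */
static void
le16_add(uint16_t *counter_le, int delta)
{
	*counter_le = htole16((uint16_t)(le16toh(*counter_le) + delta));
}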
*/ if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) { SDT_PROBE2(ext2fs, , trace, extents, 1, "extent is out of range => extent corrupted"); return (EIO); } if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header)) - border = path[depth].ep_ext[1].e_blk; + border = le32toh(path[depth].ep_ext[1].e_blk); else - border = newext->e_blk; + border = le32toh(newext->e_blk); /* Allocate new blocks. */ ablks = malloc(sizeof(e4fs_daddr_t) * depth, M_EXT2EXTENTS, M_WAITOK | M_ZERO); if (!ablks) return (ENOMEM); for (a = 0; a < depth - at; a++) { newblk = ext4_ext_alloc_meta(ip); if (newblk == 0) goto cleanup; ablks[a] = newblk; } newblk = ablks[--a]; bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0); if (!bp) { error = EIO; goto cleanup; } neh = ext4_ext_block_header(bp->b_data); neh->eh_ecount = 0; - neh->eh_max = ext4_ext_space_block(ip); - neh->eh_magic = EXT4_EXT_MAGIC; + neh->eh_max = le16toh(ext4_ext_space_block(ip)); + neh->eh_magic = le16toh(EXT4_EXT_MAGIC); neh->eh_depth = 0; ex = EXT_FIRST_EXTENT(neh); - if (path[depth].ep_header->eh_ecount != path[depth].ep_header->eh_max) { + if (le16toh(path[depth].ep_header->eh_ecount) != + le16toh(path[depth].ep_header->eh_max)) { SDT_PROBE2(ext2fs, , trace, extents, 1, "extents count out of range => extent corrupted"); error = EIO; goto cleanup; } /* Start copy from next extent. */ m = 0; path[depth].ep_ext++; while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) { path[depth].ep_ext++; m++; } if (m) { memmove(ex, path[depth].ep_ext - m, sizeof(struct ext4_extent) * m); - neh->eh_ecount = neh->eh_ecount + m; + neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m); } ext2_extent_blk_csum_set(ip, bp->b_data); bwrite(bp); bp = NULL; /* Fix old leaf. */ if (m) { path[depth].ep_header->eh_ecount = - path[depth].ep_header->eh_ecount - m; + htole16(le16toh(path[depth].ep_header->eh_ecount) - m); ext4_ext_dirty(ip, path + depth); } /* Create intermediate indexes. */ k = depth - at - 1; KASSERT(k >= 0, ("ext4_ext_split: negative k")); /* Insert new index into current index block. */ i = depth - 1; while (k--) { oldblk = newblk; newblk = ablks[--a]; error = bread(ip->i_devvp, fsbtodb(fs, newblk), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { goto cleanup; } neh = (struct ext4_extent_header *)bp->b_data; - neh->eh_ecount = 1; - neh->eh_magic = EXT4_EXT_MAGIC; - neh->eh_max = ext4_ext_space_block_index(ip); - neh->eh_depth = depth - i; + neh->eh_ecount = htole16(1); + neh->eh_magic = htole16(EXT4_EXT_MAGIC); + neh->eh_max = htole16(ext4_ext_space_block_index(ip)); + neh->eh_depth = htole16(depth - i); fidx = EXT_FIRST_INDEX(neh); - fidx->ei_blk = border; + fidx->ei_blk = htole32(border); ext4_index_store_pblock(fidx, oldblk); m = 0; path[i].ep_index++; while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) { path[i].ep_index++; m++; } if (m) { memmove(++fidx, path[i].ep_index - m, sizeof(struct ext4_extent_index) * m); - neh->eh_ecount = neh->eh_ecount + m; + neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m); } ext2_extent_blk_csum_set(ip, bp->b_data); bwrite(bp); bp = NULL; /* Fix old index. 
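A few stores in this hunk, for example the eh_max and eh_magic initialization of the newly allocated leaf, use le16toh() where htole16() would read more naturally. For fixed-width integers the two macros perform the same transformation (a byte swap on big-endian hosts, a no-op on little-endian ones), so the bytes written to disk are identical and the asymmetry is cosmetic only. A tiny check illustrating the equivalence:

#include <sys/endian.h>
#include <assert.h>
#include <stdint.h>

static void
swap_macros_match(uint16_t host_value)
{
	/* Both directions of a fixed-width byte swap are the same involution. */
	assert(htole16(host_value) == le16toh(host_value));
	assert(le16toh(htole16(host_value)) == host_value);
}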
*/ if (m) { path[i].ep_header->eh_ecount = - path[i].ep_header->eh_ecount - m; + htole16(le16toh(path[i].ep_header->eh_ecount) - m); ext4_ext_dirty(ip, path + i); } i--; } error = ext4_ext_insert_index(ip, path + at, border, newblk); cleanup: if (bp) brelse(bp); if (error) { for (i = 0; i < depth; i++) { if (!ablks[i]) continue; ext4_ext_blkfree(ip, ablks[i], 1, 0); } } free(ablks, M_EXT2EXTENTS); return (error); } static int ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path, struct ext4_extent *newext) { struct m_ext2fs *fs; struct ext4_extent_path *curpath; struct ext4_extent_header *neh; struct buf *bp; e4fs_daddr_t newblk; int error = 0; fs = ip->i_e2fs; curpath = path; newblk = ext4_ext_alloc_meta(ip); if (newblk == 0) return (error); bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0); if (!bp) return (EIO); /* Move top-level index/leaf into new block. */ memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data)); /* Set size of new block */ neh = ext4_ext_block_header(bp->b_data); - neh->eh_magic = EXT4_EXT_MAGIC; + neh->eh_magic = htole16(EXT4_EXT_MAGIC); if (ext4_ext_inode_depth(ip)) - neh->eh_max = ext4_ext_space_block_index(ip); + neh->eh_max = htole16(ext4_ext_space_block_index(ip)); else - neh->eh_max = ext4_ext_space_block(ip); + neh->eh_max = htole16(ext4_ext_space_block(ip)); ext2_extent_blk_csum_set(ip, bp->b_data); error = bwrite(bp); if (error) goto out; bp = NULL; - curpath->ep_header->eh_magic = EXT4_EXT_MAGIC; - curpath->ep_header->eh_max = ext4_ext_space_root(ip); - curpath->ep_header->eh_ecount = 1; + curpath->ep_header->eh_magic = htole16(EXT4_EXT_MAGIC); + curpath->ep_header->eh_max = htole16(ext4_ext_space_root(ip)); + curpath->ep_header->eh_ecount = htole16(1); curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header); curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk; ext4_index_store_pblock(curpath->ep_index, newblk); neh = ext4_ext_inode_header(ip); - neh->eh_depth = path->ep_depth + 1; + neh->eh_depth = htole16(path->ep_depth + 1); ext4_ext_dirty(ip, curpath); out: brelse(bp); return (error); } static int ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path, struct ext4_extent *newext) { struct ext4_extent_path *curpath; int depth, i, error; repeat: i = depth = ext4_ext_inode_depth(ip); /* Look for free index entry int the tree */ curpath = path + depth; while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) { i--; curpath--; } /* * We use already allocated block for index block, * so subsequent data blocks should be contiguous. */ if (EXT_HAS_FREE_INDEX(curpath)) { error = ext4_ext_split(ip, path, newext, i); if (error) goto out; /* Refill path. */ ext4_ext_drop_refs(path); - error = ext4_ext_find_extent(ip, newext->e_blk, &path); + error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path); if (error) goto out; } else { /* Tree is full, do grow in depth. */ error = ext4_ext_grow_indepth(ip, path, newext); if (error) goto out; /* Refill path. */ ext4_ext_drop_refs(path); - error = ext4_ext_find_extent(ip, newext->e_blk, &path); + error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path); if (error) goto out; /* Check and split tree if required. 
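Note that when one on-disk field is copied directly into another, as when ext4_ext_grow_indepth() above seeds the root index's ei_blk from the first extent's e_blk, no conversion is required: both sides are already little-endian, and a swap is only needed once the value is inspected or modified in host order. A small illustration of the two cases (names are illustrative):

#include <sys/endian.h>
#include <stdint.h>

static void
copy_vs_compute(uint32_t *dst_le, const uint32_t *src_le)
{
	/* Disk-to-disk copy: the bytes move unchanged, no swap needed. */
	*dst_le = *src_le;

	/* Disk-to-host-to-disk: convert, compute, convert back. */
	*dst_le = htole32(le32toh(*src_le) + 1);
}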
*/ depth = ext4_ext_inode_depth(ip); - if (path[depth].ep_header->eh_ecount == - path[depth].ep_header->eh_max) + if (le16toh(path[depth].ep_header->eh_ecount) == + le16toh(path[depth].ep_header->eh_max)) goto repeat; } out: return (error); } static int ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path) { struct ext4_extent_header *eh; struct ext4_extent *ex; int32_t border; int depth, k; depth = ext4_ext_inode_depth(ip); eh = path[depth].ep_header; ex = path[depth].ep_ext; if (ex == NULL || eh == NULL) return (EIO); if (!depth) return (0); /* We will correct tree if first leaf got modified only. */ if (ex != EXT_FIRST_EXTENT(eh)) return (0); k = depth - 1; - border = path[depth].ep_ext->e_blk; - path[k].ep_index->ei_blk = border; + border = le32toh(path[depth].ep_ext->e_blk); + path[k].ep_index->ei_blk = htole32(border); ext4_ext_dirty(ip, path + k); while (k--) { /* Change all left-side indexes. */ if (path[k+1].ep_index != EXT_FIRST_INDEX(path[k+1].ep_header)) break; - path[k].ep_index->ei_blk = border; + path[k].ep_index->ei_blk = htole32(border); ext4_ext_dirty(ip, path + k); } return (0); } static int ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path, struct ext4_extent *newext) { struct ext4_extent_header * eh; struct ext4_extent *ex, *nex, *nearex; struct ext4_extent_path *npath; int depth, len, error, next; depth = ext4_ext_inode_depth(ip); ex = path[depth].ep_ext; npath = NULL; - if (newext->e_len == 0 || path[depth].ep_header == NULL) + if (htole16(newext->e_len) == 0 || path[depth].ep_header == NULL) return (EINVAL); /* Insert block into found extent. */ if (ex && ext4_can_extents_be_merged(ex, newext)) { - ex->e_len = ex->e_len + newext->e_len; + ex->e_len = htole16(le16toh(ex->e_len) + le16toh(newext->e_len)); eh = path[depth].ep_header; nearex = ex; goto merge; } repeat: depth = ext4_ext_inode_depth(ip); eh = path[depth].ep_header; - if (eh->eh_ecount < eh->eh_max) + if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max)) goto has_space; /* Try next leaf */ nex = EXT_LAST_EXTENT(eh); next = ext4_ext_next_leaf_block(ip, path); - if (newext->e_blk > nex->e_blk && next != EXT4_MAX_BLOCKS) { + if (le32toh(newext->e_blk) > le32toh(nex->e_blk) && next != + EXT4_MAX_BLOCKS) { KASSERT(npath == NULL, ("ext4_ext_insert_extent: bad path")); error = ext4_ext_find_extent(ip, next, &npath); if (error) goto cleanup; if (npath->ep_depth != path->ep_depth) { error = EIO; goto cleanup; } eh = npath[depth].ep_header; - if (eh->eh_ecount < eh->eh_max) { + if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max)) { path = npath; goto repeat; } } /* * There is no free space in the found leaf, * try to add a new leaf to the tree. */ error = ext4_ext_create_new_leaf(ip, path, newext); if (error) goto cleanup; depth = ext4_ext_inode_depth(ip); eh = path[depth].ep_header; has_space: nearex = path[depth].ep_ext; if (!nearex) { /* Create new extent in the leaf. */ path[depth].ep_ext = EXT_FIRST_EXTENT(eh); - } else if (newext->e_blk > nearex->e_blk) { + } else if (le32toh(newext->e_blk) > le32toh(nearex->e_blk)) { if (nearex != EXT_LAST_EXTENT(eh)) { len = EXT_MAX_EXTENT(eh) - nearex; len = (len - 1) * sizeof(struct ext4_extent); len = len < 0 ? 0 : len; memmove(nearex + 2, nearex + 1, len); } path[depth].ep_ext = nearex + 1; } else { len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent); len = len < 0 ? 
0 : len; memmove(nearex + 1, nearex, len); path[depth].ep_ext = nearex; } - eh->eh_ecount = eh->eh_ecount + 1; + eh->eh_ecount = htole16(le16toh(eh->eh_ecount) + 1); nearex = path[depth].ep_ext; nearex->e_blk = newext->e_blk; nearex->e_start_lo = newext->e_start_lo; nearex->e_start_hi = newext->e_start_hi; nearex->e_len = newext->e_len; merge: /* Try to merge extents to the right. */ while (nearex < EXT_LAST_EXTENT(eh)) { if (!ext4_can_extents_be_merged(nearex, nearex + 1)) break; /* Merge with next extent. */ - nearex->e_len = nearex->e_len + nearex[1].e_len; + nearex->e_len = htole16(le16toh(nearex->e_len) + + le16toh(nearex[1].e_len)); if (nearex + 1 < EXT_LAST_EXTENT(eh)) { len = (EXT_LAST_EXTENT(eh) - nearex - 1) * sizeof(struct ext4_extent); memmove(nearex + 1, nearex + 2, len); } - eh->eh_ecount = eh->eh_ecount - 1; - KASSERT(eh->eh_ecount != 0, + eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1); + KASSERT(le16toh(eh->eh_ecount) != 0, ("ext4_ext_insert_extent: bad ecount")); } /* * Try to merge extents to the left, * start from inexes correction. */ error = ext4_ext_correct_indexes(ip, path); if (error) goto cleanup; ext4_ext_dirty(ip, path + depth); cleanup: if (npath) { ext4_ext_drop_refs(npath); free(npath, M_EXT2EXTENTS); } ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO; return (error); } static e4fs_daddr_t ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref, struct ucred *cred, unsigned long *count, int *perror) { struct m_ext2fs *fs; e4fs_daddr_t newblk; /* * We will allocate only single block for now. */ if (*count > 1) return (0); fs = ip->i_e2fs; EXT2_LOCK(ip->i_ump); *perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk); if (*perror) return (0); if (newblk) { ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_update(ip->i_vnode, 1); } return (newblk); } int ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk, unsigned long max_blocks, struct ucred *cred, struct buf **bpp, int *pallocated, daddr_t *nb) { struct m_ext2fs *fs; struct buf *bp = NULL; struct ext4_extent_path *path; struct ext4_extent newex, *ex; e4fs_daddr_t bpref, newblk = 0; unsigned long allocated = 0; int error = 0, depth; if(bpp) *bpp = NULL; *pallocated = 0; /* Check cache. */ path = NULL; if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) { if (bpref == EXT4_EXT_CACHE_IN) { /* Block is already allocated. */ - newblk = iblk - newex.e_blk + + newblk = iblk - le32toh(newex.e_blk) + ext4_ext_extent_pblock(&newex); - allocated = newex.e_len - (iblk - newex.e_blk); + allocated = le16toh(newex.e_len) - (iblk - le32toh(newex.e_blk)); goto out; } else { error = EIO; goto out2; } } error = ext4_ext_find_extent(ip, iblk, &path); if (error) { goto out2; } depth = ext4_ext_inode_depth(ip); if (path[depth].ep_ext == NULL && depth != 0) { error = EIO; goto out2; } if ((ex = path[depth].ep_ext)) { - uint64_t lblk = ex->e_blk; - uint16_t e_len = ex->e_len; + uint64_t lblk = le32toh(ex->e_blk); + uint16_t e_len = le16toh(ex->e_len); e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex); if (e_len > EXT4_MAX_LEN) goto out2; /* If we found extent covers block, simply return it. */ if (iblk >= lblk && iblk < lblk + e_len) { newblk = iblk - lblk + e_start; allocated = e_len - (iblk - lblk); ext4_ext_put_in_cache(ip, lblk, e_len, e_start, EXT4_EXT_CACHE_IN); goto out; } } /* Allocate the new block. 
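Both the cache hit and the found-extent case in ext4_ext_get_blocks() above translate a logical block into a physical one by offsetting from the start of the covering extent, after e_blk and e_len have been converted to host order. The arithmetic pulled out as a standalone helper over host-order inputs (names are illustrative):

#include <stdint.h>

/* Map logical block 'lblk' inside an extent that starts at logical block
 * 'ext_lblk' and physical block 'ext_pblk' and covers 'ext_len' blocks.
 * The caller must already have checked that
 * ext_lblk <= lblk < ext_lblk + ext_len.  '*left' returns how many mapped
 * blocks remain from 'lblk' to the end of the extent. */
static uint64_t
map_in_extent(uint32_t lblk, uint32_t ext_lblk, uint64_t ext_pblk,
    uint16_t ext_len, uint32_t *left)
{
	*left = ext_len - (lblk - ext_lblk);
	return (ext_pblk + (lblk - ext_lblk));
}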
*/ if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) { ip->i_next_alloc_goal = 0; } bpref = ext4_ext_blkpref(ip, path, iblk); allocated = max_blocks; newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error); if (!newblk) goto out2; /* Try to insert new extent into found leaf and return. */ - newex.e_blk = iblk; + newex.e_blk = htole32(iblk); ext4_ext_store_pblock(&newex, newblk); - newex.e_len = allocated; + newex.e_len = htole16(allocated); error = ext4_ext_insert_extent(ip, path, &newex); if (error) goto out2; newblk = ext4_ext_extent_pblock(&newex); ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN); *pallocated = 1; out: if (allocated > max_blocks) allocated = max_blocks; if (bpp) { fs = ip->i_e2fs; error = bread(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, cred, &bp); if (error) { brelse(bp); } else { *bpp = bp; } } out2: if (path) { ext4_ext_drop_refs(path); free(path, M_EXT2EXTENTS); } if (nb) *nb = newblk; return (error); } static inline uint16_t ext4_ext_get_actual_len(struct ext4_extent *ext) { - return (ext->e_len <= EXT_INIT_MAX_LEN ? - ext->e_len : (ext->e_len - EXT_INIT_MAX_LEN)); + return (le16toh(ext->e_len) <= EXT_INIT_MAX_LEN ? + le16toh(ext->e_len) : (le16toh(ext->e_len) - EXT_INIT_MAX_LEN)); } static inline struct ext4_extent_header * ext4_ext_header(struct inode *ip) { return ((struct ext4_extent_header *)ip->i_db); } static int ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex, unsigned long from, unsigned long to) { unsigned long num, start; - if (from >= ex->e_blk && - to == ex->e_blk + ext4_ext_get_actual_len(ex) - 1) { + if (from >= le32toh(ex->e_blk) && + to == le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - 1) { /* Tail cleanup. */ - num = ex->e_blk + ext4_ext_get_actual_len(ex) - from; + num = le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - from; start = ext4_ext_extent_pblock(ex) + ext4_ext_get_actual_len(ex) - num; ext4_ext_blkfree(ip, start, num, 0); } return (0); } static int ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path) { e4fs_daddr_t leaf; /* Free index block. */ path--; leaf = ext4_ext_index_pblock(path->ep_index); KASSERT(path->ep_header->eh_ecount != 0, ("ext4_ext_rm_index: bad ecount")); - path->ep_header->eh_ecount--; + path->ep_header->eh_ecount = + htole16(le16toh(path->ep_header->eh_ecount) - 1); ext4_ext_dirty(ip, path); ext4_ext_blkfree(ip, leaf, 1, 0); return (0); } static int ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path, uint64_t start) { struct ext4_extent_header *eh; struct ext4_extent *ex; unsigned int a, b, block, num; unsigned long ex_blk; unsigned short ex_len; int depth; int error, correct_index; depth = ext4_ext_inode_depth(ip); if (!path[depth].ep_header) { if (path[depth].ep_data == NULL) return (EINVAL); path[depth].ep_header = (struct ext4_extent_header* )path[depth].ep_data; } eh = path[depth].ep_header; if (!eh) { SDT_PROBE2(ext2fs, , trace, extents, 1, "bad header => extent corrupted"); return (EIO); } ex = EXT_LAST_EXTENT(eh); - ex_blk = ex->e_blk; + ex_blk = le32toh(ex->e_blk); ex_len = ext4_ext_get_actual_len(ex); error = 0; correct_index = 0; while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) { path[depth].ep_ext = ex; a = ex_blk > start ? ex_blk : start; b = (uint64_t)ex_blk + ex_len - 1 < EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS; if (a != ex_blk && b != ex_blk + ex_len - 1) return (EINVAL); else if (a != ex_blk) { /* Remove tail of the extent. 
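ext4_ext_get_actual_len() above decodes the length field after the byte-order conversion: values up to EXT_INIT_MAX_LEN (1 << 15) describe ordinary extents, while larger values mark an unwritten (preallocated) extent whose real length is the stored value minus EXT_INIT_MAX_LEN. A standalone version of that decoding, assuming the same constant:

#include <sys/endian.h>
#include <stdint.h>

#define INIT_MAX_LEN	(1U << 15)	/* mirrors EXT_INIT_MAX_LEN from ext2_extents.h */

static uint16_t
extent_actual_len(uint16_t e_len_le)
{
	uint16_t len = le16toh(e_len_le);

	/* Lengths above the threshold flag an unwritten extent. */
	return (len <= INIT_MAX_LEN ? len : len - INIT_MAX_LEN);
}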
*/ block = ex_blk; num = a - block; } else if (b != ex_blk + ex_len - 1) { /* Remove head of the extent, not implemented. */ return (EINVAL); } else { /* Remove whole extent. */ block = ex_blk; num = 0; } if (ex == EXT_FIRST_EXTENT(eh)) correct_index = 1; error = ext4_remove_blocks(ip, ex, a, b); if (error) goto out; if (num == 0) { ext4_ext_store_pblock(ex, 0); - eh->eh_ecount--; + eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1); } - ex->e_blk = block; - ex->e_len = num; + ex->e_blk = htole32(block); + ex->e_len = htole16(num); ext4_ext_dirty(ip, path + depth); ex--; - ex_blk = ex->e_blk; + ex_blk = htole32(ex->e_blk); ex_len = ext4_ext_get_actual_len(ex); }; - if (correct_index && eh->eh_ecount) + if (correct_index && le16toh(eh->eh_ecount)) error = ext4_ext_correct_indexes(ip, path); /* * If this leaf is free, we should * remove it from index block above. */ - if (error == 0 && eh->eh_ecount == 0 && path[depth].ep_data != NULL) + if (error == 0 && eh->eh_ecount == 0 && + path[depth].ep_data != NULL) error = ext4_ext_rm_index(ip, path + depth); out: return (error); } static struct buf * ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk, int depth, int flags) { struct m_ext2fs *fs; struct ext4_extent_header *eh; struct buf *bp; int error; fs = ip->i_e2fs; error = bread(ip->i_devvp, fsbtodb(fs, pblk), fs->e2fs_bsize, NOCRED, &bp); if (error) { return (NULL); } eh = ext4_ext_block_header(bp->b_data); - if (eh->eh_depth != depth) { + if (le16toh(eh->eh_depth) != depth) { SDT_PROBE2(ext2fs, , trace, extents, 1, "unexpected eh_depth"); goto err; } error = ext4_ext_check_header(ip, eh); if (error) goto err; return (bp); err: brelse(bp); return (NULL); } static int inline ext4_ext_more_to_rm(struct ext4_extent_path *path) { KASSERT(path->ep_index != NULL, ("ext4_ext_more_to_rm: bad index from path")); if (path->ep_index < EXT_FIRST_INDEX(path->ep_header)) return (0); - if (path->ep_header->eh_ecount == path->index_count) + if (le16toh(path->ep_header->eh_ecount) == path->index_count) return (0); return (1); } int ext4_ext_remove_space(struct inode *ip, off_t length, int flags, struct ucred *cred, struct thread *td) { struct buf *bp; struct ext4_extent_header *ehp; struct ext4_extent_path *path; int depth; int i, error; ehp = (struct ext4_extent_header *)ip->i_db; depth = ext4_ext_inode_depth(ip); error = ext4_ext_check_header(ip, ehp); if(error) return (error); path = malloc(sizeof(struct ext4_extent_path) * (depth + 1), M_EXT2EXTENTS, M_WAITOK | M_ZERO); if (!path) return (ENOMEM); path[0].ep_header = ehp; path[0].ep_depth = depth; i = 0; while (error == 0 && i >= 0) { if (i == depth) { /* This is leaf. */ error = ext4_ext_rm_leaf(ip, path, length); if (error) break; free(path[i].ep_data, M_EXT2EXTENTS); path[i].ep_data = NULL; i--; continue; } /* This is index. */ if (!path[i].ep_header) path[i].ep_header = (struct ext4_extent_header *)path[i].ep_data; if (!path[i].ep_index) { /* This level hasn't touched yet. */ path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header); - path[i].index_count = path[i].ep_header->eh_ecount + 1; + path[i].index_count = + le16toh(path[i].ep_header->eh_ecount) + 1; } else { /* We've already was here, see at next index. 
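Checks such as eh_ecount == 0 in the removal path above deliberately skip the conversion: a field whose bytes are all zero reads as zero in either byte order, so tests against 0 are safe on raw on-disk values, while any other constant still needs le16toh(). In sketch form:

#include <sys/endian.h>
#include <stdint.h>

static int
leaf_is_empty(const uint16_t *ecount_le)
{
	return (*ecount_le == 0);		/* byte-order neutral: zero is zero either way */
}

static int
leaf_has_exactly(const uint16_t *ecount_le, uint16_t n)
{
	return (le16toh(*ecount_le) == n);	/* any other constant needs conversion */
}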
*/ path[i].ep_index--; } if (ext4_ext_more_to_rm(path + i)) { memset(path + i + 1, 0, sizeof(*path)); bp = ext4_read_extent_tree_block(ip, ext4_ext_index_pblock(path[i].ep_index), path[0].ep_depth - (i + 1), 0); if (!bp) { error = EIO; break; } ext4_ext_fill_path_bdata(&path[i+1], bp, ext4_ext_index_pblock(path[i].ep_index)); brelse(bp); - path[i].index_count = path[i].ep_header->eh_ecount; + path[i].index_count = + le16toh(path[i].ep_header->eh_ecount); i++; } else { if (path[i].ep_header->eh_ecount == 0 && i > 0) { /* Index is empty, remove it. */ error = ext4_ext_rm_index(ip, path + i); } free(path[i].ep_data, M_EXT2EXTENTS); path[i].ep_data = NULL; i--; } } if (path->ep_header->eh_ecount == 0) { /* * Truncate the tree to zero. */ ext4_ext_header(ip)->eh_depth = 0; - ext4_ext_header(ip)->eh_max = ext4_ext_space_root(ip); + ext4_ext_header(ip)->eh_max = htole16(ext4_ext_space_root(ip)); ext4_ext_dirty(ip, path); } ext4_ext_drop_refs(path); free(path, M_EXT2EXTENTS); return (error); } Index: head/sys/fs/ext2fs/ext2_extents.h =================================================================== --- head/sys/fs/ext2fs/ext2_extents.h (revision 361135) +++ head/sys/fs/ext2fs/ext2_extents.h (revision 361136) @@ -1,136 +1,136 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012, 2010 Zheng Liu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _FS_EXT2FS_EXT2_EXTENTS_H_ #define _FS_EXT2FS_EXT2_EXTENTS_H_ #include #define EXT4_EXT_MAGIC 0xf30a #define EXT4_MAX_BLOCKS 0xffffffff #define EXT_INIT_MAX_LEN (1UL << 15) #define EXT4_MAX_LEN (EXT_INIT_MAX_LEN - 1) #define EXT4_EXT_DEPTH_MAX 5 #define EXT4_EXT_CACHE_NO 0 #define EXT4_EXT_CACHE_GAP 1 #define EXT4_EXT_CACHE_IN 2 /* * Ext4 extent tail with csum */ struct ext4_extent_tail { uint32_t et_checksum; /* crc32c(uuid+inum+extent_block) */ }; /* * Ext4 file system extent on disk. */ struct ext4_extent { uint32_t e_blk; /* first logical block */ uint16_t e_len; /* number of blocks */ uint16_t e_start_hi; /* high 16 bits of physical block */ uint32_t e_start_lo; /* low 32 bits of physical block */ }; /* * Extent index on disk. 
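Every field in these records is little-endian on disk, so a reader takes e_blk and e_len through le32toh()/le16toh() and assembles the 48-bit physical start from e_start_lo and e_start_hi. A compact decoding example (the helper struct and function are illustrative, not part of this header):

#include <sys/endian.h>
#include <stdint.h>

struct decoded_extent {
	uint32_t lblk;	/* first logical block, host order */
	uint16_t len;	/* number of blocks, host order */
	uint64_t pblk;	/* first physical block, host order */
};

static struct decoded_extent
decode_extent(const struct ext4_extent *ep)
{
	struct decoded_extent d;

	d.lblk = le32toh(ep->e_blk);
	d.len = le16toh(ep->e_len);
	d.pblk = le32toh(ep->e_start_lo) |
	    (uint64_t)le16toh(ep->e_start_hi) << 32;
	return (d);
}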
*/ struct ext4_extent_index { uint32_t ei_blk; /* indexes logical blocks */ uint32_t ei_leaf_lo; /* points to physical block of the * next level */ uint16_t ei_leaf_hi; /* high 16 bits of physical block */ uint16_t ei_unused; }; /* * Extent tree header. */ struct ext4_extent_header { uint16_t eh_magic; /* magic number: 0xf30a */ uint16_t eh_ecount; /* number of valid entries */ uint16_t eh_max; /* capacity of store in entries */ uint16_t eh_depth; /* the depth of extent tree */ uint32_t eh_gen; /* generation of extent tree */ }; /* * Save cached extent. */ struct ext4_extent_cache { daddr_t ec_start; /* extent start */ uint32_t ec_blk; /* logical block */ uint32_t ec_len; uint32_t ec_type; }; /* * Save path to some extent. */ struct ext4_extent_path { int index_count; uint16_t ep_depth; uint64_t ep_blk; char *ep_data; struct ext4_extent *ep_ext; struct ext4_extent_index *ep_index; struct ext4_extent_header *ep_header; }; #define EXT_FIRST_EXTENT(hdr) ((struct ext4_extent *)(((char *)(hdr)) + \ sizeof(struct ext4_extent_header))) #define EXT_FIRST_INDEX(hdr) ((struct ext4_extent_index *)(((char *)(hdr)) + \ sizeof(struct ext4_extent_header))) -#define EXT_LAST_EXTENT(hdr) (EXT_FIRST_EXTENT((hdr)) + (hdr)->eh_ecount - 1) -#define EXT_LAST_INDEX(hdr) (EXT_FIRST_INDEX((hdr)) + (hdr)->eh_ecount - 1) +#define EXT_LAST_EXTENT(hdr) (EXT_FIRST_EXTENT((hdr)) + le16toh((hdr)->eh_ecount) - 1) +#define EXT_LAST_INDEX(hdr) (EXT_FIRST_INDEX((hdr)) + le16toh((hdr)->eh_ecount) - 1) #define EXT4_EXTENT_TAIL_OFFSET(hdr) (sizeof(struct ext4_extent_header) + \ - (sizeof(struct ext4_extent) * (hdr)->eh_max)) + (sizeof(struct ext4_extent) * le16toh((hdr)->eh_max))) #define EXT_HAS_FREE_INDEX(path) \ - ((path)->ep_header->eh_ecount < (path)->ep_header->eh_max) -#define EXT_MAX_EXTENT(hdr) (EXT_FIRST_EXTENT(hdr) + ((hdr)->eh_max) - 1) -#define EXT_MAX_INDEX(hdr) (EXT_FIRST_INDEX((hdr)) + (hdr)->eh_max - 1) + (le16toh((path)->ep_header->eh_ecount) < le16toh((path)->ep_header->eh_max)) +#define EXT_MAX_EXTENT(hdr) (EXT_FIRST_EXTENT(hdr) + le16toh((hdr)->eh_max) - 1) +#define EXT_MAX_INDEX(hdr) (EXT_FIRST_INDEX((hdr)) + le16toh((hdr)->eh_max) - 1) struct inode; struct m_ext2fs; void ext4_ext_tree_init(struct inode *ip); int ext4_ext_in_cache(struct inode *, daddr_t, struct ext4_extent *); void ext4_ext_put_cache(struct inode *, struct ext4_extent *, int); int ext4_ext_find_extent(struct inode *, daddr_t, struct ext4_extent_path **); void ext4_ext_path_free(struct ext4_extent_path *path); int ext4_ext_remove_space(struct inode *ip, off_t length, int flags, struct ucred *cred, struct thread *td); int ext4_ext_get_blocks(struct inode *ip, int64_t iblock, unsigned long max_blocks, struct ucred *cred, struct buf **bpp, int *allocate, daddr_t *); #ifdef EXT2FS_PRINT_EXTENTS void ext4_ext_print_extent_tree_status(struct inode *ip); #endif #endif /* !_FS_EXT2FS_EXT2_EXTENTS_H_ */ Index: head/sys/fs/ext2fs/ext2_htree.c =================================================================== --- head/sys/fs/ext2fs/ext2_htree.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_htree.c (revision 361136) @@ -1,942 +1,961 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010, 2012 Zheng Liu * Copyright (c) 2012, Vyacheslav Matyushin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(ext2fs); /* * ext2fs trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(ext2fs, , trace, htree, "int", "char*"); static void ext2_append_entry(char *block, uint32_t blksize, struct ext2fs_direct_2 *last_entry, struct ext2fs_direct_2 *new_entry, int csum_size); static int ext2_htree_append_block(struct vnode *vp, char *data, struct componentname *cnp, uint32_t blksize); static int ext2_htree_check_next(struct inode *ip, uint32_t hash, const char *name, struct ext2fs_htree_lookup_info *info); static int ext2_htree_cmp_sort_entry(const void *e1, const void *e2); static int ext2_htree_find_leaf(struct inode *ip, const char *name, int namelen, uint32_t *hash, uint8_t *hash_version, struct ext2fs_htree_lookup_info *info); static uint32_t ext2_htree_get_block(struct ext2fs_htree_entry *ep); static uint16_t ext2_htree_get_count(struct ext2fs_htree_entry *ep); static uint32_t ext2_htree_get_hash(struct ext2fs_htree_entry *ep); static uint16_t ext2_htree_get_limit(struct ext2fs_htree_entry *ep); static void ext2_htree_insert_entry_to_level(struct ext2fs_htree_lookup_level *level, uint32_t hash, uint32_t blk); static void ext2_htree_insert_entry(struct ext2fs_htree_lookup_info *info, uint32_t hash, uint32_t blk); static uint32_t ext2_htree_node_limit(struct inode *ip); static void ext2_htree_set_block(struct ext2fs_htree_entry *ep, uint32_t blk); static void ext2_htree_set_count(struct ext2fs_htree_entry *ep, uint16_t cnt); static void ext2_htree_set_hash(struct ext2fs_htree_entry *ep, uint32_t hash); static void ext2_htree_set_limit(struct ext2fs_htree_entry *ep, uint16_t limit); static int ext2_htree_split_dirblock(struct inode *ip, char *block1, char *block2, uint32_t blksize, uint32_t *hash_seed, uint8_t hash_version, uint32_t *split_hash, struct ext2fs_direct_2 *entry); static void ext2_htree_release(struct ext2fs_htree_lookup_info *info); static uint32_t ext2_htree_root_limit(struct inode *ip, int len); static int ext2_htree_writebuf(struct inode *ip, struct ext2fs_htree_lookup_info *info); int ext2_htree_has_idx(struct inode *ip) { if (EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_DIRHASHINDEX) && 
ip->i_flag & IN_E3INDEX) return (1); else return (0); } static int ext2_htree_check_next(struct inode *ip, uint32_t hash, const char *name, struct ext2fs_htree_lookup_info *info) { struct vnode *vp = ITOV(ip); struct ext2fs_htree_lookup_level *level; struct buf *bp; uint32_t next_hash; int idx = info->h_levels_num - 1; int levels = 0; do { level = &info->h_levels[idx]; level->h_entry++; if (level->h_entry < level->h_entries + ext2_htree_get_count(level->h_entries)) break; if (idx == 0) return (0); idx--; levels++; } while (1); next_hash = ext2_htree_get_hash(level->h_entry); if ((hash & 1) == 0) { if (hash != (next_hash & ~1)) return (0); } while (levels > 0) { levels--; if (ext2_blkatoff(vp, ext2_htree_get_block(level->h_entry) * ip->i_e2fs->e2fs_bsize, NULL, &bp) != 0) return (0); level = &info->h_levels[idx + 1]; brelse(level->h_bp); level->h_bp = bp; level->h_entry = level->h_entries = ((struct ext2fs_htree_node *)bp->b_data)->h_entries; } return (1); } static uint32_t ext2_htree_get_block(struct ext2fs_htree_entry *ep) { - return (ep->h_blk & 0x00FFFFFF); + return (le32toh(ep->h_blk) & 0x00FFFFFF); } static void ext2_htree_set_block(struct ext2fs_htree_entry *ep, uint32_t blk) { - ep->h_blk = blk; + ep->h_blk = htole32(blk); } static uint16_t ext2_htree_get_count(struct ext2fs_htree_entry *ep) { - return (((struct ext2fs_htree_count *)(ep))->h_entries_num); + return (le16toh(((struct ext2fs_htree_count *)(ep))->h_entries_num)); } static void ext2_htree_set_count(struct ext2fs_htree_entry *ep, uint16_t cnt) { - ((struct ext2fs_htree_count *)(ep))->h_entries_num = cnt; + ((struct ext2fs_htree_count *)(ep))->h_entries_num = htole16(cnt); } static uint32_t ext2_htree_get_hash(struct ext2fs_htree_entry *ep) { - return (ep->h_hash); + return (le32toh(ep->h_hash)); } static uint16_t ext2_htree_get_limit(struct ext2fs_htree_entry *ep) { - return (((struct ext2fs_htree_count *)(ep))->h_entries_max); + return (le16toh(((struct ext2fs_htree_count *)(ep))->h_entries_max)); } static void ext2_htree_set_hash(struct ext2fs_htree_entry *ep, uint32_t hash) { - ep->h_hash = hash; + ep->h_hash = htole32(hash); } static void ext2_htree_set_limit(struct ext2fs_htree_entry *ep, uint16_t limit) { - ((struct ext2fs_htree_count *)(ep))->h_entries_max = limit; + ((struct ext2fs_htree_count *)(ep))->h_entries_max = htole16(limit); } static void ext2_htree_release(struct ext2fs_htree_lookup_info *info) { u_int i; for (i = 0; i < info->h_levels_num; i++) { struct buf *bp = info->h_levels[i].h_bp; if (bp != NULL) brelse(bp); } } static uint32_t ext2_htree_root_limit(struct inode *ip, int len) { struct m_ext2fs *fs; uint32_t space; fs = ip->i_e2fs; space = ip->i_e2fs->e2fs_bsize - EXT2_DIR_REC_LEN(1) - EXT2_DIR_REC_LEN(2) - len; if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) space -= sizeof(struct ext2fs_htree_tail); return (space / sizeof(struct ext2fs_htree_entry)); } static uint32_t ext2_htree_node_limit(struct inode *ip) { struct m_ext2fs *fs; uint32_t space; fs = ip->i_e2fs; space = fs->e2fs_bsize - EXT2_DIR_REC_LEN(0); if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) space -= sizeof(struct ext2fs_htree_tail); return (space / sizeof(struct ext2fs_htree_entry)); } +static void +ext2_get_hash_seed(struct ext2fs* es, uint32_t* seed) +{ + + for (int i = 0; i < 4; i++) + seed[i] = le32toh(es->e3fs_hash_seed[i]); +} + static int ext2_htree_find_leaf(struct inode *ip, const char *name, int namelen, uint32_t *hash, uint8_t *hash_ver, struct ext2fs_htree_lookup_info *info) { struct 
vnode *vp; struct ext2fs *fs; struct m_ext2fs *m_fs; struct buf *bp = NULL; struct ext2fs_htree_root *rootp; struct ext2fs_htree_entry *entp, *start, *end, *middle, *found; struct ext2fs_htree_lookup_level *level_info; uint32_t hash_major = 0, hash_minor = 0; uint32_t levels, cnt; + uint32_t hash_seed[4]; uint8_t hash_version; if (name == NULL || info == NULL) return (-1); vp = ITOV(ip); fs = ip->i_e2fs->e2fs; m_fs = ip->i_e2fs; if (ext2_blkatoff(vp, 0, NULL, &bp) != 0) return (-1); info->h_levels_num = 1; info->h_levels[0].h_bp = bp; rootp = (struct ext2fs_htree_root *)bp->b_data; if (rootp->h_info.h_hash_version != EXT2_HTREE_LEGACY && rootp->h_info.h_hash_version != EXT2_HTREE_HALF_MD4 && rootp->h_info.h_hash_version != EXT2_HTREE_TEA) goto error; hash_version = rootp->h_info.h_hash_version; if (hash_version <= EXT2_HTREE_TEA) hash_version += m_fs->e2fs_uhash; *hash_ver = hash_version; - ext2_htree_hash(name, namelen, fs->e3fs_hash_seed, + ext2_get_hash_seed(fs, hash_seed); + ext2_htree_hash(name, namelen, hash_seed, hash_version, &hash_major, &hash_minor); *hash = hash_major; if ((levels = rootp->h_info.h_ind_levels) > 1) goto error; entp = (struct ext2fs_htree_entry *)(((char *)&rootp->h_info) + rootp->h_info.h_info_len); if (ext2_htree_get_limit(entp) != ext2_htree_root_limit(ip, rootp->h_info.h_info_len)) goto error; while (1) { cnt = ext2_htree_get_count(entp); if (cnt == 0 || cnt > ext2_htree_get_limit(entp)) goto error; start = entp + 1; end = entp + cnt - 1; while (start <= end) { middle = start + (end - start) / 2; if (ext2_htree_get_hash(middle) > hash_major) end = middle - 1; else start = middle + 1; } found = start - 1; level_info = &(info->h_levels[info->h_levels_num - 1]); level_info->h_bp = bp; level_info->h_entries = entp; level_info->h_entry = found; if (levels == 0) return (0); levels--; if (ext2_blkatoff(vp, ext2_htree_get_block(found) * m_fs->e2fs_bsize, NULL, &bp) != 0) goto error; entp = ((struct ext2fs_htree_node *)bp->b_data)->h_entries; info->h_levels_num++; info->h_levels[info->h_levels_num - 1].h_bp = bp; } error: ext2_htree_release(info); return (-1); } /* * Try to lookup a directory entry in HTree index */ int ext2_htree_lookup(struct inode *ip, const char *name, int namelen, struct buf **bpp, int *entryoffp, doff_t *offp, doff_t *prevoffp, doff_t *endusefulp, struct ext2fs_searchslot *ss) { struct vnode *vp; struct ext2fs_htree_lookup_info info; struct ext2fs_htree_entry *leaf_node; struct m_ext2fs *m_fs; struct buf *bp; uint32_t blk; uint32_t dirhash; uint32_t bsize; uint8_t hash_version; int search_next; int found = 0; m_fs = ip->i_e2fs; bsize = m_fs->e2fs_bsize; vp = ITOV(ip); /* TODO: print error msg because we don't lookup '.' and '..' 
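The new ext2_get_hash_seed() helper introduced above copies the four superblock seed words into a host-order array before ext2_htree_find_leaf() hands them to ext2_htree_hash(), so the hash routines themselves never see little-endian data. The shape of that conversion in isolation (the function name is illustrative):

#include <sys/endian.h>
#include <stdint.h>

/* Convert a 4-word little-endian hash seed, as stored in the superblock,
 * into host order for the directory-hash functions. */
static void
seed_to_host(const uint32_t seed_le[4], uint32_t seed[4])
{
	for (int i = 0; i < 4; i++)
		seed[i] = le32toh(seed_le[i]);
}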
*/ memset(&info, 0, sizeof(info)); if (ext2_htree_find_leaf(ip, name, namelen, &dirhash, &hash_version, &info)) return (-1); do { leaf_node = info.h_levels[info.h_levels_num - 1].h_entry; blk = ext2_htree_get_block(leaf_node); if (ext2_blkatoff(vp, blk * bsize, NULL, &bp) != 0) { ext2_htree_release(&info); return (-1); } *offp = blk * bsize; *entryoffp = 0; *prevoffp = blk * bsize; *endusefulp = blk * bsize; if (ss->slotstatus == NONE) { ss->slotoffset = -1; ss->slotfreespace = 0; } if (ext2_search_dirblock(ip, bp->b_data, &found, name, namelen, entryoffp, offp, prevoffp, endusefulp, ss) != 0) { brelse(bp); ext2_htree_release(&info); return (-1); } if (found) { *bpp = bp; ext2_htree_release(&info); return (0); } brelse(bp); search_next = ext2_htree_check_next(ip, dirhash, name, &info); } while (search_next); ext2_htree_release(&info); return (ENOENT); } static int ext2_htree_append_block(struct vnode *vp, char *data, struct componentname *cnp, uint32_t blksize) { struct iovec aiov; struct uio auio; struct inode *dp = VTOI(vp); uint64_t cursize, newsize; int error; cursize = roundup(dp->i_size, blksize); newsize = cursize + blksize; auio.uio_offset = cursize; auio.uio_resid = blksize; aiov.iov_len = blksize; aiov.iov_base = data; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_WRITE; auio.uio_segflg = UIO_SYSSPACE; error = VOP_WRITE(vp, &auio, IO_SYNC, cnp->cn_cred); if (!error) dp->i_size = newsize; return (error); } static int ext2_htree_writebuf(struct inode* ip, struct ext2fs_htree_lookup_info *info) { int i, error; for (i = 0; i < info->h_levels_num; i++) { struct buf *bp = info->h_levels[i].h_bp; ext2_dx_csum_set(ip, (struct ext2fs_direct_2 *)bp->b_data); error = bwrite(bp); if (error) return (error); } return (0); } static void ext2_htree_insert_entry_to_level(struct ext2fs_htree_lookup_level *level, uint32_t hash, uint32_t blk) { struct ext2fs_htree_entry *target; int entries_num; target = level->h_entry + 1; entries_num = ext2_htree_get_count(level->h_entries); memmove(target + 1, target, (char *)(level->h_entries + entries_num) - (char *)target); ext2_htree_set_block(target, blk); ext2_htree_set_hash(target, hash); ext2_htree_set_count(level->h_entries, entries_num + 1); } /* * Insert an index entry to the index node. */ static void ext2_htree_insert_entry(struct ext2fs_htree_lookup_info *info, uint32_t hash, uint32_t blk) { struct ext2fs_htree_lookup_level *level; level = &info->h_levels[info->h_levels_num - 1]; ext2_htree_insert_entry_to_level(level, hash, blk); } /* * Compare two entry sort descriptors by name hash value. * This is used together with qsort. */ static int ext2_htree_cmp_sort_entry(const void *e1, const void *e2) { const struct ext2fs_htree_sort_entry *entry1, *entry2; entry1 = (const struct ext2fs_htree_sort_entry *)e1; entry2 = (const struct ext2fs_htree_sort_entry *)e2; - if (entry1->h_hash < entry2->h_hash) + if (le32toh(entry1->h_hash) < le32toh(entry2->h_hash)) return (-1); - if (entry1->h_hash > entry2->h_hash) + if (le32toh(entry1->h_hash) > le32toh(entry2->h_hash)) return (1); return (0); } /* * Append an entry to the end of the directory block. 
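The qsort comparator ext2_htree_cmp_sort_entry() above converts both hash values before comparing them; comparing the raw little-endian words would order entries by their byte-swapped values on big-endian hosts and corrupt the block split. A standalone comparator over bare little-endian words (the element type stands in for struct ext2fs_htree_sort_entry):

#include <sys/endian.h>
#include <stdint.h>
#include <stdlib.h>

static int
cmp_le32_hash(const void *a, const void *b)
{
	uint32_t ha = le32toh(*(const uint32_t *)a);
	uint32_t hb = le32toh(*(const uint32_t *)b);

	return (ha < hb ? -1 : (ha > hb ? 1 : 0));
}

/* Usage: qsort(le32_hash_array, n, sizeof(uint32_t), cmp_le32_hash); */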
*/ static void ext2_append_entry(char *block, uint32_t blksize, struct ext2fs_direct_2 *last_entry, struct ext2fs_direct_2 *new_entry, int csum_size) { uint16_t entry_len; entry_len = EXT2_DIR_REC_LEN(last_entry->e2d_namlen); - last_entry->e2d_reclen = entry_len; + last_entry->e2d_reclen = htole16(entry_len); last_entry = (struct ext2fs_direct_2 *)((char *)last_entry + entry_len); - new_entry->e2d_reclen = block + blksize - (char *)last_entry - csum_size; + new_entry->e2d_reclen = htole16(block + blksize - (char *)last_entry - + csum_size); memcpy(last_entry, new_entry, EXT2_DIR_REC_LEN(new_entry->e2d_namlen)); } /* * Move half of entries from the old directory block to the new one. */ static int ext2_htree_split_dirblock(struct inode *ip, char *block1, char *block2, uint32_t blksize, uint32_t *hash_seed, uint8_t hash_version, uint32_t *split_hash, struct ext2fs_direct_2 *entry) { struct m_ext2fs *fs; int entry_cnt = 0; int size = 0, csum_size = 0; int i, k; uint32_t offset; uint16_t entry_len = 0; uint32_t entry_hash; struct ext2fs_direct_2 *ep, *last; char *dest; struct ext2fs_htree_sort_entry *sort_info; fs = ip->i_e2fs; ep = (struct ext2fs_direct_2 *)block1; dest = block2; sort_info = (struct ext2fs_htree_sort_entry *) ((char *)block2 + blksize); if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) csum_size = sizeof(struct ext2fs_direct_tail); /* * Calculate name hash value for the entry which is to be added. */ ext2_htree_hash(entry->e2d_name, entry->e2d_namlen, hash_seed, hash_version, &entry_hash, NULL); /* * Fill in directory entry sort descriptors. */ while ((char *)ep < block1 + blksize - csum_size) { - if (ep->e2d_ino && ep->e2d_namlen) { + if (le32toh(ep->e2d_ino) && ep->e2d_namlen) { entry_cnt++; sort_info--; sort_info->h_size = ep->e2d_reclen; - sort_info->h_offset = (char *)ep - block1; + sort_info->h_offset = htole16((char *)ep - block1); ext2_htree_hash(ep->e2d_name, ep->e2d_namlen, hash_seed, hash_version, &sort_info->h_hash, NULL); + sort_info->h_hash = htole32(sort_info->h_hash); } ep = (struct ext2fs_direct_2 *) - ((char *)ep + ep->e2d_reclen); + ((char *)ep + le16toh(ep->e2d_reclen)); } /* * Sort directory entry descriptors by name hash value. */ qsort(sort_info, entry_cnt, sizeof(struct ext2fs_htree_sort_entry), ext2_htree_cmp_sort_entry); /* * Count the number of entries to move to directory block 2. */ for (i = entry_cnt - 1; i >= 0; i--) { - if (sort_info[i].h_size + size > blksize / 2) + if (le16toh(sort_info[i].h_size) + size > blksize / 2) break; - size += sort_info[i].h_size; + size += le16toh(sort_info[i].h_size); } - *split_hash = sort_info[i + 1].h_hash; + *split_hash = le32toh(sort_info[i + 1].h_hash); /* * Set collision bit. */ - if (*split_hash == sort_info[i].h_hash) + if (*split_hash == le32toh(sort_info[i].h_hash)) *split_hash += 1; /* * Move half of directory entries from block 1 to block 2. */ for (k = i + 1; k < entry_cnt; k++) { ep = (struct ext2fs_direct_2 *)((char *)block1 + - sort_info[k].h_offset); + le16toh(sort_info[k].h_offset)); entry_len = EXT2_DIR_REC_LEN(ep->e2d_namlen); memcpy(dest, ep, entry_len); - ((struct ext2fs_direct_2 *)dest)->e2d_reclen = entry_len; + ((struct ext2fs_direct_2 *)dest)->e2d_reclen = + htole16(entry_len); /* Mark directory entry as unused. */ ep->e2d_ino = 0; dest += entry_len; } dest -= entry_len; /* Shrink directory entries in block 1. 
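The split hash picked above is what later routes lookups to the correct half: descriptors are consumed from the largest hash downward until block 2 holds about half the data, the first migrating descriptor's hash becomes *split_hash, and the low bit is bumped when the neighbour carries the same hash so equal hashes are never divided ambiguously (block 1 is then compacted below). A small host-endian sketch of just that choice, assuming the descriptors are already sorted ascending by hash:

#include <stdint.h>
#include <stdio.h>

/* Pick the split hash for descriptors sorted ascending by hash. */
static uint32_t
pick_split_hash(const uint32_t *hash, const uint16_t *size, int n,
    uint32_t blksize)
{
        uint32_t moved = 0, split;
        int i;

        /* Walk from the largest hash down, moving at most half a block. */
        for (i = n - 1; i >= 0; i--) {
                if (size[i] + moved > blksize / 2)
                        break;
                moved += size[i];
        }
        split = hash[i + 1];            /* first descriptor that migrates */
        if (i >= 0 && split == hash[i])
                split += 1;             /* set the collision bit */
        return (split);
}

int
main(void)
{
        uint32_t hash[] = { 0x10, 0x20, 0x30, 0x30, 0x40 };
        uint16_t size[] = { 200, 200, 200, 200, 200 };

        /* Breaks at i = 2; hash[3] == hash[2], so the result is 0x31. */
        printf("split hash 0x%x\n",
            (unsigned)pick_split_hash(hash, size, 5, 1024));
        return (0);
}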
*/ last = (struct ext2fs_direct_2 *)block1; entry_len = 0; for (offset = 0; offset < blksize - csum_size; ) { ep = (struct ext2fs_direct_2 *)(block1 + offset); - offset += ep->e2d_reclen; - if (ep->e2d_ino) { + offset += le16toh(ep->e2d_reclen); + if (le32toh(ep->e2d_ino)) { last = (struct ext2fs_direct_2 *) ((char *)last + entry_len); entry_len = EXT2_DIR_REC_LEN(ep->e2d_namlen); memcpy((void *)last, (void *)ep, entry_len); - last->e2d_reclen = entry_len; + last->e2d_reclen = htole16(entry_len); } } if (entry_hash >= *split_hash) { /* Add entry to block 2. */ ext2_append_entry(block2, blksize, (struct ext2fs_direct_2 *)dest, entry, csum_size); /* Adjust length field of last entry of block 1. */ - last->e2d_reclen = block1 + blksize - (char *)last - csum_size; + last->e2d_reclen = htole16(block1 + blksize - (char *)last - + csum_size); } else { /* Add entry to block 1. */ ext2_append_entry(block1, blksize, last, entry, csum_size); /* Adjust length field of last entry of block 2. */ ((struct ext2fs_direct_2 *)dest)->e2d_reclen = - block2 + blksize - dest - csum_size; + htole16(block2 + blksize - dest - csum_size); } if (csum_size) { ext2_init_dirent_tail(EXT2_DIRENT_TAIL(block1, blksize)); ext2_init_dirent_tail(EXT2_DIRENT_TAIL(block2, blksize)); } return (0); } /* * Create an HTree index for a directory */ int ext2_htree_create_index(struct vnode *vp, struct componentname *cnp, struct ext2fs_direct_2 *new_entry) { struct buf *bp = NULL; struct inode *dp; struct ext2fs *fs; struct m_ext2fs *m_fs; struct ext2fs_direct_2 *ep, *dotdot; struct ext2fs_htree_root *root; struct ext2fs_htree_lookup_info info; uint32_t blksize, dirlen, split_hash; + uint32_t hash_seed[4]; uint8_t hash_version; char *buf1 = NULL; char *buf2 = NULL; int error = 0; dp = VTOI(vp); fs = dp->i_e2fs->e2fs; m_fs = dp->i_e2fs; blksize = m_fs->e2fs_bsize; buf1 = malloc(blksize, M_TEMP, M_WAITOK | M_ZERO); buf2 = malloc(blksize, M_TEMP, M_WAITOK | M_ZERO); if ((error = ext2_blkatoff(vp, 0, NULL, &bp)) != 0) goto out; root = (struct ext2fs_htree_root *)bp->b_data; dotdot = (struct ext2fs_direct_2 *)((char *)&(root->h_dotdot)); - ep = (struct ext2fs_direct_2 *)((char *)dotdot + dotdot->e2d_reclen); + ep = (struct ext2fs_direct_2 *)((char *)dotdot + + le16toh(dotdot->e2d_reclen)); dirlen = (char *)root + blksize - (char *)ep; memcpy(buf1, ep, dirlen); ep = (struct ext2fs_direct_2 *)buf1; while ((char *)ep < buf1 + dirlen) ep = (struct ext2fs_direct_2 *) - ((char *)ep + ep->e2d_reclen); - ep->e2d_reclen = buf1 + blksize - (char *)ep; + ((char *)ep + le16toh(ep->e2d_reclen)); + ep->e2d_reclen = htole16(buf1 + blksize - (char *)ep); dp->i_flag |= IN_E3INDEX; /* * Initialize index root. 
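The root initialized below can hold only a limited number of (hash, block) entries because it shares block 0 with the "." and ".." records and the dx_info header; ext2_htree_root_limit() computes the exact figure. A back-of-the-envelope sketch of the same idea, treated as an approximation since it ignores the checksum tail a metadata_csum filesystem would also reserve:

#include <stdint.h>
#include <stdio.h>

/* Minimal record length: 8-byte header + name, rounded up to 4. */
static uint32_t
rec_len(uint32_t namlen)
{
        return ((8 + namlen + 3) & ~3u);
}

/*
 * Approximate capacity of the htree root block: whatever is left after
 * ".", ".." and the dx_info header, divided by the 8 bytes an index
 * entry (4-byte hash + 4-byte block) occupies.
 */
static uint32_t
root_entry_capacity(uint32_t blksize, uint32_t infolen)
{
        uint32_t space = blksize - rec_len(1) - rec_len(2) - infolen;

        return (space / 8);
}

int
main(void)
{
        /* 4096-byte blocks, 8-byte info header. */
        printf("root holds about %u index entries\n",
            (unsigned)root_entry_capacity(4096, 8));
        return (0);
}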
*/ - dotdot->e2d_reclen = blksize - EXT2_DIR_REC_LEN(1); + dotdot->e2d_reclen = htole16(blksize - EXT2_DIR_REC_LEN(1)); memset(&root->h_info, 0, sizeof(root->h_info)); root->h_info.h_hash_version = fs->e3fs_def_hash_version; root->h_info.h_info_len = sizeof(root->h_info); ext2_htree_set_block(root->h_entries, 1); ext2_htree_set_count(root->h_entries, 1); ext2_htree_set_limit(root->h_entries, ext2_htree_root_limit(dp, sizeof(root->h_info))); memset(&info, 0, sizeof(info)); info.h_levels_num = 1; info.h_levels[0].h_entries = root->h_entries; info.h_levels[0].h_entry = root->h_entries; hash_version = root->h_info.h_hash_version; if (hash_version <= EXT2_HTREE_TEA) hash_version += m_fs->e2fs_uhash; - ext2_htree_split_dirblock(dp, buf1, buf2, blksize, fs->e3fs_hash_seed, + ext2_get_hash_seed(fs, hash_seed); + ext2_htree_split_dirblock(dp, buf1, buf2, blksize, hash_seed, hash_version, &split_hash, new_entry); ext2_htree_insert_entry(&info, split_hash, 2); /* * Write directory block 0. */ ext2_dx_csum_set(dp, (struct ext2fs_direct_2 *)bp->b_data); if (DOINGASYNC(vp)) { bdwrite(bp); error = 0; } else { error = bwrite(bp); } dp->i_flag |= IN_CHANGE | IN_UPDATE; if (error) goto out; /* * Write directory block 1. */ ext2_dirent_csum_set(dp, (struct ext2fs_direct_2 *)buf1); error = ext2_htree_append_block(vp, buf1, cnp, blksize); if (error) goto out1; /* * Write directory block 2. */ ext2_dirent_csum_set(dp, (struct ext2fs_direct_2 *)buf2); error = ext2_htree_append_block(vp, buf2, cnp, blksize); free(buf1, M_TEMP); free(buf2, M_TEMP); return (error); out: if (bp != NULL) brelse(bp); out1: free(buf1, M_TEMP); free(buf2, M_TEMP); return (error); } /* * Add an entry to the directory using htree index. */ int ext2_htree_add_entry(struct vnode *dvp, struct ext2fs_direct_2 *entry, struct componentname *cnp) { struct ext2fs_htree_entry *entries, *leaf_node; struct ext2fs_htree_lookup_info info; struct buf *bp = NULL; struct ext2fs *fs; struct m_ext2fs *m_fs; struct inode *ip; uint16_t ent_num; uint32_t dirhash, split_hash; uint32_t blksize, blknum; uint64_t cursize, dirsize; + uint32_t hash_seed[4]; uint8_t hash_version; char *newdirblock = NULL; char *newidxblock = NULL; struct ext2fs_htree_node *dst_node; struct ext2fs_htree_entry *dst_entries; struct ext2fs_htree_entry *root_entires; struct buf *dst_bp = NULL; int error, write_bp = 0, write_dst_bp = 0, write_info = 0; ip = VTOI(dvp); m_fs = ip->i_e2fs; fs = m_fs->e2fs; blksize = m_fs->e2fs_bsize; if (ip->i_count != 0) return ext2_add_entry(dvp, entry); /* Target directory block is full, split it */ memset(&info, 0, sizeof(info)); error = ext2_htree_find_leaf(ip, entry->e2d_name, entry->e2d_namlen, &dirhash, &hash_version, &info); if (error) return (error); entries = info.h_levels[info.h_levels_num - 1].h_entries; ent_num = ext2_htree_get_count(entries); if (ent_num == ext2_htree_get_limit(entries)) { /* Split the index node. 
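When the index node found by the lookup is already full, the split below keeps the lower half of its (hash, block) entries, moves the upper half into a freshly appended block, and promotes the hash at the cut point into the level above. A host-endian sketch of that split, using a plain struct in place of struct ext2fs_htree_entry and assuming the entries are sorted by hash:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dx_ent { uint32_t hash; uint32_t blk; };   /* simplified stand-in */

/*
 * Split a full index node: keep the lower half in src, move the upper half
 * to dst, and report the hash that must be inserted into the parent next
 * to dst's block number.
 */
static uint32_t
split_index_node(struct dx_ent *src, int *src_cnt, struct dx_ent *dst,
    int *dst_cnt)
{
        int keep = *src_cnt / 2;
        int move = *src_cnt - keep;
        uint32_t promoted = src[keep].hash;

        memcpy(dst, src + keep, move * sizeof(*src));
        *src_cnt = keep;
        *dst_cnt = move;
        return (promoted);
}

int
main(void)
{
        struct dx_ent node[6] = {
                {0, 2}, {0x20, 3}, {0x40, 4}, {0x60, 5}, {0x80, 6}, {0xa0, 7}
        };
        struct dx_ent newnode[6];
        int nsrc = 6, ndst = 0;
        uint32_t promoted;

        promoted = split_index_node(node, &nsrc, newnode, &ndst);
        printf("kept %d, moved %d, promote hash 0x%x\n",
            nsrc, ndst, (unsigned)promoted);    /* kept 3, moved 3, 0x60 */
        return (0);
}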
*/ root_entires = info.h_levels[0].h_entries; newidxblock = malloc(blksize, M_TEMP, M_WAITOK | M_ZERO); dst_node = (struct ext2fs_htree_node *)newidxblock; memset(&dst_node->h_fake_dirent, 0, sizeof(dst_node->h_fake_dirent)); - dst_node->h_fake_dirent.e2d_reclen = blksize; + dst_node->h_fake_dirent.e2d_reclen = htole16(blksize); cursize = roundup(ip->i_size, blksize); dirsize = cursize + blksize; blknum = dirsize / blksize - 1; ext2_dx_csum_set(ip, (struct ext2fs_direct_2 *)newidxblock); error = ext2_htree_append_block(dvp, newidxblock, cnp, blksize); if (error) goto finish; error = ext2_blkatoff(dvp, cursize, NULL, &dst_bp); if (error) goto finish; dst_node = (struct ext2fs_htree_node *)dst_bp->b_data; dst_entries = dst_node->h_entries; if (info.h_levels_num == 2) { uint16_t src_ent_num, dst_ent_num; if (ext2_htree_get_count(root_entires) == ext2_htree_get_limit(root_entires)) { SDT_PROBE2(ext2fs, , trace, htree, 1, "directory index is full"); error = EIO; goto finish; } src_ent_num = ent_num / 2; dst_ent_num = ent_num - src_ent_num; split_hash = ext2_htree_get_hash(entries + src_ent_num); /* Move half of index entries to the new index node */ memcpy(dst_entries, entries + src_ent_num, dst_ent_num * sizeof(struct ext2fs_htree_entry)); ext2_htree_set_count(entries, src_ent_num); ext2_htree_set_count(dst_entries, dst_ent_num); ext2_htree_set_limit(dst_entries, ext2_htree_node_limit(ip)); if (info.h_levels[1].h_entry >= entries + src_ent_num) { struct buf *tmp = info.h_levels[1].h_bp; info.h_levels[1].h_bp = dst_bp; dst_bp = tmp; info.h_levels[1].h_entry = info.h_levels[1].h_entry - (entries + src_ent_num) + dst_entries; info.h_levels[1].h_entries = dst_entries; } ext2_htree_insert_entry_to_level(&info.h_levels[0], split_hash, blknum); /* Write new index node to disk */ ext2_dx_csum_set(ip, (struct ext2fs_direct_2 *)dst_bp->b_data); error = bwrite(dst_bp); ip->i_flag |= IN_CHANGE | IN_UPDATE; if (error) goto finish; write_dst_bp = 1; } else { /* Create second level for htree index */ struct ext2fs_htree_root *idx_root; memcpy(dst_entries, entries, ent_num * sizeof(struct ext2fs_htree_entry)); ext2_htree_set_limit(dst_entries, ext2_htree_node_limit(ip)); idx_root = (struct ext2fs_htree_root *) info.h_levels[0].h_bp->b_data; idx_root->h_info.h_ind_levels = 1; ext2_htree_set_count(entries, 1); ext2_htree_set_block(entries, blknum); info.h_levels_num = 2; info.h_levels[1].h_entries = dst_entries; info.h_levels[1].h_entry = info.h_levels[0].h_entry - info.h_levels[0].h_entries + dst_entries; info.h_levels[1].h_bp = dst_bp; dst_bp = NULL; } } leaf_node = info.h_levels[info.h_levels_num - 1].h_entry; blknum = ext2_htree_get_block(leaf_node); error = ext2_blkatoff(dvp, blknum * blksize, NULL, &bp); if (error) goto finish; /* Split target directory block */ newdirblock = malloc(blksize, M_TEMP, M_WAITOK | M_ZERO); + ext2_get_hash_seed(fs, hash_seed); ext2_htree_split_dirblock(ip, (char *)bp->b_data, newdirblock, blksize, - fs->e3fs_hash_seed, hash_version, &split_hash, entry); + hash_seed, hash_version, &split_hash, entry); cursize = roundup(ip->i_size, blksize); dirsize = cursize + blksize; blknum = dirsize / blksize - 1; /* Add index entry for the new directory block */ ext2_htree_insert_entry(&info, split_hash, blknum); /* Write the new directory block to the end of the directory */ ext2_dirent_csum_set(ip, (struct ext2fs_direct_2 *)newdirblock); error = ext2_htree_append_block(dvp, newdirblock, cnp, blksize); if (error) goto finish; /* Write the target directory block */ ext2_dirent_csum_set(ip, 
(struct ext2fs_direct_2 *)bp->b_data); error = bwrite(bp); ip->i_flag |= IN_CHANGE | IN_UPDATE; if (error) goto finish; write_bp = 1; /* Write the index block */ error = ext2_htree_writebuf(ip, &info); if (!error) write_info = 1; finish: if (dst_bp != NULL && !write_dst_bp) brelse(dst_bp); if (bp != NULL && !write_bp) brelse(bp); if (newdirblock != NULL) free(newdirblock, M_TEMP); if (newidxblock != NULL) free(newidxblock, M_TEMP); if (!write_info) ext2_htree_release(&info); return (error); } Index: head/sys/fs/ext2fs/ext2_inode.c =================================================================== --- head/sys/fs/ext2fs/ext2_inode.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_inode.c (revision 361136) @@ -1,643 +1,644 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_inode.c 8.5 (Berkeley) 12/30/93 * $FreeBSD$ */ #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Update the access, modified, and inode change times as specified by the * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively. Write the inode * to disk if the IN_MODIFIED flag is set (it may be set initially, or by * the timestamp update). The IN_LAZYMOD flag is set to force a write * later if not now. If we write now, then clear both IN_MODIFIED and * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is * set, then wait for the write to complete. 
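Once those flags say the inode has to reach the disk, ext2_update below reads the inode-table block holding it (ino_to_fsba) and indexes into that block (ino_to_fsbo). The arithmetic behind those macros is the classic ext2 layout; the sketch below uses hypothetical stand-ins (ipg, isize, itable_start) rather than the fields the driver actually consults:

#include <stdint.h>
#include <stdio.h>

struct ino_loc {
        uint32_t group;         /* block group holding the inode */
        uint32_t block;         /* block of the inode table to read */
        uint32_t offset;        /* byte offset of the inode in that block */
};

/*
 * Locate inode `ino` (1-based) under a simplified layout: each group has
 * `ipg` inodes of `isize` bytes and its inode table starts at block
 * itable_start[group].
 */
static struct ino_loc
locate_inode(uint32_t ino, uint32_t ipg, uint32_t isize, uint32_t bsize,
    const uint32_t *itable_start)
{
        struct ino_loc loc;
        uint32_t index = (ino - 1) % ipg;

        loc.group = (ino - 1) / ipg;
        loc.block = itable_start[loc.group] + (index * isize) / bsize;
        loc.offset = (index * isize) % bsize;
        return (loc);
}

int
main(void)
{
        /* 8192 inodes/group, 256-byte inodes, 4096-byte blocks. */
        uint32_t itable_start[] = { 100, 33000 };
        struct ino_loc loc = locate_inode(8200, 8192, 256, 4096, itable_start);

        printf("group %u, table block %u, offset %u\n",
            (unsigned)loc.group, (unsigned)loc.block, (unsigned)loc.offset);
        return (0);
}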
*/ int ext2_update(struct vnode *vp, int waitfor) { struct m_ext2fs *fs; struct buf *bp; struct inode *ip; int error; ASSERT_VOP_ELOCKED(vp, "ext2_update"); ext2_itimes(vp); ip = VTOI(vp); if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0) return (0); ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED); fs = ip->i_e2fs; if (fs->e2fs_ronly) return (0); if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } error = ext2_i2ei(ip, (struct ext2fs_dinode *)((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number))); if (error) { brelse(bp); return (error); } if (waitfor && !DOINGASYNC(vp)) return (bwrite(bp)); else { bdwrite(bp); return (0); } } #define SINGLE 0 /* index of single indirect block */ #define DOUBLE 1 /* index of double indirect block */ #define TRIPLE 2 /* index of triple indirect block */ /* * Release blocks associated with the inode ip and stored in the indirect * block bn. Blocks are free'd in LIFO order up to (but not including) * lastbn. If level is greater than SINGLE, the block is an indirect block * and recursive calls to indirtrunc must be used to cleanse other indirect * blocks. * * NB: triple indirect blocks are untested. */ static int ext2_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn, daddr_t lastbn, int level, e4fs_daddr_t *countp) { struct buf *bp; struct m_ext2fs *fs = ip->i_e2fs; struct vnode *vp; e2fs_daddr_t *bap, *copy; int i, nblocks, error = 0, allerror = 0; e2fs_lbn_t nb, nlbn, last; e4fs_daddr_t blkcount, factor, blocksreleased = 0; /* * Calculate index in current block of last * block to be kept. -1 indicates the entire * block so we need not calculate the index. */ factor = 1; for (i = SINGLE; i < level; i++) factor *= NINDIR(fs); last = lastbn; if (lastbn > 0) last /= factor; nblocks = btodb(fs->e2fs_bsize); /* * Get buffer of block pointers, zero those entries corresponding * to blocks to be free'd, and update on disk copy first. Since * double(triple) indirect before single(double) indirect, calls * to bmap on these blocks will fail. However, we already have * the on disk address, so we have to set the b_blkno field * explicitly instead of letting bread do everything for us. */ vp = ITOV(ip); bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0, 0); if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) { bp->b_iocmd = BIO_READ; if (bp->b_bcount > bp->b_bufsize) panic("ext2_indirtrunc: bad buffer size"); bp->b_blkno = dbn; vfs_busy_pages(bp, 0); bp->b_iooffset = dbtob(bp->b_blkno); bstrategy(bp); error = bufwait(bp); } if (error) { brelse(bp); *countp = 0; return (error); } bap = (e2fs_daddr_t *)bp->b_data; copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK); bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->e2fs_bsize); bzero((caddr_t)&bap[last + 1], (NINDIR(fs) - (last + 1)) * sizeof(e2fs_daddr_t)); if (last == -1) bp->b_flags |= B_INVAL; if (DOINGASYNC(vp)) { bdwrite(bp); } else { error = bwrite(bp); if (error) allerror = error; } bap = copy; /* * Recursively free totally unused blocks. */ for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last; i--, nlbn += factor) { - nb = bap[i]; + nb = le32toh(bap[i]); if (nb == 0) continue; if (level > SINGLE) { if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb), (int32_t)-1, level - 1, &blkcount)) != 0) allerror = error; blocksreleased += blkcount; } ext2_blkfree(ip, nb, fs->e2fs_bsize); blocksreleased += nblocks; } /* * Recursively free last partial block. 
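The factor computed above is what keeps the recursion honest: at level L every slot of an indirect block spans NINDIR^L logical blocks, so lastbn / factor is the last slot still covering live data and only slots past it are freed wholesale, while lastbn % factor is handed to that slot's child as its own truncation point. A small arithmetic sketch (NINDIR is 1024 for 4096-byte blocks, since each pointer is 4 bytes):

#include <stdio.h>
#include <stdint.h>

/*
 * For an indirect block at `level` (0 = single, 1 = double, 2 = triple),
 * each slot covers nindir^level logical blocks.  Report which slot is the
 * last one still (partially) in use for `lastbn`, and the residual last
 * block number the recursion hands to that slot's child block.
 */
static void
truncate_plan(int64_t lastbn, int level, int64_t nindir)
{
        int64_t factor = 1;
        int i;

        for (i = 0; i < level; i++)
                factor *= nindir;
        printf("level %d: keep slots 0..%lld, child truncates at %lld\n",
            level, (long long)(lastbn / factor),
            (long long)(lastbn % factor));
}

int
main(void)
{
        int64_t nindir = 4096 / 4;      /* 1024 block pointers per 4K block */

        /* Last surviving logical block 300000 within a double-indirect tree. */
        truncate_plan(300000, 1, nindir);       /* keep slots 0..292, child 992 */
        return (0);
}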
*/ if (level > SINGLE && lastbn >= 0) { last = lastbn % factor; - nb = bap[i]; + nb = le32toh(bap[i]); if (nb != 0) { if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb), last, level - 1, &blkcount)) != 0) allerror = error; blocksreleased += blkcount; } } free(copy, M_TEMP); *countp = blocksreleased; return (allerror); } /* * Truncate the inode oip to at most length size, freeing the * disk blocks. */ static int ext2_ind_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred, struct thread *td) { struct vnode *ovp = vp; e4fs_daddr_t lastblock; struct inode *oip; e4fs_daddr_t bn, lbn, lastiblock[EXT2_NIADDR], indir_lbn[EXT2_NIADDR]; uint32_t oldblks[EXT2_NDADDR + EXT2_NIADDR]; uint32_t newblks[EXT2_NDADDR + EXT2_NIADDR]; struct m_ext2fs *fs; struct buf *bp; int offset, size, level; e4fs_daddr_t count, nblocks, blocksreleased = 0; int error, i, allerror; off_t osize; #ifdef INVARIANTS struct bufobj *bo; #endif oip = VTOI(ovp); #ifdef INVARIANTS bo = &ovp->v_bufobj; #endif fs = oip->i_e2fs; osize = oip->i_size; /* * Lengthen the size of the file. We must ensure that the * last byte of the file is allocated. Since the smallest * value of osize is 0, length will be at least 1. */ if (osize < length) { if (length > oip->i_e2fs->e2fs_maxfilesize) return (EFBIG); vnode_pager_setsize(ovp, length); offset = blkoff(fs, length - 1); lbn = lblkno(fs, length - 1); flags |= BA_CLRBUF; error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags); if (error) { vnode_pager_setsize(vp, osize); return (error); } oip->i_size = length; if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(ovp)) bdwrite(bp); else bawrite(bp); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ext2_update(ovp, !DOINGASYNC(ovp))); } /* * Shorten the size of the file. If the file is not being * truncated to a block boundary, the contents of the * partial block following the end of the file must be * zero'ed in case it ever become accessible again because * of subsequent file growth. */ /* I don't understand the comment above */ offset = blkoff(fs, length); if (offset == 0) { oip->i_size = length; } else { lbn = lblkno(fs, length); flags |= BA_CLRBUF; error = ext2_balloc(oip, lbn, offset, cred, &bp, flags); if (error) return (error); oip->i_size = length; size = blksize(fs, oip, lbn); bzero((char *)bp->b_data + offset, (u_int)(size - offset)); allocbuf(bp, size); if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(ovp)) bdwrite(bp); else bawrite(bp); } /* * Calculate index into inode's block list of * last direct and indirect blocks (if any) * which we want to keep. Lastblock is -1 when * the file is truncated to 0. */ lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1; lastiblock[SINGLE] = lastblock - EXT2_NDADDR; lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); nblocks = btodb(fs->e2fs_bsize); /* * Update file and block pointers on disk before we start freeing * blocks. If we crash before free'ing blocks below, the blocks * will be returned to the free list. lastiblock values are also * normalized to -1 for calls to ext2_indirtrunc below. 
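The lastblock/lastiblock values above are plain index arithmetic over the 12-direct / single / double / triple layout: lastblock is the last logical block that survives, and each lastiblock[] entry is that number re-based to the start of the corresponding indirect tree, with a negative result meaning the whole tree can go. A sketch with 4096-byte blocks:

#include <stdio.h>
#include <stdint.h>

#define NDIRECT 12              /* direct pointers in the inode */

static void
truncate_indexes(int64_t length, int64_t bsize, int64_t nindir)
{
        /* lblkno(length + bsize - 1) - 1: last block still holding data. */
        int64_t lastblock = (length + bsize - 1) / bsize - 1;
        int64_t lastsingle = lastblock - NDIRECT;
        int64_t lastdouble = lastsingle - nindir;
        int64_t lasttriple = lastdouble - nindir * nindir;

        printf("lastblock %lld, single %lld, double %lld, triple %lld\n",
            (long long)lastblock, (long long)lastsingle,
            (long long)lastdouble, (long long)lasttriple);
}

int
main(void)
{
        int64_t bsize = 4096, nindir = bsize / 4;

        truncate_indexes(0, bsize, nindir);           /* all negative: free everything */
        truncate_indexes(1 << 20, bsize, nindir);     /* 1 MiB: directs plus part of the single tree */
        truncate_indexes(16LL << 20, bsize, nindir);  /* 16 MiB: single tree plus part of the double */
        return (0);
}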
*/ for (level = TRIPLE; level >= SINGLE; level--) { oldblks[EXT2_NDADDR + level] = oip->i_ib[level]; if (lastiblock[level] < 0) { oip->i_ib[level] = 0; lastiblock[level] = -1; } } for (i = 0; i < EXT2_NDADDR; i++) { oldblks[i] = oip->i_db[i]; if (i > lastblock) oip->i_db[i] = 0; } oip->i_flag |= IN_CHANGE | IN_UPDATE; allerror = ext2_update(ovp, !DOINGASYNC(ovp)); /* * Having written the new inode to disk, save its new configuration * and put back the old block pointers long enough to process them. * Note that we save the new block configuration so we can check it * when we are done. */ for (i = 0; i < EXT2_NDADDR; i++) { newblks[i] = oip->i_db[i]; oip->i_db[i] = oldblks[i]; } for (i = 0; i < EXT2_NIADDR; i++) { newblks[EXT2_NDADDR + i] = oip->i_ib[i]; oip->i_ib[i] = oldblks[EXT2_NDADDR + i]; } oip->i_size = osize; error = vtruncbuf(ovp, length, (int)fs->e2fs_bsize); if (error && (allerror == 0)) allerror = error; vnode_pager_setsize(ovp, length); /* * Indirect blocks first. */ indir_lbn[SINGLE] = -EXT2_NDADDR; indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; for (level = TRIPLE; level >= SINGLE; level--) { bn = oip->i_ib[level]; if (bn != 0) { error = ext2_indirtrunc(oip, indir_lbn[level], fsbtodb(fs, bn), lastiblock[level], level, &count); if (error) allerror = error; blocksreleased += count; if (lastiblock[level] < 0) { oip->i_ib[level] = 0; ext2_blkfree(oip, bn, fs->e2fs_fsize); blocksreleased += nblocks; } } if (lastiblock[level] >= 0) goto done; } /* * All whole direct blocks or frags. */ for (i = EXT2_NDADDR - 1; i > lastblock; i--) { long bsize; bn = oip->i_db[i]; if (bn == 0) continue; oip->i_db[i] = 0; bsize = blksize(fs, oip, i); ext2_blkfree(oip, bn, bsize); blocksreleased += btodb(bsize); } if (lastblock < 0) goto done; /* * Finally, look for a change in size of the * last direct block; release any frags. */ bn = oip->i_db[lastblock]; if (bn != 0) { long oldspace, newspace; /* * Calculate amount of space we're giving * back as old block size minus new block size. */ oldspace = blksize(fs, oip, lastblock); oip->i_size = length; newspace = blksize(fs, oip, lastblock); if (newspace == 0) panic("ext2_truncate: newspace"); if (oldspace - newspace > 0) { /* * Block number of space to be free'd is * the old block # plus the number of frags * required for the storage we're keeping. */ bn += numfrags(fs, newspace); ext2_blkfree(oip, bn, oldspace - newspace); blocksreleased += btodb(oldspace - newspace); } } done: #ifdef INVARIANTS for (level = SINGLE; level <= TRIPLE; level++) if (newblks[EXT2_NDADDR + level] != oip->i_ib[level]) panic("itrunc1"); for (i = 0; i < EXT2_NDADDR; i++) if (newblks[i] != oip->i_db[i]) panic("itrunc2"); BO_LOCK(bo); if (length == 0 && (bo->bo_dirty.bv_cnt != 0 || bo->bo_clean.bv_cnt != 0)) panic("itrunc3"); BO_UNLOCK(bo); #endif /* INVARIANTS */ /* * Put back the real size. 
*/ oip->i_size = length; if (oip->i_blocks >= blocksreleased) oip->i_blocks -= blocksreleased; else /* sanity */ oip->i_blocks = 0; oip->i_flag |= IN_CHANGE; vnode_pager_setsize(ovp, length); return (allerror); } static int ext2_ext_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred, struct thread *td) { struct vnode *ovp = vp; int32_t lastblock; struct m_ext2fs *fs; struct inode *oip; struct buf *bp; uint32_t lbn, offset; int error, size; off_t osize; oip = VTOI(ovp); fs = oip->i_e2fs; osize = oip->i_size; if (osize < length) { if (length > oip->i_e2fs->e2fs_maxfilesize) { return (EFBIG); } vnode_pager_setsize(ovp, length); offset = blkoff(fs, length - 1); lbn = lblkno(fs, length - 1); flags |= BA_CLRBUF; error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags); if (error) { vnode_pager_setsize(vp, osize); return (error); } oip->i_size = length; if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(ovp)) bdwrite(bp); else bawrite(bp); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ext2_update(ovp, !DOINGASYNC(ovp))); } lastblock = (length + fs->e2fs_bsize - 1) / fs->e2fs_bsize; error = ext4_ext_remove_space(oip, lastblock, flags, cred, td); if (error) return (error); offset = blkoff(fs, length); if (offset == 0) { oip->i_size = length; } else { lbn = lblkno(fs, length); flags |= BA_CLRBUF; error = ext2_balloc(oip, lbn, offset, cred, &bp, flags); if (error) { return (error); } oip->i_size = length; size = blksize(fs, oip, lbn); bzero((char *)bp->b_data + offset, (u_int)(size - offset)); allocbuf(bp, size); if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(ovp)) bdwrite(bp); else bawrite(bp); } oip->i_size = osize; error = vtruncbuf(ovp, length, (int)fs->e2fs_bsize); if (error) return (error); vnode_pager_setsize(ovp, length); oip->i_size = length; oip->i_flag |= IN_CHANGE | IN_UPDATE; error = ext2_update(ovp, !DOINGASYNC(ovp)); return (error); } /* * Truncate the inode ip to at most length size, freeing the * disk blocks. */ int ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred, struct thread *td) { struct inode *ip; int error; ASSERT_VOP_LOCKED(vp, "ext2_truncate"); if (length < 0) return (EINVAL); ip = VTOI(vp); if (vp->v_type == VLNK && ip->i_size < vp->v_mount->mnt_maxsymlinklen) { #ifdef INVARIANTS if (length != 0) panic("ext2_truncate: partial truncate of symlink"); #endif bzero((char *)&ip->i_shortlink, (u_int)ip->i_size); ip->i_size = 0; ip->i_flag |= IN_CHANGE | IN_UPDATE; return (ext2_update(vp, 1)); } if (ip->i_size == length) { ip->i_flag |= IN_CHANGE | IN_UPDATE; return (ext2_update(vp, 0)); } if (ip->i_flag & IN_E4EXTENTS) error = ext2_ext_truncate(vp, length, flags, cred, td); else error = ext2_ind_truncate(vp, length, flags, cred, td); return (error); } /* * discard preallocated blocks */ int ext2_inactive(struct vop_inactive_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); struct thread *td = ap->a_td; int mode, error = 0; /* * Ignore inodes related to stale file handles. 
*/ if (ip->i_mode == 0) goto out; if (ip->i_nlink <= 0) { ext2_extattr_free(ip); error = ext2_truncate(vp, (off_t)0, 0, NOCRED, td); if (!(ip->i_flag & IN_E4EXTENTS)) ip->i_rdev = 0; mode = ip->i_mode; ip->i_mode = 0; ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_vfree(vp, ip->i_number, mode); } if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) ext2_update(vp, 0); out: /* * If we are done with the inode, reclaim it * so that it can be reused immediately. */ if (ip->i_mode == 0) vrecycle(vp); return (error); } /* * Reclaim an inode so that it can be used for other purposes. */ int ext2_reclaim(struct vop_reclaim_args *ap) { struct inode *ip; struct vnode *vp = ap->a_vp; ip = VTOI(vp); if (ip->i_flag & IN_LAZYMOD) { ip->i_flag |= IN_MODIFIED; ext2_update(vp, 0); } vfs_hash_remove(vp); free(vp->v_data, M_EXT2NODE); vp->v_data = 0; return (0); } Index: head/sys/fs/ext2fs/ext2_inode_cnv.c =================================================================== --- head/sys/fs/ext2fs/ext2_inode_cnv.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_inode_cnv.c (revision 361136) @@ -1,236 +1,264 @@ /*- * SPDX-License-Identifier: MIT-CMU * * Copyright (c) 1995 The University of Utah and * the Computer Systems Laboratory at the University of Utah (CSL). * All rights reserved. * * Permission to use, copy, modify and distribute this software is hereby * granted provided that (1) source code retains these copyright, permission, * and disclaimer notices, and (2) redistributions including binaries * reproduce the notices in supporting documentation, and (3) all advertising * materials mentioning features or use of this software display the following * acknowledgement: ``This product includes software developed by the * Computer Systems Laboratory at the University of Utah.'' * * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * CSL requests users of this software to return to csl-dist@cs.utah.edu any * improvements that they make and grant CSL redistribution rights. * * Utah $Hdr$ * $FreeBSD$ */ /* * routines to convert on disk ext2 inodes into inodes and back */ #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(ext2fs); /* * ext2fs trace probe: * arg0: verbosity. 
Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(ext2fs, , trace, inode_cnv, "int", "char*"); -#define XTIME_TO_NSEC(x) ((x & EXT3_NSEC_MASK) >> 2) -#define NSEC_TO_XTIME(t) (le32toh(t << 2) & EXT3_NSEC_MASK) - #ifdef EXT2FS_PRINT_EXTENTS void ext2_print_inode(struct inode *in) { int i; struct ext4_extent_header *ehp; struct ext4_extent *ep; printf("Inode: %5ju", (uintmax_t)in->i_number); printf( /* "Inode: %5d" */ " Type: %10s Mode: 0x%o Flags: 0x%x Version: %d acl: 0x%jx\n", "n/a", in->i_mode, in->i_flags, in->i_gen, in->i_facl); printf("User: %5u Group: %5u Size: %ju\n", in->i_uid, in->i_gid, (uintmax_t)in->i_size); printf("Links: %3d Blockcount: %ju\n", in->i_nlink, (uintmax_t)in->i_blocks); printf("ctime: 0x%x ", in->i_ctime); printf("atime: 0x%x ", in->i_atime); printf("mtime: 0x%x ", in->i_mtime); if (E2DI_HAS_XTIME(in)) printf("crtime %#x\n", in->i_birthtime); else printf("\n"); if (in->i_flag & IN_E4EXTENTS) { printf("Extents:\n"); ehp = (struct ext4_extent_header *)in->i_db; printf("Header (magic 0x%x entries %d max %d depth %d gen %d)\n", - ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth, - ehp->eh_gen); + le16toh(ehp->eh_magic), le16toh(ehp->eh_ecount), + le16toh(ehp->eh_max), le16toh(ehp->eh_depth), + le32toh(ehp->eh_gen)); ep = (struct ext4_extent *)(char *)(ehp + 1); - printf("Index (blk %d len %d start_lo %d start_hi %d)\n", ep->e_blk, - ep->e_len, ep->e_start_lo, ep->e_start_hi); + printf("Index (blk %d len %d start_lo %d start_hi %d)\n", + le32toh(ep->e_blk), + le16toh(ep->e_len), le32toh(ep->e_start_lo), + le16toh(ep->e_start_hi)); printf("\n"); } else { printf("BLOCKS:"); for (i = 0; i < (in->i_blocks <= 24 ? (in->i_blocks + 1) / 2 : 12); i++) printf(" %d", in->i_db[i]); printf("\n"); } } #endif /* EXT2FS_PRINT_EXTENTS */ + +#define XTIME_TO_NSEC(x) ((le32toh(x) & EXT3_NSEC_MASK) >> 2) + /* - * raw ext2 inode to inode + * raw ext2 inode LE to host inode conversion */ int ext2_ei2i(struct ext2fs_dinode *ei, struct inode *ip) { struct m_ext2fs *fs = ip->i_e2fs; + uint32_t ei_flags_host; + uint16_t ei_extra_isize_le; + int i; if ((ip->i_number < EXT2_FIRST_INO(fs) && ip->i_number != EXT2_ROOTINO) || (ip->i_number < EXT2_ROOTINO) || - (ip->i_number > fs->e2fs->e2fs_icount)) { + (ip->i_number > le32toh(fs->e2fs->e2fs_icount))) { SDT_PROBE2(ext2fs, , trace, inode_cnv, 1, "bad inode number"); return (EINVAL); } - if (ip->i_number == EXT2_ROOTINO && ei->e2di_nlink == 0) { + ip->i_nlink = le16toh(ei->e2di_nlink); + if (ip->i_number == EXT2_ROOTINO && ip->i_nlink == 0) { SDT_PROBE2(ext2fs, , trace, inode_cnv, 1, "root inode unallocated"); return (EINVAL); } - ip->i_nlink = ei->e2di_nlink; /* Check extra inode size */ + ei_extra_isize_le = le16toh(ei->e2di_extra_isize); if (EXT2_INODE_SIZE(fs) > E2FS_REV0_INODE_SIZE) { - if (E2FS_REV0_INODE_SIZE + ei->e2di_extra_isize > - EXT2_INODE_SIZE(fs) || (ei->e2di_extra_isize & 3)) { + if (E2FS_REV0_INODE_SIZE + ei_extra_isize_le > + EXT2_INODE_SIZE(fs) || (ei_extra_isize_le & 3)) { SDT_PROBE2(ext2fs, , trace, inode_cnv, 1, "bad extra inode size"); return (EINVAL); } } /* * Godmar thinks - if the link count is zero, then the inode is * unused - according to ext2 standards. Ufs marks this fact by * setting i_mode to zero - why ? I can see that this might lead to * problems in an undelete. */ - ip->i_mode = ei->e2di_nlink ? ei->e2di_mode : 0; - ip->i_size = ei->e2di_size; + ip->i_mode = ip->i_nlink ? 
le16toh(ei->e2di_mode) : 0; + ip->i_size = le32toh(ei->e2di_size); if (S_ISREG(ip->i_mode)) - ip->i_size |= ((u_int64_t)ei->e2di_size_high) << 32; - ip->i_atime = ei->e2di_atime; - ip->i_mtime = ei->e2di_mtime; - ip->i_ctime = ei->e2di_ctime; + ip->i_size |= (uint64_t)le32toh(ei->e2di_size_high) << 32; + ip->i_atime = le32toh(ei->e2di_atime); + ip->i_mtime = le32toh(ei->e2di_mtime); + ip->i_ctime = le32toh(ei->e2di_ctime); if (E2DI_HAS_XTIME(ip)) { - ip->i_atimensec = XTIME_TO_NSEC(ei->e2di_atime_extra); - ip->i_mtimensec = XTIME_TO_NSEC(ei->e2di_mtime_extra); - ip->i_ctimensec = XTIME_TO_NSEC(ei->e2di_ctime_extra); - ip->i_birthtime = ei->e2di_crtime; - ip->i_birthnsec = XTIME_TO_NSEC(ei->e2di_crtime_extra); + ip->i_atimensec = XTIME_TO_NSEC(le32toh(ei->e2di_atime_extra)); + ip->i_mtimensec = XTIME_TO_NSEC(le32toh(ei->e2di_mtime_extra)); + ip->i_ctimensec = XTIME_TO_NSEC(le32toh(ei->e2di_ctime_extra)); + ip->i_birthtime = le32toh(ei->e2di_crtime); + ip->i_birthnsec = XTIME_TO_NSEC(le32toh(ei->e2di_crtime_extra)); } ip->i_flags = 0; - ip->i_flags |= (ei->e2di_flags & EXT2_APPEND) ? SF_APPEND : 0; - ip->i_flags |= (ei->e2di_flags & EXT2_IMMUTABLE) ? SF_IMMUTABLE : 0; - ip->i_flags |= (ei->e2di_flags & EXT2_NODUMP) ? UF_NODUMP : 0; - ip->i_flag |= (ei->e2di_flags & EXT3_INDEX) ? IN_E3INDEX : 0; - ip->i_flag |= (ei->e2di_flags & EXT4_EXTENTS) ? IN_E4EXTENTS : 0; - ip->i_blocks = ei->e2di_nblock; - ip->i_facl = ei->e2di_facl; + ei_flags_host = le32toh(ei->e2di_flags); + ip->i_flags |= (ei_flags_host & EXT2_APPEND) ? SF_APPEND : 0; + ip->i_flags |= (ei_flags_host & EXT2_IMMUTABLE) ? SF_IMMUTABLE : 0; + ip->i_flags |= (ei_flags_host & EXT2_NODUMP) ? UF_NODUMP : 0; + ip->i_flag |= (ei_flags_host & EXT3_INDEX) ? IN_E3INDEX : 0; + ip->i_flag |= (ei_flags_host & EXT4_EXTENTS) ? IN_E4EXTENTS : 0; + ip->i_blocks = le32toh(ei->e2di_nblock); + ip->i_facl = le32toh(ei->e2di_facl); if (E2DI_HAS_HUGE_FILE(ip)) { - ip->i_blocks |= (uint64_t)ei->e2di_nblock_high << 32; - ip->i_facl |= (uint64_t)ei->e2di_facl_high << 32; - if (ei->e2di_flags & EXT4_HUGE_FILE) + ip->i_blocks |= (uint64_t)le16toh(ei->e2di_nblock_high) << 32; + ip->i_facl |= (uint64_t)le16toh(ei->e2di_facl_high) << 32; + if (ei_flags_host & EXT4_HUGE_FILE) ip->i_blocks = fsbtodb(ip->i_e2fs, ip->i_blocks); } - ip->i_gen = ei->e2di_gen; - ip->i_uid = ei->e2di_uid; - ip->i_gid = ei->e2di_gid; - ip->i_uid |= (uint32_t)ei->e2di_uid_high << 16; - ip->i_gid |= (uint32_t)ei->e2di_gid_high << 16; + ip->i_gen = le32toh(ei->e2di_gen); + ip->i_uid = le16toh(ei->e2di_uid); + ip->i_gid = le16toh(ei->e2di_gid); + ip->i_uid |= (uint32_t)le16toh(ei->e2di_uid_high) << 16; + ip->i_gid |= (uint32_t)le16toh(ei->e2di_gid_high) << 16; - memcpy(ip->i_data, ei->e2di_blocks, sizeof(ei->e2di_blocks)); + if ((ip->i_flag & IN_E4EXTENTS)) { + memcpy(ip->i_data, ei->e2di_blocks, sizeof(ei->e2di_blocks)); + } else { + for (i = 0; i < EXT2_NDADDR; i++) + ip->i_db[i] = le32toh(ei->e2di_blocks[i]); + for (i = 0; i < EXT2_NIADDR; i++) + ip->i_ib[i] = le32toh(ei->e2di_blocks[EXT2_NDIR_BLOCKS + i]); + } /* Verify inode csum. 
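Several on-disk inode fields are stored as split halves for historical reasons, so the conversion above byte-swaps each half separately and then stitches them together: a 32-bit uid or gid from two 16-bit words, and the 48-bit block count from a 32-bit low word plus a 16-bit high word. A standalone sketch of the reassembly, with hand-rolled little-endian decoders standing in for le16toh()/le32toh():

#include <stdint.h>
#include <stdio.h>

static uint16_t
le16_(const uint8_t *b)
{
        return ((uint16_t)(b[0] | b[1] << 8));
}

static uint32_t
le32_(const uint8_t *b)
{
        return ((uint32_t)b[0] | (uint32_t)b[1] << 8 |
            (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24);
}

int
main(void)
{
        /* Raw little-endian halves as they would sit in the on-disk inode. */
        uint8_t uid_lo[2] = { 0x39, 0x30 };              /* 12345 */
        uint8_t uid_hi[2] = { 0x01, 0x00 };              /* 1     */
        uint8_t nblk_lo[4] = { 0x00, 0x00, 0x00, 0x80 }; /* 0x80000000 */
        uint8_t nblk_hi[2] = { 0x02, 0x00 };             /* 2     */

        uint32_t uid = (uint32_t)le16_(uid_lo) |
            (uint32_t)le16_(uid_hi) << 16;
        uint64_t nblocks = (uint64_t)le32_(nblk_lo) |
            (uint64_t)le16_(nblk_hi) << 32;

        printf("uid %u, blocks %llu\n", (unsigned)uid,
            (unsigned long long)nblocks);
        return (0);
}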
*/ return (ext2_ei_csum_verify(ip, ei)); } +#define NSEC_TO_XTIME(t) (htole32((t << 2) & EXT3_NSEC_MASK)) + /* - * inode to raw ext2 inode + * inode to raw ext2 LE inode conversion */ int ext2_i2ei(struct inode *ip, struct ext2fs_dinode *ei) { struct m_ext2fs *fs; + int i; fs = ip->i_e2fs; - ei->e2di_mode = ip->i_mode; - ei->e2di_nlink = ip->i_nlink; + ei->e2di_mode = htole16(ip->i_mode); + ei->e2di_nlink = htole16(ip->i_nlink); + ei->e2di_size = htole32(ip->i_size); + if (S_ISREG(ip->i_mode)) + ei->e2di_size_high = htole32(ip->i_size >> 32); + ei->e2di_atime = htole32(ip->i_atime); + ei->e2di_mtime = htole32(ip->i_mtime); + ei->e2di_ctime = htole32(ip->i_ctime); /* * Godmar thinks: if dtime is nonzero, ext2 says this inode has been * deleted, this would correspond to a zero link count */ - ei->e2di_dtime = ei->e2di_nlink ? 0 : ip->i_mtime; - ei->e2di_size = ip->i_size; - if (S_ISREG(ip->i_mode)) - ei->e2di_size_high = ip->i_size >> 32; - ei->e2di_atime = ip->i_atime; - ei->e2di_mtime = ip->i_mtime; - ei->e2di_ctime = ip->i_ctime; + ei->e2di_dtime = htole32(le16toh(ei->e2di_nlink) ? 0 : + le32toh(ei->e2di_mtime)); if (E2DI_HAS_XTIME(ip)) { - ei->e2di_ctime_extra = NSEC_TO_XTIME(ip->i_ctimensec); - ei->e2di_mtime_extra = NSEC_TO_XTIME(ip->i_mtimensec); - ei->e2di_atime_extra = NSEC_TO_XTIME(ip->i_atimensec); - ei->e2di_crtime = ip->i_birthtime; - ei->e2di_crtime_extra = NSEC_TO_XTIME(ip->i_birthnsec); + ei->e2di_ctime_extra = htole32(NSEC_TO_XTIME(ip->i_ctimensec)); + ei->e2di_mtime_extra = htole32(NSEC_TO_XTIME(ip->i_mtimensec)); + ei->e2di_atime_extra = htole32(NSEC_TO_XTIME(ip->i_atimensec)); + ei->e2di_crtime = htole32(ip->i_birthtime); + ei->e2di_crtime_extra = htole32(NSEC_TO_XTIME(ip->i_birthnsec)); } + /* Keep these in host endian for a while since they change a lot */ ei->e2di_flags = 0; - ei->e2di_flags |= (ip->i_flags & SF_APPEND) ? EXT2_APPEND : 0; - ei->e2di_flags |= (ip->i_flags & SF_IMMUTABLE) ? EXT2_IMMUTABLE : 0; - ei->e2di_flags |= (ip->i_flags & UF_NODUMP) ? EXT2_NODUMP : 0; - ei->e2di_flags |= (ip->i_flag & IN_E3INDEX) ? EXT3_INDEX : 0; - ei->e2di_flags |= (ip->i_flag & IN_E4EXTENTS) ? EXT4_EXTENTS : 0; + ei->e2di_flags |= htole32((ip->i_flags & SF_APPEND) ? EXT2_APPEND : 0); + ei->e2di_flags |= htole32((ip->i_flags & SF_IMMUTABLE) ? EXT2_IMMUTABLE : 0); + ei->e2di_flags |= htole32((ip->i_flags & UF_NODUMP) ? EXT2_NODUMP : 0); + ei->e2di_flags |= htole32((ip->i_flag & IN_E3INDEX) ? EXT3_INDEX : 0); + ei->e2di_flags |= htole32((ip->i_flag & IN_E4EXTENTS) ? 
EXT4_EXTENTS : 0); if (ip->i_blocks > ~0U && !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_HUGE_FILE)) { SDT_PROBE2(ext2fs, , trace, inode_cnv, 1, "i_blocks value is out of range"); return (EIO); } if (ip->i_blocks <= 0xffffffffffffULL) { - ei->e2di_nblock = ip->i_blocks & 0xffffffff; - ei->e2di_nblock_high = ip->i_blocks >> 32 & 0xffff; + ei->e2di_nblock = htole32(ip->i_blocks & 0xffffffff); + ei->e2di_nblock_high = htole16(ip->i_blocks >> 32 & 0xffff); } else { - ei->e2di_flags |= EXT4_HUGE_FILE; - ei->e2di_nblock = dbtofsb(fs, ip->i_blocks); - ei->e2di_nblock_high = dbtofsb(fs, ip->i_blocks) >> 32 & 0xffff; + ei->e2di_flags |= htole32(EXT4_HUGE_FILE); + ei->e2di_nblock = htole32(dbtofsb(fs, ip->i_blocks)); + ei->e2di_nblock_high = htole16(dbtofsb(fs, ip->i_blocks) >> 32 & 0xffff); } - ei->e2di_facl = ip->i_facl & 0xffffffff; - ei->e2di_facl_high = ip->i_facl >> 32 & 0xffff; - ei->e2di_gen = ip->i_gen; - ei->e2di_uid = ip->i_uid & 0xffff; - ei->e2di_uid_high = ip->i_uid >> 16 & 0xffff; - ei->e2di_gid = ip->i_gid & 0xffff; - ei->e2di_gid_high = ip->i_gid >> 16 & 0xffff; - memcpy(ei->e2di_blocks, ip->i_data, sizeof(ei->e2di_blocks)); + ei->e2di_facl = htole32(ip->i_facl & 0xffffffff); + ei->e2di_facl_high = htole16(ip->i_facl >> 32 & 0xffff); + ei->e2di_gen = htole32(ip->i_gen); + ei->e2di_uid = htole16(ip->i_uid & 0xffff); + ei->e2di_uid_high = htole16(ip->i_uid >> 16 & 0xffff); + ei->e2di_gid = htole16(ip->i_gid & 0xffff); + ei->e2di_gid_high = htole16(ip->i_gid >> 16 & 0xffff); + + if ((ip->i_flag & IN_E4EXTENTS)) { + memcpy(ei->e2di_blocks, ip->i_data, sizeof(ei->e2di_blocks)); + } else { + for (i = 0; i < EXT2_NDADDR; i++) + ei->e2di_blocks[i] = htole32(ip->i_db[i]); + for (i = 0; i < EXT2_NIADDR; i++) + ei->e2di_blocks[EXT2_NDIR_BLOCKS + i] = htole32(ip->i_ib[i]); + } /* Set inode csum. */ ext2_ei_csum_set(ip, ei); return (0); } Index: head/sys/fs/ext2fs/ext2_lookup.c =================================================================== --- head/sys/fs/ext2fs/ext2_lookup.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_lookup.c (revision 361136) @@ -1,1295 +1,1299 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ufs_lookup.c 8.6 (Berkeley) 4/1/94 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(ext2fs); /* * ext2fs trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(ext2fs, , lookup, trace, "int", "char*"); SDT_PROBE_DEFINE4(ext2fs, , trace, ext2_dirbad_error, "char*", "ino_t", "doff_t", "char*"); SDT_PROBE_DEFINE5(ext2fs, , trace, ext2_dirbadentry_error, "char*", "int", "uint32_t", "uint16_t", "uint8_t"); #ifdef INVARIANTS static int dirchk = 1; #else static int dirchk = 0; #endif static SYSCTL_NODE(_vfs, OID_AUTO, e2fs, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "EXT2FS filesystem"); SYSCTL_INT(_vfs_e2fs, OID_AUTO, dircheck, CTLFLAG_RW, &dirchk, 0, ""); /* DIRBLKSIZE in ffs is DEV_BSIZE (in most cases 512) while it is the native blocksize in ext2fs - thus, a #define is no longer appropriate */ #undef DIRBLKSIZ static u_char ext2_ft_to_dt[] = { DT_UNKNOWN, /* EXT2_FT_UNKNOWN */ DT_REG, /* EXT2_FT_REG_FILE */ DT_DIR, /* EXT2_FT_DIR */ DT_CHR, /* EXT2_FT_CHRDEV */ DT_BLK, /* EXT2_FT_BLKDEV */ DT_FIFO, /* EXT2_FT_FIFO */ DT_SOCK, /* EXT2_FT_SOCK */ DT_LNK, /* EXT2_FT_SYMLINK */ }; #define FTTODT(ft) \ ((ft) < nitems(ext2_ft_to_dt) ? ext2_ft_to_dt[(ft)] : DT_UNKNOWN) static u_char dt_to_ext2_ft[] = { EXT2_FT_UNKNOWN, /* DT_UNKNOWN */ EXT2_FT_FIFO, /* DT_FIFO */ EXT2_FT_CHRDEV, /* DT_CHR */ EXT2_FT_UNKNOWN, /* unused */ EXT2_FT_DIR, /* DT_DIR */ EXT2_FT_UNKNOWN, /* unused */ EXT2_FT_BLKDEV, /* DT_BLK */ EXT2_FT_UNKNOWN, /* unused */ EXT2_FT_REG_FILE, /* DT_REG */ EXT2_FT_UNKNOWN, /* unused */ EXT2_FT_SYMLINK, /* DT_LNK */ EXT2_FT_UNKNOWN, /* unused */ EXT2_FT_SOCK, /* DT_SOCK */ EXT2_FT_UNKNOWN, /* unused */ EXT2_FT_UNKNOWN, /* DT_WHT */ }; #define DTTOFT(dt) \ ((dt) < nitems(dt_to_ext2_ft) ? dt_to_ext2_ft[(dt)] : EXT2_FT_UNKNOWN) static int ext2_dirbadentry(struct vnode *dp, struct ext2fs_direct_2 *de, int entryoffsetinblock); static int ext2_is_dot_entry(struct componentname *cnp); static int ext2_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp, ino_t *dd_ino); static int ext2_is_dot_entry(struct componentname *cnp) { if (cnp->cn_namelen <= 2 && cnp->cn_nameptr[0] == '.' && (cnp->cn_nameptr[1] == '.' || cnp->cn_nameptr[1] == '\0')) return (1); return (0); } /* * Vnode op for reading directories. 
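ext2_readdir below translates the on-disk file-type byte through the FTTODT() table above; the nitems() bound matters because e2d_type comes straight off the disk, and an out-of-range value has to degrade to DT_UNKNOWN rather than index past the array. A tiny sketch of the same defensive mapping, using hypothetical constants rather than the real EXT2_FT_*/DT_* values:

#include <stdio.h>

/* Hypothetical stand-ins for the EXT2_FT_* / DT_* constants. */
enum { MY_DT_UNKNOWN, MY_DT_REG, MY_DT_DIR, MY_DT_LNK };

static const unsigned char ft_to_dt[] = {
        MY_DT_UNKNOWN,  /* 0: unknown */
        MY_DT_REG,      /* 1: regular file */
        MY_DT_DIR,      /* 2: directory */
        MY_DT_LNK,      /* 3: symlink (position chosen for the example only) */
};
#define NITEMS(a)       (sizeof(a) / sizeof((a)[0]))
#define FT_TO_DT(ft)    ((ft) < NITEMS(ft_to_dt) ? ft_to_dt[(ft)] : MY_DT_UNKNOWN)

int
main(void)
{
        printf("type 2 -> %d, type 200 -> %d\n",
            FT_TO_DT(2u), FT_TO_DT(200u));      /* 2 and 0 (unknown) */
        return (0);
}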
*/ int ext2_readdir(struct vop_readdir_args *ap) { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; struct buf *bp; struct inode *ip; struct ext2fs_direct_2 *dp, *edp; u_long *cookies; struct dirent dstdp; off_t offset, startoffset; size_t readcnt, skipcnt; ssize_t startresid; u_int ncookies; int DIRBLKSIZ = VTOI(ap->a_vp)->i_e2fs->e2fs_bsize; int error; if (uio->uio_offset < 0) return (EINVAL); ip = VTOI(vp); if (ap->a_ncookies != NULL) { if (uio->uio_resid < 0) ncookies = 0; else ncookies = uio->uio_resid; if (uio->uio_offset >= ip->i_size) ncookies = 0; else if (ip->i_size - uio->uio_offset < ncookies) ncookies = ip->i_size - uio->uio_offset; ncookies = ncookies / (offsetof(struct ext2fs_direct_2, e2d_namlen) + 4) + 1; cookies = malloc(ncookies * sizeof(*cookies), M_TEMP, M_WAITOK); *ap->a_ncookies = ncookies; *ap->a_cookies = cookies; } else { ncookies = 0; cookies = NULL; } offset = startoffset = uio->uio_offset; startresid = uio->uio_resid; error = 0; while (error == 0 && uio->uio_resid > 0 && uio->uio_offset < ip->i_size) { error = ext2_blkatoff(vp, uio->uio_offset, NULL, &bp); if (error) break; if (bp->b_offset + bp->b_bcount > ip->i_size) readcnt = ip->i_size - bp->b_offset; else readcnt = bp->b_bcount; skipcnt = (size_t)(uio->uio_offset - bp->b_offset) & ~(size_t)(DIRBLKSIZ - 1); offset = bp->b_offset + skipcnt; dp = (struct ext2fs_direct_2 *)&bp->b_data[skipcnt]; edp = (struct ext2fs_direct_2 *)&bp->b_data[readcnt]; while (error == 0 && uio->uio_resid > 0 && dp < edp) { - if (dp->e2d_reclen <= offsetof(struct ext2fs_direct_2, - e2d_namlen) || (caddr_t)dp + dp->e2d_reclen > + if (le16toh(dp->e2d_reclen) <= offsetof(struct ext2fs_direct_2, + e2d_namlen) || (caddr_t)dp + le16toh(dp->e2d_reclen) > (caddr_t)edp) { error = EIO; break; } /*- * "New" ext2fs directory entries differ in 3 ways * from ufs on-disk ones: * - the name is not necessarily NUL-terminated. * - the file type field always exists and always * follows the name length field. * - the file type is encoded in a different way. * * "Old" ext2fs directory entries need no special * conversions, since they are binary compatible * with "new" entries having a file type of 0 (i.e., * EXT2_FT_UNKNOWN). Splitting the old name length * field didn't make a mess like it did in ufs, * because ext2fs uses a machine-independent disk * layout. */ dstdp.d_namlen = dp->e2d_namlen; dstdp.d_type = FTTODT(dp->e2d_type); if (offsetof(struct ext2fs_direct_2, e2d_namlen) + - dstdp.d_namlen > dp->e2d_reclen) { + dstdp.d_namlen > le16toh(dp->e2d_reclen)) { error = EIO; break; } - if (offset < startoffset || dp->e2d_ino == 0) + if (offset < startoffset || le32toh(dp->e2d_ino) == 0) goto nextentry; - dstdp.d_fileno = dp->e2d_ino; + dstdp.d_fileno = le32toh(dp->e2d_ino); dstdp.d_reclen = GENERIC_DIRSIZ(&dstdp); bcopy(dp->e2d_name, dstdp.d_name, dstdp.d_namlen); /* NOTE: d_off is the offset of the *next* entry. */ - dstdp.d_off = offset + dp->e2d_reclen; + dstdp.d_off = offset + le16toh(dp->e2d_reclen); dirent_terminate(&dstdp); if (dstdp.d_reclen > uio->uio_resid) { if (uio->uio_resid == startresid) error = EINVAL; else error = EJUSTRETURN; break; } /* Advance dp. 
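Each pass through the loop above trusts le16toh(e2d_reclen) only after checking that it is at least the fixed header size, stays inside the block, and leaves room for the name; without those checks a corrupt reclen could walk the pointer out of the buffer. A user-space sketch of the same validated walk over a single block, with a simplified record layout and hand-decoded little-endian fields:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDRLEN  8       /* ino(4) + reclen(2) + namlen(1) + type(1) */

static uint16_t le16_(const uint8_t *b) { return (b[0] | b[1] << 8); }
static uint32_t le32_(const uint8_t *b)
{
        return ((uint32_t)b[0] | (uint32_t)b[1] << 8 |
            (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24);
}

/* Walk one directory block; return 0 on success, -1 on a malformed record. */
static int
walk_dirblock(const uint8_t *blk, uint32_t bsize)
{
        uint32_t off = 0;

        while (off < bsize) {
                uint16_t reclen = le16_(blk + off + 4);
                uint8_t namlen = blk[off + 6];

                if (reclen < HDRLEN || off + reclen > bsize ||
                    (uint32_t)HDRLEN + namlen > reclen)
                        return (-1);            /* corrupt record */
                if (le32_(blk + off) != 0)      /* skip unused slots */
                        printf("ino %u name %.*s\n",
                            (unsigned)le32_(blk + off), namlen,
                            (const char *)blk + off + HDRLEN);
                off += reclen;                  /* advance by reclen */
        }
        return (0);
}

int
main(void)
{
        uint8_t blk[64];

        memset(blk, 0, sizeof(blk));
        /* One record: ino 2, reclen 64 (covers the block), name "." */
        blk[0] = 2; blk[4] = 64; blk[6] = 1; blk[8] = '.';
        return (walk_dirblock(blk, sizeof(blk)) == 0 ? 0 : 1);
}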
*/ error = uiomove((caddr_t)&dstdp, dstdp.d_reclen, uio); if (error) break; if (cookies != NULL) { KASSERT(ncookies > 0, ("ext2_readdir: cookies buffer too small")); - *cookies = offset + dp->e2d_reclen; + *cookies = offset + le16toh(dp->e2d_reclen); cookies++; ncookies--; } nextentry: - offset += dp->e2d_reclen; + offset += le16toh(dp->e2d_reclen); dp = (struct ext2fs_direct_2 *)((caddr_t)dp + - dp->e2d_reclen); + le16toh(dp->e2d_reclen)); } bqrelse(bp); uio->uio_offset = offset; } /* We need to correct uio_offset. */ uio->uio_offset = offset; if (error == EJUSTRETURN) error = 0; if (ap->a_ncookies != NULL) { if (error == 0) { ap->a_ncookies -= ncookies; } else { free(*ap->a_cookies, M_TEMP); *ap->a_ncookies = 0; *ap->a_cookies = NULL; } } if (error == 0 && ap->a_eofflag) *ap->a_eofflag = ip->i_size <= uio->uio_offset; return (error); } /* * Convert a component of a pathname into a pointer to a locked inode. * This is a very central and rather complicated routine. * If the file system is not maintained in a strict tree hierarchy, * this can result in a deadlock situation (see comments in code below). * * The cnp->cn_nameiop argument is LOOKUP, CREATE, RENAME, or DELETE depending * on whether the name is to be looked up, created, renamed, or deleted. * When CREATE, RENAME, or DELETE is specified, information usable in * creating, renaming, or deleting a directory entry may be calculated. * If flag has LOCKPARENT or'ed into it and the target of the pathname * exists, lookup returns both the target and its parent directory locked. * When creating or renaming and LOCKPARENT is specified, the target may * not be ".". When deleting and LOCKPARENT is specified, the target may * be "."., but the caller must check to ensure it does an vrele and vput * instead of two vputs. 
* * Overall outline of ext2_lookup: * * search for name in directory, to found or notfound * notfound: * if creating, return locked directory, leaving info on available slots * else return error * found: * if at end of path and deleting, return information to allow delete * if at end of path and rewriting (RENAME and LOCKPARENT), lock target * inode and return info to allow rewrite * if not at end, add name to cache; if at end and neither creating * nor deleting, add name to cache */ int ext2_lookup(struct vop_cachedlookup_args *ap) { return (ext2_lookup_ino(ap->a_dvp, ap->a_vpp, ap->a_cnp, NULL)); } static int ext2_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp, ino_t *dd_ino) { struct inode *dp; /* inode for directory being searched */ struct buf *bp; /* a buffer of directory entries */ struct ext2fs_direct_2 *ep; /* the current directory entry */ int entryoffsetinblock; /* offset of ep in bp's buffer */ struct ext2fs_searchslot ss; doff_t i_diroff; /* cached i_diroff value */ doff_t i_offset; /* cached i_offset value */ int numdirpasses; /* strategy for directory search */ doff_t endsearch; /* offset to end directory search */ doff_t prevoff; /* prev entry dp->i_offset */ struct vnode *pdp; /* saved dp during symlink work */ struct vnode *tdp; /* returned by VFS_VGET */ doff_t enduseful; /* pointer past last used dir slot */ u_long bmask; /* block offset mask */ int error; struct ucred *cred = cnp->cn_cred; int flags = cnp->cn_flags; int nameiop = cnp->cn_nameiop; ino_t ino, ino1; int ltype; int entry_found = 0; int DIRBLKSIZ = VTOI(vdp)->i_e2fs->e2fs_bsize; if (vpp != NULL) *vpp = NULL; dp = VTOI(vdp); bmask = VFSTOEXT2(vdp->v_mount)->um_mountp->mnt_stat.f_iosize - 1; restart: bp = NULL; ss.slotoffset = -1; /* * We now have a segment name to search for, and a directory to search. * * Suppress search for slots unless creating * file and at end of pathname, in which case * we watch for a place to put the new file in * case it doesn't already exist. */ i_diroff = dp->i_diroff; ss.slotstatus = FOUND; ss.slotfreespace = ss.slotsize = ss.slotneeded = 0; if ((nameiop == CREATE || nameiop == RENAME) && (flags & ISLASTCN)) { ss.slotstatus = NONE; ss.slotneeded = EXT2_DIR_REC_LEN(cnp->cn_namelen); /* * was ss.slotneeded = (sizeof(struct direct) - MAXNAMLEN + * cnp->cn_namelen + 3) &~ 3; */ } /* * Try to lookup dir entry using htree directory index. * * If we got an error or we want to find '.' or '..' entry, * we will fall back to linear search. */ if (!ext2_is_dot_entry(cnp) && ext2_htree_has_idx(dp)) { numdirpasses = 1; entryoffsetinblock = 0; switch (ext2_htree_lookup(dp, cnp->cn_nameptr, cnp->cn_namelen, &bp, &entryoffsetinblock, &i_offset, &prevoff, &enduseful, &ss)) { case 0: ep = (struct ext2fs_direct_2 *)((char *)bp->b_data + (i_offset & bmask)); goto foundentry; case ENOENT: i_offset = roundup2(dp->i_size, DIRBLKSIZ); goto notfound; default: /* * Something failed; just fallback to do a linear * search. */ break; } } /* * If there is cached information on a previous search of * this directory, pick up where we last left off. * We cache only lookups as these are the most common * and have the greatest payoff. Caching CREATE has little * benefit as it usually must search the entire directory * to determine that the entry does not exist. Caching the * location of the last DELETE or RENAME has not reduced * profiling time and hence has been removed in the interest * of simplicity. 
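Concretely, when the cached i_diroff is usable the scan set up below becomes two passes: from the cached offset to the end of the directory and, only on a miss, from the start back up to the cached offset. A minimal sketch of that traversal order; scan_block_fn/probe are hypothetical stand-ins for ext2_search_dirblock():

#include <stdio.h>
#include <stdint.h>

/* Hypothetical per-block probe: returns 1 when the name is found. */
typedef int (*scan_block_fn)(uint32_t blk_off, void *arg);

/*
 * Two-pass directory scan: start at the cached offset, wrap around once.
 * Offsets are block-aligned; dirsize is a multiple of bsize for simplicity.
 */
static int
wraparound_scan(uint32_t cached_off, uint32_t dirsize, uint32_t bsize,
    scan_block_fn scan, void *arg)
{
        uint32_t off;

        for (off = cached_off; off < dirsize; off += bsize)     /* pass 1 */
                if (scan(off, arg))
                        return (1);
        for (off = 0; off < cached_off; off += bsize)           /* pass 2 */
                if (scan(off, arg))
                        return (1);
        return (0);
}

static int
probe(uint32_t off, void *arg)
{
        printf("scan block at offset %u\n", (unsigned)off);
        return (off == *(uint32_t *)arg);
}

int
main(void)
{
        uint32_t target = 4096; /* pretend the entry lives in block 1 */

        /* Cached offset points at block 2 of a 4-block, 4K-block directory. */
        return (!wraparound_scan(8192, 16384, 4096, probe, &target));
}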
*/ if (nameiop != LOOKUP || i_diroff == 0 || i_diroff > dp->i_size) { entryoffsetinblock = 0; i_offset = 0; numdirpasses = 1; } else { i_offset = i_diroff; if ((entryoffsetinblock = i_offset & bmask) && (error = ext2_blkatoff(vdp, (off_t)i_offset, NULL, &bp))) return (error); numdirpasses = 2; nchstats.ncs_2passes++; } prevoff = i_offset; endsearch = roundup2(dp->i_size, DIRBLKSIZ); enduseful = 0; searchloop: while (i_offset < endsearch) { /* * If necessary, get the next directory block. */ if (bp != NULL) brelse(bp); error = ext2_blkatoff(vdp, (off_t)i_offset, NULL, &bp); if (error != 0) return (error); entryoffsetinblock = 0; if (ss.slotstatus == NONE) { ss.slotoffset = -1; ss.slotfreespace = 0; } error = ext2_search_dirblock(dp, bp->b_data, &entry_found, cnp->cn_nameptr, cnp->cn_namelen, &entryoffsetinblock, &i_offset, &prevoff, &enduseful, &ss); if (error != 0) { brelse(bp); return (error); } if (entry_found) { ep = (struct ext2fs_direct_2 *)((char *)bp->b_data + (entryoffsetinblock & bmask)); foundentry: - ino = ep->e2d_ino; + ino = le32toh(ep->e2d_ino); goto found; } } notfound: /* * If we started in the middle of the directory and failed * to find our target, we must check the beginning as well. */ if (numdirpasses == 2) { numdirpasses--; i_offset = 0; endsearch = i_diroff; goto searchloop; } if (bp != NULL) brelse(bp); /* * If creating, and at end of pathname and current * directory has not been removed, then can consider * allowing file to be created. */ if ((nameiop == CREATE || nameiop == RENAME) && (flags & ISLASTCN) && dp->i_nlink != 0) { /* * Access for write is interpreted as allowing * creation of files in the directory. */ if ((error = VOP_ACCESS(vdp, VWRITE, cred, cnp->cn_thread)) != 0) return (error); /* * Return an indication of where the new directory * entry should be put. If we didn't find a slot, * then set dp->i_count to 0 indicating * that the new slot belongs at the end of the * directory. If we found a slot, then the new entry * can be put in the range from dp->i_offset to * dp->i_offset + dp->i_count. */ if (ss.slotstatus == NONE) { dp->i_offset = roundup2(dp->i_size, DIRBLKSIZ); dp->i_count = 0; enduseful = dp->i_offset; } else { dp->i_offset = ss.slotoffset; dp->i_count = ss.slotsize; if (enduseful < ss.slotoffset + ss.slotsize) enduseful = ss.slotoffset + ss.slotsize; } dp->i_endoff = roundup2(enduseful, DIRBLKSIZ); /* * We return with the directory locked, so that * the parameters we set up above will still be * valid if we actually decide to do a direnter(). * We return ni_vp == NULL to indicate that the entry * does not currently exist; we leave a pointer to * the (locked) directory inode in ndp->ni_dvp. * The pathname buffer is saved so that the name * can be obtained later. * * NB - if the directory is unlocked, then this * information cannot be used. */ cnp->cn_flags |= SAVENAME; return (EJUSTRETURN); } /* * Insert name into cache (as non-existent) if appropriate. */ if ((cnp->cn_flags & MAKEENTRY) != 0) cache_enter(vdp, NULL, cnp); return (ENOENT); found: if (dd_ino != NULL) *dd_ino = ino; if (numdirpasses == 2) nchstats.ncs_pass2++; /* * Check that directory length properly reflects presence * of this entry. 
*/ - if (entryoffsetinblock + EXT2_DIR_REC_LEN(ep->e2d_namlen) - > dp->i_size) { + if (entryoffsetinblock + EXT2_DIR_REC_LEN(ep->e2d_namlen) > + dp->i_size) { ext2_dirbad(dp, i_offset, "i_size too small"); dp->i_size = entryoffsetinblock + EXT2_DIR_REC_LEN(ep->e2d_namlen); dp->i_flag |= IN_CHANGE | IN_UPDATE; } brelse(bp); /* * Found component in pathname. * If the final component of path name, save information * in the cache as to where the entry was found. */ if ((flags & ISLASTCN) && nameiop == LOOKUP) dp->i_diroff = rounddown2(i_offset, DIRBLKSIZ); /* * If deleting, and at end of pathname, return * parameters which can be used to remove file. */ if (nameiop == DELETE && (flags & ISLASTCN)) { if (flags & LOCKPARENT) ASSERT_VOP_ELOCKED(vdp, __FUNCTION__); /* * Write access to directory required to delete files. */ if ((error = VOP_ACCESS(vdp, VWRITE, cred, cnp->cn_thread)) != 0) return (error); /* * Return pointer to current entry in dp->i_offset, * and distance past previous entry (if there * is a previous entry in this block) in dp->i_count. * Save directory inode pointer in ndp->ni_dvp for dirremove(). * * Technically we shouldn't be setting these in the * WANTPARENT case (first lookup in rename()), but any * lookups that will result in directory changes will * overwrite these. */ dp->i_offset = i_offset; if ((dp->i_offset & (DIRBLKSIZ - 1)) == 0) dp->i_count = 0; else dp->i_count = dp->i_offset - prevoff; if (dd_ino != NULL) return (0); if (dp->i_number == ino) { VREF(vdp); *vpp = vdp; return (0); } if ((error = VFS_VGET(vdp->v_mount, ino, LK_EXCLUSIVE, &tdp)) != 0) return (error); /* * If directory is "sticky", then user must own * the directory, or the file in it, else she * may not delete it (unless she's root). This * implements append-only directories. */ if ((dp->i_mode & ISVTX) && cred->cr_uid != 0 && cred->cr_uid != dp->i_uid && VTOI(tdp)->i_uid != cred->cr_uid) { vput(tdp); return (EPERM); } *vpp = tdp; return (0); } /* * If rewriting (RENAME), return the inode and the * information required to rewrite the present directory * Must get inode of directory entry to verify it's a * regular file, or empty directory. */ if (nameiop == RENAME && (flags & ISLASTCN)) { if ((error = VOP_ACCESS(vdp, VWRITE, cred, cnp->cn_thread)) != 0) return (error); /* * Careful about locking second inode. * This can only occur if the target is ".". */ dp->i_offset = i_offset; if (dp->i_number == ino) return (EISDIR); if (dd_ino != NULL) return (0); if ((error = VFS_VGET(vdp->v_mount, ino, LK_EXCLUSIVE, &tdp)) != 0) return (error); *vpp = tdp; cnp->cn_flags |= SAVENAME; return (0); } if (dd_ino != NULL) return (0); /* * Step through the translation in the name. We do not `vput' the * directory because we may need it again if a symbolic link * is relative to the current directory. Instead we save it * unlocked as "pdp". We must get the target inode before unlocking * the directory to insure that the inode will not be removed * before we get it. We prevent deadlock by always fetching * inodes from the root, moving down the directory tree. Thus * when following backward pointers ".." we must unlock the * parent directory before getting the requested directory. * There is a potential race condition here if both the current * and parent directories are removed before the VFS_VGET for the * inode associated with ".." returns. We hope that this occurs * infrequently since we cannot avoid this race condition without * implementing a sophisticated deadlock detection algorithm. 
* Note also that this simple deadlock detection scheme will not * work if the file system has any hard links other than ".." * that point backwards in the directory structure. */ pdp = vdp; if (flags & ISDOTDOT) { error = vn_vget_ino(pdp, ino, cnp->cn_lkflags, &tdp); if (VN_IS_DOOMED(pdp)) { if (error == 0) vput(tdp); error = ENOENT; } if (error) return (error); /* * Recheck that ".." entry in the vdp directory points * to the inode we looked up before vdp lock was * dropped. */ error = ext2_lookup_ino(pdp, NULL, cnp, &ino1); if (error) { vput(tdp); return (error); } if (ino1 != ino) { vput(tdp); goto restart; } *vpp = tdp; } else if (dp->i_number == ino) { VREF(vdp); /* we want ourself, ie "." */ /* * When we lookup "." we still can be asked to lock it * differently. */ ltype = cnp->cn_lkflags & LK_TYPE_MASK; if (ltype != VOP_ISLOCKED(vdp)) { if (ltype == LK_EXCLUSIVE) vn_lock(vdp, LK_UPGRADE | LK_RETRY); else /* if (ltype == LK_SHARED) */ vn_lock(vdp, LK_DOWNGRADE | LK_RETRY); } *vpp = vdp; } else { if ((error = VFS_VGET(vdp->v_mount, ino, cnp->cn_lkflags, &tdp)) != 0) return (error); *vpp = tdp; } /* * Insert name into cache if appropriate. */ if (cnp->cn_flags & MAKEENTRY) cache_enter(vdp, *vpp, cnp); return (0); } int ext2_search_dirblock(struct inode *ip, void *data, int *foundp, const char *name, int namelen, int *entryoffsetinblockp, doff_t *offp, doff_t *prevoffp, doff_t *endusefulp, struct ext2fs_searchslot *ssp) { struct vnode *vdp; struct ext2fs_direct_2 *ep, *top; uint32_t bsize = ip->i_e2fs->e2fs_bsize; int offset = *entryoffsetinblockp; int namlen; vdp = ITOV(ip); ep = (struct ext2fs_direct_2 *)((char *)data + offset); top = (struct ext2fs_direct_2 *)((char *)data + bsize); while (ep < top) { /* * Full validation checks are slow, so we only check * enough to insure forward progress through the * directory. Complete checks can be run by setting * "vfs.e2fs.dirchk" to be true. */ - if (ep->e2d_reclen == 0 || + if (le16toh(ep->e2d_reclen) == 0 || (dirchk && ext2_dirbadentry(vdp, ep, offset))) { int i; ext2_dirbad(ip, *offp, "mangled entry"); i = bsize - (offset & (bsize - 1)); *offp += i; offset += i; continue; } /* * If an appropriate sized slot has not yet been found, * check to see if one is available. Also accumulate space * in the current block so that we can determine if * compaction is viable. */ if (ssp->slotstatus != FOUND) { - int size = ep->e2d_reclen; + int size = le16toh(ep->e2d_reclen); if (ep->e2d_ino != 0) size -= EXT2_DIR_REC_LEN(ep->e2d_namlen); else if (ext2_is_dirent_tail(ip, ep)) size -= sizeof(struct ext2fs_direct_tail); if (size > 0) { if (size >= ssp->slotneeded) { ssp->slotstatus = FOUND; ssp->slotoffset = *offp; - ssp->slotsize = ep->e2d_reclen; + ssp->slotsize = le16toh(ep->e2d_reclen); } else if (ssp->slotstatus == NONE) { ssp->slotfreespace += size; if (ssp->slotoffset == -1) ssp->slotoffset = *offp; if (ssp->slotfreespace >= ssp->slotneeded) { ssp->slotstatus = COMPACT; ssp->slotsize = *offp + - ep->e2d_reclen - + le16toh(ep->e2d_reclen) - ssp->slotoffset; } } } } /* * Check for a name match. */ - if (ep->e2d_ino) { + if (ep->e2d_ino != 0) { namlen = ep->e2d_namlen; if (namlen == namelen && !bcmp(name, ep->e2d_name, (unsigned)namlen)) { /* * Save directory entry's inode number and * reclen in ndp->ni_ufs area, and release * directory buffer. 
*/ *foundp = 1; return (0); } } *prevoffp = *offp; - *offp += ep->e2d_reclen; - offset += ep->e2d_reclen; + *offp += le16toh(ep->e2d_reclen); + offset += le16toh(ep->e2d_reclen); *entryoffsetinblockp = offset; - if (ep->e2d_ino) + if (ep->e2d_ino != 0) *endusefulp = *offp; /* * Get pointer to the next entry. */ ep = (struct ext2fs_direct_2 *)((char *)data + offset); } return (0); } void ext2_dirbad(struct inode *ip, doff_t offset, char *how) { struct mount *mp; mp = ITOV(ip)->v_mount; if ((mp->mnt_flag & MNT_RDONLY) == 0) panic("ext2_dirbad: %s: bad dir ino %ju at offset %ld: %s\n", mp->mnt_stat.f_mntonname, (uintmax_t)ip->i_number, (long)offset, how); else SDT_PROBE4(ext2fs, , trace, ext2_dirbad_error, mp->mnt_stat.f_mntonname, ip->i_number, offset, how); } /* * Do consistency checking on a directory entry: * record length must be multiple of 4 * entry must fit in rest of its DIRBLKSIZ block * record must be large enough to contain entry * name is not longer than MAXNAMLEN * name must be as long as advertised, and null terminated */ /* * changed so that it conforms to ext2_check_dir_entry */ static int ext2_dirbadentry(struct vnode *dp, struct ext2fs_direct_2 *de, int entryoffsetinblock) { int DIRBLKSIZ = VTOI(dp)->i_e2fs->e2fs_bsize; char *error_msg = NULL; - if (de->e2d_reclen < EXT2_DIR_REC_LEN(1)) + if (le16toh(de->e2d_reclen) < EXT2_DIR_REC_LEN(1)) error_msg = "rec_len is smaller than minimal"; - else if (de->e2d_reclen % 4 != 0) + else if (le16toh(de->e2d_reclen) % 4 != 0) error_msg = "rec_len % 4 != 0"; - else if (de->e2d_reclen < EXT2_DIR_REC_LEN(de->e2d_namlen)) + else if (le16toh(de->e2d_reclen) < EXT2_DIR_REC_LEN(de->e2d_namlen)) error_msg = "reclen is too small for name_len"; - else if (entryoffsetinblock + de->e2d_reclen > DIRBLKSIZ) + else if (entryoffsetinblock + le16toh(de->e2d_reclen) > DIRBLKSIZ) error_msg = "directory entry across blocks"; /* else LATER if (de->inode > dir->i_sb->u.ext2_sb.s_es->s_inodes_count) error_msg = "inode out of bounds"; */ if (error_msg != NULL) { SDT_PROBE5(ext2fs, , trace, ext2_dirbadentry_error, error_msg, entryoffsetinblock, - de->e2d_ino, de->e2d_reclen, de->e2d_namlen); + le32toh(de->e2d_ino), le16toh(de->e2d_reclen), + de->e2d_namlen); } - return error_msg == NULL ? 0 : 1; + return (error_msg == NULL ? 0 : 1); } /* * Insert an entry into the fresh directory block. * Initialize entry tail if the metadata_csum feature is turned on.
*/ static int ext2_add_first_entry(struct vnode *dvp, struct ext2fs_direct_2 *entry, struct componentname *cnp) { struct inode *dp; struct iovec aiov; struct uio auio; char* buf = NULL; int dirblksize, error; dp = VTOI(dvp); dirblksize = dp->i_e2fs->e2fs_bsize; if (dp->i_offset & (dirblksize - 1)) panic("ext2_add_first_entry: bad directory offset"); if (EXT2_HAS_RO_COMPAT_FEATURE(dp->i_e2fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { - entry->e2d_reclen = dirblksize - sizeof(struct ext2fs_direct_tail); + entry->e2d_reclen = htole16(dirblksize - + sizeof(struct ext2fs_direct_tail)); buf = malloc(dirblksize, M_TEMP, M_WAITOK); if (!buf) { error = ENOMEM; goto out; } memcpy(buf, entry, EXT2_DIR_REC_LEN(entry->e2d_namlen)); ext2_init_dirent_tail(EXT2_DIRENT_TAIL(buf, dirblksize)); ext2_dirent_csum_set(dp, (struct ext2fs_direct_2 *)buf); auio.uio_offset = dp->i_offset; auio.uio_resid = dirblksize; aiov.iov_len = auio.uio_resid; aiov.iov_base = (caddr_t)buf; } else { - entry->e2d_reclen = dirblksize; + entry->e2d_reclen = htole16(dirblksize); auio.uio_offset = dp->i_offset; auio.uio_resid = EXT2_DIR_REC_LEN(entry->e2d_namlen); aiov.iov_len = auio.uio_resid; aiov.iov_base = (caddr_t)entry; } auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_WRITE; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = (struct thread *)0; error = VOP_WRITE(dvp, &auio, IO_SYNC, cnp->cn_cred); if (error) goto out; dp->i_size = roundup2(dp->i_size, dirblksize); dp->i_flag |= IN_CHANGE; out: free(buf, M_TEMP); return (error); } /* * Write a directory entry after a call to namei, using the parameters * that it left in nameidata. The argument ip is the inode which the new * directory entry will refer to. Dvp is a pointer to the directory to * be written, which was left locked by namei. Remaining parameters * (dp->i_offset, dp->i_count) indicate how the space for the new * entry is to be obtained. */ int ext2_direnter(struct inode *ip, struct vnode *dvp, struct componentname *cnp) { struct inode *dp; struct ext2fs_direct_2 newdir; int DIRBLKSIZ = ip->i_e2fs->e2fs_bsize; int error; #ifdef INVARIANTS if ((cnp->cn_flags & SAVENAME) == 0) panic("ext2_direnter: missing name"); #endif dp = VTOI(dvp); - newdir.e2d_ino = ip->i_number; - newdir.e2d_namlen = cnp->cn_namelen; + newdir.e2d_ino = htole32(ip->i_number); if (EXT2_HAS_INCOMPAT_FEATURE(ip->i_e2fs, - EXT2F_INCOMPAT_FTYPE)) + EXT2F_INCOMPAT_FTYPE)) { + newdir.e2d_namlen = cnp->cn_namelen; newdir.e2d_type = DTTOFT(IFTODT(ip->i_mode)); - else - newdir.e2d_type = EXT2_FT_UNKNOWN; + } else + newdir.e2d_namlen = htole16(cnp->cn_namelen); + bcopy(cnp->cn_nameptr, newdir.e2d_name, (unsigned)cnp->cn_namelen + 1); if (ext2_htree_has_idx(dp)) { error = ext2_htree_add_entry(dvp, &newdir, cnp); if (error) { dp->i_flag &= ~IN_E3INDEX; dp->i_flag |= IN_CHANGE | IN_UPDATE; } return (error); } if (EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_DIRHASHINDEX) && !ext2_htree_has_idx(dp)) { if ((dp->i_size / DIRBLKSIZ) == 1 && dp->i_offset == DIRBLKSIZ) { /* * Making indexed directory when one block is not * enough to save all entries. */ return ext2_htree_create_index(dvp, cnp, &newdir); } } /* * If dp->i_count is 0, then namei could find no * space in the directory. Here, dp->i_offset will * be on a directory block boundary and we will write the * new entry into a fresh block. 
*/ if (dp->i_count == 0) return ext2_add_first_entry(dvp, &newdir, cnp); error = ext2_add_entry(dvp, &newdir); if (!error && dp->i_endoff && dp->i_endoff < dp->i_size) error = ext2_truncate(dvp, (off_t)dp->i_endoff, IO_SYNC, cnp->cn_cred, cnp->cn_thread); return (error); } /* * Insert an entry into the directory block. * Compact the contents. */ int ext2_add_entry(struct vnode *dvp, struct ext2fs_direct_2 *entry) { struct ext2fs_direct_2 *ep, *nep; struct inode *dp; struct buf *bp; u_int dsize; int error, loc, newentrysize, spacefree; char *dirbuf; dp = VTOI(dvp); /* * If dp->i_count is non-zero, then namei found space * for the new entry in the range dp->i_offset to * dp->i_offset + dp->i_count in the directory. * To use this space, we may have to compact the entries located * there, by copying them together towards the beginning of the * block, leaving the free space in one usable chunk at the end. */ /* * Increase size of directory if entry eats into new space. * This should never push the size past a new multiple of * DIRBLKSIZE. * * N.B. - THIS IS AN ARTIFACT OF 4.2 AND SHOULD NEVER HAPPEN. */ if (dp->i_offset + dp->i_count > dp->i_size) dp->i_size = dp->i_offset + dp->i_count; /* * Get the block containing the space for the new directory entry. */ if ((error = ext2_blkatoff(dvp, (off_t)dp->i_offset, &dirbuf, &bp)) != 0) return (error); /* * Find space for the new entry. In the simple case, the entry at * offset base will have the space. If it does not, then namei * arranged that compacting the region dp->i_offset to * dp->i_offset + dp->i_count would yield the * space. */ newentrysize = EXT2_DIR_REC_LEN(entry->e2d_namlen); ep = (struct ext2fs_direct_2 *)dirbuf; dsize = EXT2_DIR_REC_LEN(ep->e2d_namlen); - spacefree = ep->e2d_reclen - dsize; - for (loc = ep->e2d_reclen; loc < dp->i_count; ) { + spacefree = le16toh(ep->e2d_reclen) - dsize; + for (loc = le16toh(ep->e2d_reclen); loc < dp->i_count; ) { nep = (struct ext2fs_direct_2 *)(dirbuf + loc); - if (ep->e2d_ino) { + if (le32toh(ep->e2d_ino)) { /* trim the existing slot */ - ep->e2d_reclen = dsize; + ep->e2d_reclen = htole16(dsize); ep = (struct ext2fs_direct_2 *)((char *)ep + dsize); } else { /* overwrite; nothing there; header is ours */ spacefree += dsize; } dsize = EXT2_DIR_REC_LEN(nep->e2d_namlen); - spacefree += nep->e2d_reclen - dsize; - loc += nep->e2d_reclen; + spacefree += le16toh(nep->e2d_reclen) - dsize; + loc += le16toh(nep->e2d_reclen); bcopy((caddr_t)nep, (caddr_t)ep, dsize); } /* * Update the pointer fields in the previous entry (if any), * copy in the new entry, and write out the block. */ if (ep->e2d_ino == 0) { if (spacefree + dsize < newentrysize) panic("ext2_direnter: compact1"); - entry->e2d_reclen = spacefree + dsize; + entry->e2d_reclen = htole16(spacefree + dsize); } else { if (spacefree < newentrysize) panic("ext2_direnter: compact2"); - entry->e2d_reclen = spacefree; - ep->e2d_reclen = dsize; + entry->e2d_reclen = htole16(spacefree); + ep->e2d_reclen = htole16(dsize); ep = (struct ext2fs_direct_2 *)((char *)ep + dsize); } bcopy((caddr_t)entry, (caddr_t)ep, (u_int)newentrysize); ext2_dirent_csum_set(dp, (struct ext2fs_direct_2 *)bp->b_data); if (DOINGASYNC(dvp)) { bdwrite(bp); error = 0; } else { error = bwrite(bp); } dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } /* * Remove a directory entry after a call to namei, using * the parameters which it left in nameidata. The entry * dp->i_offset contains the offset into the directory of the * entry to be eliminated. 
The dp->i_count field contains the * size of the previous record in the directory. If this * is 0, the first entry is being deleted, so we need only * zero the inode number to mark the entry as free. If the * entry is not the first in the directory, we must reclaim * the space of the now empty record by adding the record size * to the size of the previous entry. */ int ext2_dirremove(struct vnode *dvp, struct componentname *cnp) { struct inode *dp; struct ext2fs_direct_2 *ep, *rep; struct buf *bp; int error; dp = VTOI(dvp); if (dp->i_count == 0) { /* * First entry in block: set d_ino to zero. */ if ((error = ext2_blkatoff(dvp, (off_t)dp->i_offset, (char **)&ep, &bp)) != 0) return (error); ep->e2d_ino = 0; ext2_dirent_csum_set(dp, (struct ext2fs_direct_2 *)bp->b_data); error = bwrite(bp); dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } /* * Collapse new free space into previous entry. */ if ((error = ext2_blkatoff(dvp, (off_t)(dp->i_offset - dp->i_count), (char **)&ep, &bp)) != 0) return (error); /* Set 'rep' to the entry being removed. */ if (dp->i_count == 0) rep = ep; else - rep = (struct ext2fs_direct_2 *)((char *)ep + ep->e2d_reclen); + rep = (struct ext2fs_direct_2 *)((char *)ep + + le16toh(ep->e2d_reclen)); ep->e2d_reclen += rep->e2d_reclen; ext2_dirent_csum_set(dp, (struct ext2fs_direct_2 *)bp->b_data); if (DOINGASYNC(dvp) && dp->i_count != 0) bdwrite(bp); else error = bwrite(bp); dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } /* * Rewrite an existing directory entry to point at the inode * supplied. The parameters describing the directory entry are * set up by a call to namei. */ int ext2_dirrewrite(struct inode *dp, struct inode *ip, struct componentname *cnp) { struct buf *bp; struct ext2fs_direct_2 *ep; struct vnode *vdp = ITOV(dp); int error; if ((error = ext2_blkatoff(vdp, (off_t)dp->i_offset, (char **)&ep, &bp)) != 0) return (error); - ep->e2d_ino = ip->i_number; + ep->e2d_ino = htole32(ip->i_number); if (EXT2_HAS_INCOMPAT_FEATURE(ip->i_e2fs, EXT2F_INCOMPAT_FTYPE)) ep->e2d_type = DTTOFT(IFTODT(ip->i_mode)); else ep->e2d_type = EXT2_FT_UNKNOWN; ext2_dirent_csum_set(dp, (struct ext2fs_direct_2 *)bp->b_data); error = bwrite(bp); dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } /* * Check if a directory is empty or not. * Inode supplied must be locked. * * Using a struct dirtemplate here is not precisely * what we want, but better than using a struct direct. * * NB: does not handle corrupted directories. */ int ext2_dirempty(struct inode *ip, ino_t parentino, struct ucred *cred) { off_t off; struct dirtemplate dbuf; struct ext2fs_direct_2 *dp = (struct ext2fs_direct_2 *)&dbuf; int error, namlen; ssize_t count; #define MINDIRSIZ (sizeof(struct dirtemplate) / 2) - for (off = 0; off < ip->i_size; off += dp->e2d_reclen) { + for (off = 0; off < ip->i_size; off += le16toh(dp->e2d_reclen)) { error = vn_rdwr(UIO_READ, ITOV(ip), (caddr_t)dp, MINDIRSIZ, off, UIO_SYSSPACE, IO_NODELOCKED | IO_NOMACCHECK, cred, NOCRED, &count, (struct thread *)0); /* * Since we read MINDIRSIZ, residual must * be 0 unless we're at end of file. */ if (error || count != 0) return (0); /* avoid infinite loops */ if (dp->e2d_reclen == 0) return (0); /* skip empty entries */ if (dp->e2d_ino == 0) continue; /* accept only "." and ".." */ namlen = dp->e2d_namlen; if (namlen > 2) return (0); if (dp->e2d_name[0] != '.') return (0); /* * At this point namlen must be 1 or 2. * 1 implies ".", 2 implies ".." if second * char is also "." */ if (namlen == 1) continue; - if (dp->e2d_name[1] == '.' 
&& dp->e2d_ino == parentino) + if (dp->e2d_name[1] == '.' && le32toh(dp->e2d_ino) == parentino) continue; return (0); } return (1); } /* * Check if source directory is in the path of the target directory. * Target is supplied locked, source is unlocked. * The target is always vput before returning. */ int ext2_checkpath(struct inode *source, struct inode *target, struct ucred *cred) { struct vnode *vp; int error, namlen; struct dirtemplate dirbuf; vp = ITOV(target); if (target->i_number == source->i_number) { error = EEXIST; goto out; } if (target->i_number == EXT2_ROOTINO) { error = 0; goto out; } for (;;) { if (vp->v_type != VDIR) { error = ENOTDIR; break; } error = vn_rdwr(UIO_READ, vp, (caddr_t)&dirbuf, sizeof(struct dirtemplate), (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_NOMACCHECK, cred, NOCRED, NULL, NULL); if (error != 0) break; namlen = dirbuf.dotdot_type; /* like ufs little-endian */ if (namlen != 2 || dirbuf.dotdot_name[0] != '.' || dirbuf.dotdot_name[1] != '.') { error = ENOTDIR; break; } - if (dirbuf.dotdot_ino == source->i_number) { + if (le32toh(dirbuf.dotdot_ino) == source->i_number) { error = EINVAL; break; } - if (dirbuf.dotdot_ino == EXT2_ROOTINO) + if (le32toh(dirbuf.dotdot_ino) == EXT2_ROOTINO) break; vput(vp); - if ((error = VFS_VGET(vp->v_mount, dirbuf.dotdot_ino, + if ((error = VFS_VGET(vp->v_mount, le32toh(dirbuf.dotdot_ino), LK_EXCLUSIVE, &vp)) != 0) { vp = NULL; break; } } out: if (error == ENOTDIR) SDT_PROBE2(ext2fs, , lookup, trace, 1, "checkpath: .. not a directory"); if (vp != NULL) vput(vp); return (error); } Index: head/sys/fs/ext2fs/ext2_subr.c =================================================================== --- head/sys/fs/ext2fs/ext2_subr.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_subr.c (revision 361136) @@ -1,201 +1,201 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)ffs_subr.c 8.2 (Berkeley) 9/21/93 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Return buffer with the contents of block "offset" from the beginning of * directory "ip". If "res" is non-zero, fill it in with a pointer to the * remaining space in the directory. */ int ext2_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp) { struct inode *ip; struct m_ext2fs *fs; struct buf *bp; e2fs_lbn_t lbn; int error, bsize; ip = VTOI(vp); fs = ip->i_e2fs; lbn = lblkno(fs, offset); bsize = blksize(fs, ip, lbn); if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } error = ext2_dir_blk_csum_verify(ip, bp); if (error != 0) { brelse(bp); return (error); } if (res) *res = (char *)bp->b_data + blkoff(fs, offset); *bpp = bp; return (0); } /* * Update the cluster map because of an allocation of free like ffs. * * Cnt == 1 means free; cnt == -1 means allocating. */ void ext2_clusteracct(struct m_ext2fs *fs, char *bbp, int cg, e4fs_daddr_t bno, int cnt) { int32_t *sump = fs->e2fs_clustersum[cg].cs_sum; int32_t *lp; e4fs_daddr_t start, end, loc, forw, back; int bit, i; /* Initialize the cluster summary array. */ if (fs->e2fs_clustersum[cg].cs_init == 0) { int run = 0; bit = 1; loc = 0; - for (i = 0; i < fs->e2fs->e2fs_fpg; i++) { + for (i = 0; i < fs->e2fs_fpg; i++) { if ((bbp[loc] & bit) == 0) run++; else if (run != 0) { if (run > fs->e2fs_contigsumsize) run = fs->e2fs_contigsumsize; sump[run]++; run = 0; } if ((i & (NBBY - 1)) != (NBBY - 1)) bit <<= 1; else { loc++; bit = 1; } } if (run != 0) { if (run > fs->e2fs_contigsumsize) run = fs->e2fs_contigsumsize; sump[run]++; } fs->e2fs_clustersum[cg].cs_init = 1; } if (fs->e2fs_contigsumsize <= 0) return; /* Find the size of the cluster going forward. */ start = bno + 1; end = start + fs->e2fs_contigsumsize; - if (end > fs->e2fs->e2fs_fpg) - end = fs->e2fs->e2fs_fpg; + if (end > fs->e2fs_fpg) + end = fs->e2fs_fpg; loc = start / NBBY; bit = 1 << (start % NBBY); for (i = start; i < end; i++) { if ((bbp[loc] & bit) != 0) break; if ((i & (NBBY - 1)) != (NBBY - 1)) bit <<= 1; else { loc++; bit = 1; } } forw = i - start; /* Find the size of the cluster going backward. */ start = bno - 1; end = start - fs->e2fs_contigsumsize; if (end < 0) end = -1; loc = start / NBBY; bit = 1 << (start % NBBY); for (i = start; i > end; i--) { if ((bbp[loc] & bit) != 0) break; if ((i & (NBBY - 1)) != 0) bit >>= 1; else { loc--; bit = 1 << (NBBY - 1); } } back = start - i; /* * Account for old cluster and the possibly new forward and * back clusters. */ i = back + forw + 1; if (i > fs->e2fs_contigsumsize) i = fs->e2fs_contigsumsize; sump[i] += cnt; if (back > 0) sump[back] -= cnt; if (forw > 0) sump[forw] -= cnt; /* Update cluster summary information. */ lp = &sump[fs->e2fs_contigsumsize]; for (i = fs->e2fs_contigsumsize; i > 0; i--) if (*lp-- > 0) break; fs->e2fs_maxcluster[cg] = i; } Index: head/sys/fs/ext2fs/ext2_vfsops.c =================================================================== --- head/sys/fs/ext2fs/ext2_vfsops.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_vfsops.c (revision 361136) @@ -1,1430 +1,1445 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1991, 1993, 1994 * The Regents of the University of California. 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_vfsops.c 8.8 (Berkeley) 4/18/94 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(ext2fs); /* * ext2fs trace probe: * arg0: verbosity. Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(ext2fs, , vfsops, trace, "int", "char*"); SDT_PROBE_DEFINE2(ext2fs, , vfsops, ext2_cg_validate_error, "char*", "int"); SDT_PROBE_DEFINE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "char*"); static int ext2_flushfiles(struct mount *mp, int flags, struct thread *td); static int ext2_mountfs(struct vnode *, struct mount *); static int ext2_reload(struct mount *mp, struct thread *td); static int ext2_sbupdate(struct ext2mount *, int); static int ext2_cgupdate(struct ext2mount *, int); static vfs_unmount_t ext2_unmount; static vfs_root_t ext2_root; static vfs_statfs_t ext2_statfs; static vfs_sync_t ext2_sync; static vfs_vget_t ext2_vget; static vfs_fhtovp_t ext2_fhtovp; static vfs_mount_t ext2_mount; MALLOC_DEFINE(M_EXT2NODE, "ext2_node", "EXT2 vnode private part"); static MALLOC_DEFINE(M_EXT2MNT, "ext2_mount", "EXT2 mount structure"); static struct vfsops ext2fs_vfsops = { .vfs_fhtovp = ext2_fhtovp, .vfs_mount = ext2_mount, .vfs_root = ext2_root, /* root inode via vget */ .vfs_statfs = ext2_statfs, .vfs_sync = ext2_sync, .vfs_unmount = ext2_unmount, .vfs_vget = ext2_vget, }; VFS_SET(ext2fs_vfsops, ext2fs, 0); static int ext2_check_sb_compat(struct ext2fs *es, struct cdev *dev, int ronly); static int ext2_compute_sb_data(struct vnode * devvp, struct ext2fs * es, struct m_ext2fs * fs); static const char *ext2_opts[] = { "acls", "async", "noatime", "noclusterr", "noclusterw", "noexec", "export", "force", "from", "multilabel", "suiddir", "nosymfollow", "sync", "union", NULL }; /* * VFS Operations. 
* * mount system call */ static int ext2_mount(struct mount *mp) { struct vfsoptlist *opts; struct vnode *devvp; struct thread *td; struct ext2mount *ump = NULL; struct m_ext2fs *fs; struct nameidata nd, *ndp = &nd; accmode_t accmode; char *path, *fspec; int error, flags, len; td = curthread; opts = mp->mnt_optnew; if (vfs_filteropt(opts, ext2_opts)) return (EINVAL); vfs_getopt(opts, "fspath", (void **)&path, NULL); /* Double-check the length of path.. */ if (strlen(path) >= MAXMNTLEN) return (ENAMETOOLONG); fspec = NULL; error = vfs_getopt(opts, "from", (void **)&fspec, &len); if (!error && fspec[len - 1] != '\0') return (EINVAL); /* * If updating, check whether changing from read-only to * read/write; if there is no device name, that's all we do. */ if (mp->mnt_flag & MNT_UPDATE) { ump = VFSTOEXT2(mp); fs = ump->um_e2fs; error = 0; if (fs->e2fs_ronly == 0 && vfs_flagopt(opts, "ro", NULL, 0)) { error = VFS_SYNC(mp, MNT_WAIT); if (error) return (error); flags = WRITECLOSE; if (mp->mnt_flag & MNT_FORCE) flags |= FORCECLOSE; error = ext2_flushfiles(mp, flags, td); if (error == 0 && fs->e2fs_wasvalid && ext2_cgupdate(ump, MNT_WAIT) == 0) { - fs->e2fs->e2fs_state |= E2FS_ISCLEAN; + fs->e2fs->e2fs_state = + htole16((le16toh(fs->e2fs->e2fs_state) | + E2FS_ISCLEAN)); ext2_sbupdate(ump, MNT_WAIT); } fs->e2fs_ronly = 1; vfs_flagopt(opts, "ro", &mp->mnt_flag, MNT_RDONLY); g_topology_lock(); g_access(ump->um_cp, 0, -1, 0); g_topology_unlock(); } if (!error && (mp->mnt_flag & MNT_RELOAD)) error = ext2_reload(mp, td); if (error) return (error); devvp = ump->um_devvp; if (fs->e2fs_ronly && !vfs_flagopt(opts, "ro", NULL, 0)) { if (ext2_check_sb_compat(fs->e2fs, devvp->v_rdev, 0)) return (EPERM); /* * If upgrade to read-write by non-root, then verify * that user has necessary permissions on the device. */ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_ACCESS(devvp, VREAD | VWRITE, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { VOP_UNLOCK(devvp); return (error); } VOP_UNLOCK(devvp); g_topology_lock(); error = g_access(ump->um_cp, 0, 1, 0); g_topology_unlock(); if (error) return (error); - if ((fs->e2fs->e2fs_state & E2FS_ISCLEAN) == 0 || - (fs->e2fs->e2fs_state & E2FS_ERRORS)) { + if ((le16toh(fs->e2fs->e2fs_state) & E2FS_ISCLEAN) == 0 || + (le16toh(fs->e2fs->e2fs_state) & E2FS_ERRORS)) { if (mp->mnt_flag & MNT_FORCE) { printf( "WARNING: %s was not properly dismounted\n", fs->e2fs_fsmnt); } else { printf( "WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n", fs->e2fs_fsmnt); return (EPERM); } } - fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN; + fs->e2fs->e2fs_state = + htole16(le16toh(fs->e2fs->e2fs_state) & ~E2FS_ISCLEAN); (void)ext2_cgupdate(ump, MNT_WAIT); fs->e2fs_ronly = 0; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_RDONLY; MNT_IUNLOCK(mp); } if (vfs_flagopt(opts, "export", NULL, 0)) { /* Process export requests in vfs_mount.c. */ return (error); } } /* * Not an update, or updating the name: look up the name * and verify that it refers to a sensible disk device. */ if (fspec == NULL) return (EINVAL); NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); if ((error = namei(ndp)) != 0) return (error); NDFREE(ndp, NDF_ONLY_PNBUF); devvp = ndp->ni_vp; if (!vn_isdisk(devvp, &error)) { vput(devvp); return (error); } /* * If mount by non-root, then verify that user has necessary * permissions on the device. * * XXXRW: VOP_ACCESS() enough? 
*/ accmode = VREAD; if ((mp->mnt_flag & MNT_RDONLY) == 0) accmode |= VWRITE; error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { vput(devvp); return (error); } if ((mp->mnt_flag & MNT_UPDATE) == 0) { error = ext2_mountfs(devvp, mp); } else { if (devvp != ump->um_devvp) { vput(devvp); return (EINVAL); /* needs translation */ } else vput(devvp); } if (error) { vrele(devvp); return (error); } ump = VFSTOEXT2(mp); fs = ump->um_e2fs; /* * Note that this strncpy() is ok because of a check at the start * of ext2_mount(). */ strncpy(fs->e2fs_fsmnt, path, MAXMNTLEN); fs->e2fs_fsmnt[MAXMNTLEN - 1] = '\0'; vfs_mountedfrom(mp, fspec); return (0); } static int ext2_check_sb_compat(struct ext2fs *es, struct cdev *dev, int ronly) { uint32_t i, mask; - if (es->e2fs_magic != E2FS_MAGIC) { + if (le16toh(es->e2fs_magic) != E2FS_MAGIC) { printf("ext2fs: %s: wrong magic number %#x (expected %#x)\n", - devtoname(dev), es->e2fs_magic, E2FS_MAGIC); + devtoname(dev), le16toh(es->e2fs_magic), E2FS_MAGIC); return (1); } - if (es->e2fs_rev > E2FS_REV0) { - mask = es->e2fs_features_incompat & ~(EXT2F_INCOMPAT_SUPP); + if (le32toh(es->e2fs_rev) > E2FS_REV0) { + mask = le32toh(es->e2fs_features_incompat) & ~(EXT2F_INCOMPAT_SUPP); if (mask) { printf("WARNING: mount of %s denied due to " "unsupported optional features:\n", devtoname(dev)); for (i = 0; i < sizeof(incompat)/sizeof(struct ext2_feature); i++) if (mask & incompat[i].mask) printf("%s ", incompat[i].name); printf("\n"); return (1); } - mask = es->e2fs_features_rocompat & ~EXT2F_ROCOMPAT_SUPP; + mask = le32toh(es->e2fs_features_rocompat) & ~EXT2F_ROCOMPAT_SUPP; if (!ronly && mask) { printf("WARNING: R/W mount of %s denied due to " "unsupported optional features:\n", devtoname(dev)); for (i = 0; i < sizeof(ro_compat)/sizeof(struct ext2_feature); i++) if (mask & ro_compat[i].mask) printf("%s ", ro_compat[i].name); printf("\n"); return (1); } } return (0); } static e4fs_daddr_t ext2_cg_location(struct m_ext2fs *fs, int number) { int cg, descpb, logical_sb, has_super = 0; /* * Adjust logical superblock block number. * Godmar thinks: if the blocksize is greater than 1024, then * the superblock is logically part of block zero. */ logical_sb = fs->e2fs_bsize > SBSIZE ? 
0 : 1; if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) || - number < fs->e2fs->e3fs_first_meta_bg) + number < le32toh(fs->e2fs->e3fs_first_meta_bg)) return (logical_sb + number + 1); if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) descpb = fs->e2fs_bsize / sizeof(struct ext2_gd); else descpb = fs->e2fs_bsize / E2FS_REV0_GD_SIZE; cg = descpb * number; if (ext2_cg_has_sb(fs, cg)) has_super = 1; return (has_super + cg * (e4fs_daddr_t)EXT2_BLOCKS_PER_GROUP(fs) + - fs->e2fs->e2fs_first_dblock); + le32toh(fs->e2fs->e2fs_first_dblock)); } static int ext2_cg_validate(struct m_ext2fs *fs) { uint64_t b_bitmap; uint64_t i_bitmap; uint64_t i_tables; uint64_t first_block, last_block, last_cg_block; struct ext2_gd *gd; unsigned int i, cg_count; - first_block = fs->e2fs->e2fs_first_dblock; + first_block = le32toh(fs->e2fs->e2fs_first_dblock); last_cg_block = ext2_cg_number_gdb(fs, 0); cg_count = fs->e2fs_gcount; for (i = 0; i < fs->e2fs_gcount; i++) { gd = &fs->e2fs_gd[i]; if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || i == fs->e2fs_gcount - 1) { last_block = fs->e2fs_bcount - 1; } else { last_block = first_block + (EXT2_BLOCKS_PER_GROUP(fs) - 1); } if ((cg_count == fs->e2fs_gcount) && - !(gd->ext4bgd_flags & EXT2_BG_INODE_ZEROED)) + !(le16toh(gd->ext4bgd_flags) & EXT2_BG_INODE_ZEROED)) cg_count = i; b_bitmap = e2fs_gd_get_b_bitmap(gd); if (b_bitmap == 0) { SDT_PROBE2(ext2fs, , vfsops, ext2_cg_validate_error, "block bitmap is zero", i); return (EINVAL); } if (b_bitmap <= last_cg_block) { SDT_PROBE2(ext2fs, , vfsops, ext2_cg_validate_error, "block bitmap overlaps gds", i); return (EINVAL); } if (b_bitmap < first_block || b_bitmap > last_block) { SDT_PROBE2(ext2fs, , vfsops, ext2_cg_validate_error, "block bitmap not in group", i); return (EINVAL); } i_bitmap = e2fs_gd_get_i_bitmap(gd); if (i_bitmap == 0) { SDT_PROBE2(ext2fs, , vfsops, ext2_cg_validate_error, "inode bitmap is zero", i); return (EINVAL); } if (i_bitmap <= last_cg_block) { SDT_PROBE2(ext2fs, , vfsops, ext2_cg_validate_error, "inode bitmap overlaps gds", i); return (EINVAL); } if (i_bitmap < first_block || i_bitmap > last_block) { SDT_PROBE2(ext2fs, , vfsops, ext2_cg_validate_error, "inode bitmap not in group blk", i); return (EINVAL); } i_tables = e2fs_gd_get_i_tables(gd); if (i_tables == 0) { SDT_PROBE2(ext2fs, , vfsops, ext2_cg_validate_error, "inode table is zero", i); return (EINVAL); } if (i_tables <= last_cg_block) { SDT_PROBE2(ext2fs, , vfsops, ext2_cg_validate_error, "inode talbes overlaps gds", i); return (EINVAL); } if (i_tables < first_block || i_tables + fs->e2fs_itpg - 1 > last_block) { SDT_PROBE2(ext2fs, , vfsops, ext2_cg_validate_error, "inode tables not in group blk", i); return (EINVAL); } if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG)) first_block += EXT2_BLOCKS_PER_GROUP(fs); } return (0); } /* * This computes the fields of the m_ext2fs structure from the * data in the ext2fs structure read in. 
*/ static int ext2_compute_sb_data(struct vnode *devvp, struct ext2fs *es, struct m_ext2fs *fs) { struct buf *bp; uint32_t e2fs_descpb, e2fs_gdbcount_alloc; int i, j; int g_count = 0; int error; /* Check checksum features */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) && EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "incorrect checksum features combination"); return (EINVAL); } /* Precompute checksum seed for all metadata */ ext2_sb_csum_set_seed(fs); /* Verify sb csum if possible */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { error = ext2_sb_csum_verify(fs); if (error) { return (error); } } /* Check for block size = 1K|2K|4K */ - if (es->e2fs_log_bsize > 2) { + if (le32toh(es->e2fs_log_bsize) > 2) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "bad block size"); return (EINVAL); } - fs->e2fs_bshift = EXT2_MIN_BLOCK_LOG_SIZE + es->e2fs_log_bsize; + fs->e2fs_bshift = EXT2_MIN_BLOCK_LOG_SIZE + le32toh(es->e2fs_log_bsize); fs->e2fs_bsize = 1U << fs->e2fs_bshift; - fs->e2fs_fsbtodb = es->e2fs_log_bsize + 1; + fs->e2fs_fsbtodb = le32toh(es->e2fs_log_bsize) + 1; fs->e2fs_qbmask = fs->e2fs_bsize - 1; /* Check for fragment size */ - if (es->e2fs_log_fsize > + if (le32toh(es->e2fs_log_fsize) > (EXT2_MAX_FRAG_LOG_SIZE - EXT2_MIN_BLOCK_LOG_SIZE)) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "invalid log cluster size"); return (EINVAL); } - fs->e2fs_fsize = EXT2_MIN_FRAG_SIZE << es->e2fs_log_fsize; + fs->e2fs_fsize = EXT2_MIN_FRAG_SIZE << le32toh(es->e2fs_log_fsize); if (fs->e2fs_fsize != fs->e2fs_bsize) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "fragment size != block size"); return (EINVAL); } fs->e2fs_fpb = fs->e2fs_bsize / fs->e2fs_fsize; /* Check reserved gdt blocks for future filesystem expansion */ - if (es->e2fs_reserved_ngdb > (fs->e2fs_bsize / 4)) { + if (le16toh(es->e2fs_reserved_ngdb) > (fs->e2fs_bsize / 4)) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "number of reserved GDT blocks too large"); return (EINVAL); } - if (es->e2fs_rev == E2FS_REV0) { + if (le32toh(es->e2fs_rev) == E2FS_REV0) { fs->e2fs_isize = E2FS_REV0_INODE_SIZE; } else { - fs->e2fs_isize = es->e2fs_inode_size; + fs->e2fs_isize = le16toh(es->e2fs_inode_size); /* * Check first ino. */ - if (es->e2fs_first_ino < EXT2_FIRSTINO) { + if (le32toh(es->e2fs_first_ino) < EXT2_FIRSTINO) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "invalid first ino"); return (EINVAL); } /* * Simple sanity check for superblock inode size value. 
*/ if (EXT2_INODE_SIZE(fs) < E2FS_REV0_INODE_SIZE || EXT2_INODE_SIZE(fs) > fs->e2fs_bsize || (fs->e2fs_isize & (fs->e2fs_isize - 1)) != 0) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "invalid inode size"); return (EINVAL); } } /* Check group descriptors */ if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT) && - es->e3fs_desc_size != E2FS_64BIT_GD_SIZE) { + le16toh(es->e3fs_desc_size) != E2FS_64BIT_GD_SIZE) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "unsupported 64bit descriptor size"); return (EINVAL); } - fs->e2fs_bpg = es->e2fs_bpg; - fs->e2fs_fpg = es->e2fs_fpg; + fs->e2fs_bpg = le32toh(es->e2fs_bpg); + fs->e2fs_fpg = le32toh(es->e2fs_fpg); if (fs->e2fs_bpg == 0 || fs->e2fs_fpg == 0) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "zero blocks/fragments per group"); return (EINVAL); } else if (fs->e2fs_bpg != fs->e2fs_fpg) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "blocks per group not equal fragments per group"); return (EINVAL); } if (fs->e2fs_bpg != fs->e2fs_bsize * 8) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "non-standard group size unsupported"); return (EINVAL); } fs->e2fs_ipb = fs->e2fs_bsize / EXT2_INODE_SIZE(fs); if (fs->e2fs_ipb == 0 || fs->e2fs_ipb > fs->e2fs_bsize / E2FS_REV0_INODE_SIZE) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "bad inodes per block size"); return (EINVAL); } - fs->e2fs_ipg = es->e2fs_ipg; + fs->e2fs_ipg = le32toh(es->e2fs_ipg); if (fs->e2fs_ipg < fs->e2fs_ipb || fs->e2fs_ipg > fs->e2fs_bsize * 8) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "invalid inodes per group"); return (EINVAL); } fs->e2fs_itpg = fs->e2fs_ipg / fs->e2fs_ipb; - fs->e2fs_bcount = es->e2fs_bcount; - fs->e2fs_rbcount = es->e2fs_rbcount; - fs->e2fs_fbcount = es->e2fs_fbcount; + fs->e2fs_bcount = le32toh(es->e2fs_bcount); + fs->e2fs_rbcount = le32toh(es->e2fs_rbcount); + fs->e2fs_fbcount = le32toh(es->e2fs_fbcount); if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { - fs->e2fs_bcount |= (uint64_t)(es->e4fs_bcount_hi) << 32; - fs->e2fs_rbcount |= (uint64_t)(es->e4fs_rbcount_hi) << 32; - fs->e2fs_fbcount |= (uint64_t)(es->e4fs_fbcount_hi) << 32; + fs->e2fs_bcount |= (uint64_t)(le32toh(es->e4fs_bcount_hi)) << 32; + fs->e2fs_rbcount |= (uint64_t)(le32toh(es->e4fs_rbcount_hi)) << 32; + fs->e2fs_fbcount |= (uint64_t)(le32toh(es->e4fs_fbcount_hi)) << 32; } if (fs->e2fs_rbcount > fs->e2fs_bcount || fs->e2fs_fbcount > fs->e2fs_bcount) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "invalid block count"); return (EINVAL); } - if (es->e2fs_first_dblock >= fs->e2fs_bcount) { + + fs->e2fs_ficount = le32toh(es->e2fs_ficount); + if (fs->e2fs_ficount > le32toh(es->e2fs_icount)) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, + "invalid number of free inodes"); + return (EINVAL); + } + + if (le32toh(es->e2fs_first_dblock) >= fs->e2fs_bcount) { + SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "first data block out of range"); return (EINVAL); } - fs->e2fs_gcount = howmany(fs->e2fs_bcount - es->e2fs_first_dblock, - EXT2_BLOCKS_PER_GROUP(fs)); + fs->e2fs_gcount = howmany(fs->e2fs_bcount - + le32toh(es->e2fs_first_dblock), EXT2_BLOCKS_PER_GROUP(fs)); if (fs->e2fs_gcount > ((uint64_t)1 << 32) - EXT2_DESCS_PER_BLOCK(fs)) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "groups count too large"); return (EINVAL); } /* Check for extra isize in big inodes. 
*/ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_EXTRA_ISIZE) && EXT2_INODE_SIZE(fs) < sizeof(struct ext2fs_dinode)) { SDT_PROBE1(ext2fs, , vfsops, ext2_compute_sb_data_error, "no space for extra inode timestamps"); return (EINVAL); } /* s_resuid / s_resgid ? */ if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { e2fs_descpb = fs->e2fs_bsize / E2FS_64BIT_GD_SIZE; e2fs_gdbcount_alloc = howmany(fs->e2fs_gcount, e2fs_descpb); } else { e2fs_descpb = fs->e2fs_bsize / E2FS_REV0_GD_SIZE; e2fs_gdbcount_alloc = howmany(fs->e2fs_gcount, fs->e2fs_bsize / sizeof(struct ext2_gd)); } fs->e2fs_gdbcount = howmany(fs->e2fs_gcount, e2fs_descpb); fs->e2fs_gd = malloc(e2fs_gdbcount_alloc * fs->e2fs_bsize, M_EXT2MNT, M_WAITOK | M_ZERO); fs->e2fs_contigdirs = malloc(fs->e2fs_gcount * sizeof(*fs->e2fs_contigdirs), M_EXT2MNT, M_WAITOK | M_ZERO); for (i = 0; i < fs->e2fs_gdbcount; i++) { error = bread(devvp, fsbtodb(fs, ext2_cg_location(fs, i)), fs->e2fs_bsize, NOCRED, &bp); if (error) { /* * fs->e2fs_gd and fs->e2fs_contigdirs * will be freed later by the caller, * because this function could be called from * MNT_UPDATE path. */ return (error); } if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { memcpy(&fs->e2fs_gd[ i * fs->e2fs_bsize / sizeof(struct ext2_gd)], bp->b_data, fs->e2fs_bsize); } else { for (j = 0; j < e2fs_descpb && g_count < fs->e2fs_gcount; j++, g_count++) memcpy(&fs->e2fs_gd[g_count], bp->b_data + j * E2FS_REV0_GD_SIZE, E2FS_REV0_GD_SIZE); } brelse(bp); bp = NULL; } /* Validate cgs consistency */ error = ext2_cg_validate(fs); if (error) return (error); /* Verfy cgs csum */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) || EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { error = ext2_gd_csum_verify(fs, devvp->v_rdev); if (error) return (error); } /* Initialization for the ext2 Orlov allocator variant. */ fs->e2fs_total_dir = 0; for (i = 0; i < fs->e2fs_gcount; i++) fs->e2fs_total_dir += e2fs_gd_get_ndirs(&fs->e2fs_gd[i]); - if (es->e2fs_rev == E2FS_REV0 || + if (le32toh(es->e2fs_rev) == E2FS_REV0 || !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_LARGEFILE)) fs->e2fs_maxfilesize = 0x7fffffff; else { fs->e2fs_maxfilesize = 0xffffffffffff; if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_HUGE_FILE)) fs->e2fs_maxfilesize = 0x7fffffffffffffff; } - if (es->e4fs_flags & E2FS_UNSIGNED_HASH) { + if (le32toh(es->e4fs_flags) & E2FS_UNSIGNED_HASH) { fs->e2fs_uhash = 3; - } else if ((es->e4fs_flags & E2FS_SIGNED_HASH) == 0) { + } else if ((le32toh(es->e4fs_flags) & E2FS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ - es->e4fs_flags |= E2FS_UNSIGNED_HASH; + es->e4fs_flags = htole32(le32toh(es->e4fs_flags) | E2FS_UNSIGNED_HASH); fs->e2fs_uhash = 3; #else - es->e4fs_flags |= E2FS_SIGNED_HASH; + es->e4fs_flags = htole32(le32toh(es->e4fs_flags) | E2FS_SIGNED_HASH); #endif } if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) error = ext2_sb_csum_verify(fs); return (error); } /* * Reload all incore data for a filesystem (used after running fsck on * the root filesystem and finding things to fix). The filesystem must * be mounted read-only. * * Things to do to update the mount: * 1) invalidate all cached meta-data. * 2) re-read superblock from disk. * 3) invalidate all cluster summary information. * 4) invalidate all inactive vnodes. * 5) invalidate all cached file data. * 6) re-read inode data for all active vnodes. * XXX we are missing some steps, in particular # 3, this has to be reviewed. 
*/ static int ext2_reload(struct mount *mp, struct thread *td) { struct vnode *vp, *mvp, *devvp; struct inode *ip; struct buf *bp; struct ext2fs *es; struct m_ext2fs *fs; struct csum *sump; int error, i; int32_t *lp; if ((mp->mnt_flag & MNT_RDONLY) == 0) return (EINVAL); /* * Step 1: invalidate all cached meta-data. */ devvp = VFSTOEXT2(mp)->um_devvp; vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); if (vinvalbuf(devvp, 0, 0, 0) != 0) panic("ext2_reload: dirty1"); VOP_UNLOCK(devvp); /* * Step 2: re-read superblock from disk. * constants have been adjusted for ext2 */ if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0) return (error); es = (struct ext2fs *)bp->b_data; if (ext2_check_sb_compat(es, devvp->v_rdev, 0) != 0) { brelse(bp); return (EIO); /* XXX needs translation */ } fs = VFSTOEXT2(mp)->um_e2fs; bcopy(bp->b_data, fs->e2fs, sizeof(struct ext2fs)); if ((error = ext2_compute_sb_data(devvp, es, fs)) != 0) { brelse(bp); return (error); } #ifdef UNKLAR if (fs->fs_sbsize < SBSIZE) bp->b_flags |= B_INVAL; #endif brelse(bp); /* * Step 3: invalidate all cluster summary information. */ if (fs->e2fs_contigsumsize > 0) { lp = fs->e2fs_maxcluster; sump = fs->e2fs_clustersum; for (i = 0; i < fs->e2fs_gcount; i++, sump++) { *lp++ = fs->e2fs_contigsumsize; sump->cs_init = 0; bzero(sump->cs_sum, fs->e2fs_contigsumsize + 1); } } loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { /* * Step 4: invalidate all cached file data. */ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } if (vinvalbuf(vp, 0, 0, 0)) panic("ext2_reload: dirty2"); /* * Step 5: re-read inode data for all active vnodes. */ ip = VTOI(vp); error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { VOP_UNLOCK(vp); vrele(vp); MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); return (error); } error = ext2_ei2i((struct ext2fs_dinode *)((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)), ip); brelse(bp); VOP_UNLOCK(vp); vrele(vp); if (error) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); return (error); } } return (0); } /* * Common code for mount and mountroot. */ static int ext2_mountfs(struct vnode *devvp, struct mount *mp) { struct ext2mount *ump; struct buf *bp; struct m_ext2fs *fs; struct ext2fs *es; struct cdev *dev = devvp->v_rdev; struct g_consumer *cp; struct bufobj *bo; struct csum *sump; int error; int ronly; int i; u_long size; int32_t *lp; int32_t e2fs_maxcontig; ronly = vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0); /* XXX: use VOP_ACESS to check FS perms */ g_topology_lock(); error = g_vfs_open(devvp, &cp, "ext2fs", ronly ? 0 : 1); g_topology_unlock(); VOP_UNLOCK(devvp); if (error) return (error); /* XXX: should we check for some sectorsize or 512 instead? 
*/ if (((SBSIZE % cp->provider->sectorsize) != 0) || (SBSIZE < cp->provider->sectorsize)) { g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); return (EINVAL); } bo = &devvp->v_bufobj; bo->bo_private = cp; bo->bo_ops = g_vfs_bufops; if (devvp->v_rdev->si_iosize_max != 0) mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max; if (mp->mnt_iosize_max > MAXPHYS) mp->mnt_iosize_max = MAXPHYS; bp = NULL; ump = NULL; if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0) goto out; es = (struct ext2fs *)bp->b_data; if (ext2_check_sb_compat(es, dev, ronly) != 0) { error = EINVAL; /* XXX needs translation */ goto out; } - if ((es->e2fs_state & E2FS_ISCLEAN) == 0 || - (es->e2fs_state & E2FS_ERRORS)) { + if ((le16toh(es->e2fs_state) & E2FS_ISCLEAN) == 0 || + (le16toh(es->e2fs_state) & E2FS_ERRORS)) { if (ronly || (mp->mnt_flag & MNT_FORCE)) { printf( "WARNING: Filesystem was not properly dismounted\n"); } else { printf( "WARNING: R/W mount denied. Filesystem is not clean - run fsck\n"); error = EPERM; goto out; } } ump = malloc(sizeof(*ump), M_EXT2MNT, M_WAITOK | M_ZERO); /* * I don't know whether this is the right strategy. Note that * we dynamically allocate both an m_ext2fs and an ext2fs * while Linux keeps the super block in a locked buffer. */ ump->um_e2fs = malloc(sizeof(struct m_ext2fs), M_EXT2MNT, M_WAITOK | M_ZERO); ump->um_e2fs->e2fs = malloc(sizeof(struct ext2fs), M_EXT2MNT, M_WAITOK); mtx_init(EXT2_MTX(ump), "EXT2FS", "EXT2FS Lock", MTX_DEF); bcopy(es, ump->um_e2fs->e2fs, (u_int)sizeof(struct ext2fs)); if ((error = ext2_compute_sb_data(devvp, ump->um_e2fs->e2fs, ump->um_e2fs))) goto out; /* * Calculate the maximum contiguous blocks and size of cluster summary * array. In FFS this is done by newfs; however, the superblock * in ext2fs doesn't have these variables, so we can calculate * them here. */ e2fs_maxcontig = MAX(1, MAXPHYS / ump->um_e2fs->e2fs_bsize); ump->um_e2fs->e2fs_contigsumsize = MIN(e2fs_maxcontig, EXT2_MAXCONTIG); if (ump->um_e2fs->e2fs_contigsumsize > 0) { size = ump->um_e2fs->e2fs_gcount * sizeof(int32_t); ump->um_e2fs->e2fs_maxcluster = malloc(size, M_EXT2MNT, M_WAITOK); size = ump->um_e2fs->e2fs_gcount * sizeof(struct csum); ump->um_e2fs->e2fs_clustersum = malloc(size, M_EXT2MNT, M_WAITOK); lp = ump->um_e2fs->e2fs_maxcluster; sump = ump->um_e2fs->e2fs_clustersum; for (i = 0; i < ump->um_e2fs->e2fs_gcount; i++, sump++) { *lp++ = ump->um_e2fs->e2fs_contigsumsize; sump->cs_init = 0; sump->cs_sum = malloc((ump->um_e2fs->e2fs_contigsumsize + 1) * sizeof(int32_t), M_EXT2MNT, M_WAITOK | M_ZERO); } } brelse(bp); bp = NULL; fs = ump->um_e2fs; fs->e2fs_ronly = ronly; /* ronly is set according to mnt_flags */ /* * If the fs is not mounted read-only, make sure the super block is * always written back on a sync(). */ - fs->e2fs_wasvalid = fs->e2fs->e2fs_state & E2FS_ISCLEAN ? 1 : 0; + fs->e2fs_wasvalid = le16toh(fs->e2fs->e2fs_state) & E2FS_ISCLEAN ? 
1 : 0; if (ronly == 0) { - fs->e2fs_fmod = 1; /* mark it modified */ - fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN; /* set fs invalid */ + fs->e2fs_fmod = 1; /* mark it modified and set fs invalid */ + fs->e2fs->e2fs_state = + htole16(le16toh(fs->e2fs->e2fs_state) & ~E2FS_ISCLEAN); } mp->mnt_data = ump; mp->mnt_stat.f_fsid.val[0] = dev2udev(dev); mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; mp->mnt_maxsymlinklen = EXT2_MAXSYMLINKLEN; MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; MNT_IUNLOCK(mp); ump->um_mountp = mp; ump->um_dev = dev; ump->um_devvp = devvp; ump->um_bo = &devvp->v_bufobj; ump->um_cp = cp; /* * Setting those two parameters allowed us to use * ufs_bmap w/o changse! */ ump->um_nindir = EXT2_ADDR_PER_BLOCK(fs); - ump->um_bptrtodb = fs->e2fs->e2fs_log_bsize + 1; + ump->um_bptrtodb = le32toh(fs->e2fs->e2fs_log_bsize) + 1; ump->um_seqinc = EXT2_FRAGS_PER_BLOCK(fs); if (ronly == 0) ext2_sbupdate(ump, MNT_WAIT); /* * Initialize filesystem stat information in mount struct. */ MNT_ILOCK(mp); mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | MNTK_USES_BCACHE; MNT_IUNLOCK(mp); return (0); out: if (bp) brelse(bp); if (cp != NULL) { g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); } if (ump) { mtx_destroy(EXT2_MTX(ump)); free(ump->um_e2fs->e2fs_gd, M_EXT2MNT); free(ump->um_e2fs->e2fs_contigdirs, M_EXT2MNT); free(ump->um_e2fs->e2fs, M_EXT2MNT); free(ump->um_e2fs, M_EXT2MNT); free(ump, M_EXT2MNT); mp->mnt_data = NULL; } return (error); } /* * Unmount system call. */ static int ext2_unmount(struct mount *mp, int mntflags) { struct ext2mount *ump; struct m_ext2fs *fs; struct csum *sump; int error, flags, i, ronly; flags = 0; if (mntflags & MNT_FORCE) { if (mp->mnt_flag & MNT_ROOTFS) return (EINVAL); flags |= FORCECLOSE; } if ((error = ext2_flushfiles(mp, flags, curthread)) != 0) return (error); ump = VFSTOEXT2(mp); fs = ump->um_e2fs; ronly = fs->e2fs_ronly; if (ronly == 0 && ext2_cgupdate(ump, MNT_WAIT) == 0) { if (fs->e2fs_wasvalid) - fs->e2fs->e2fs_state |= E2FS_ISCLEAN; + fs->e2fs->e2fs_state = + htole16(le16toh(fs->e2fs->e2fs_state) | E2FS_ISCLEAN); ext2_sbupdate(ump, MNT_WAIT); } g_topology_lock(); g_vfs_close(ump->um_cp); g_topology_unlock(); vrele(ump->um_devvp); sump = fs->e2fs_clustersum; for (i = 0; i < fs->e2fs_gcount; i++, sump++) free(sump->cs_sum, M_EXT2MNT); free(fs->e2fs_clustersum, M_EXT2MNT); free(fs->e2fs_maxcluster, M_EXT2MNT); free(fs->e2fs_gd, M_EXT2MNT); free(fs->e2fs_contigdirs, M_EXT2MNT); free(fs->e2fs, M_EXT2MNT); free(fs, M_EXT2MNT); free(ump, M_EXT2MNT); mp->mnt_data = NULL; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_LOCAL; MNT_IUNLOCK(mp); return (error); } /* * Flush out all the files in a filesystem. */ static int ext2_flushfiles(struct mount *mp, int flags, struct thread *td) { int error; error = vflush(mp, 0, flags, td); return (error); } /* * Get filesystem statistics. 
*/ int ext2_statfs(struct mount *mp, struct statfs *sbp) { struct ext2mount *ump; struct m_ext2fs *fs; uint32_t overhead, overhead_per_group, ngdb; int i, ngroups; ump = VFSTOEXT2(mp); fs = ump->um_e2fs; - if (fs->e2fs->e2fs_magic != E2FS_MAGIC) + if (le16toh(fs->e2fs->e2fs_magic) != E2FS_MAGIC) panic("ext2_statfs"); /* * Compute the overhead (FS structures) */ overhead_per_group = 1 /* block bitmap */ + 1 /* inode bitmap */ + fs->e2fs_itpg; - overhead = fs->e2fs->e2fs_first_dblock + + overhead = le32toh(fs->e2fs->e2fs_first_dblock) + fs->e2fs_gcount * overhead_per_group; - if (fs->e2fs->e2fs_rev > E2FS_REV0 && - fs->e2fs->e2fs_features_rocompat & EXT2F_ROCOMPAT_SPARSESUPER) { + if (le32toh(fs->e2fs->e2fs_rev) > E2FS_REV0 && + le32toh(fs->e2fs->e2fs_features_rocompat) & EXT2F_ROCOMPAT_SPARSESUPER) { for (i = 0, ngroups = 0; i < fs->e2fs_gcount; i++) { if (ext2_cg_has_sb(fs, i)) ngroups++; } } else { ngroups = fs->e2fs_gcount; } ngdb = fs->e2fs_gdbcount; - if (fs->e2fs->e2fs_rev > E2FS_REV0 && - fs->e2fs->e2fs_features_compat & EXT2F_COMPAT_RESIZE) - ngdb += fs->e2fs->e2fs_reserved_ngdb; + if (le32toh(fs->e2fs->e2fs_rev) > E2FS_REV0 && + le32toh(fs->e2fs->e2fs_features_compat) & EXT2F_COMPAT_RESIZE) + ngdb += le16toh(fs->e2fs->e2fs_reserved_ngdb); overhead += ngroups * (1 /* superblock */ + ngdb); sbp->f_bsize = EXT2_FRAG_SIZE(fs); sbp->f_iosize = EXT2_BLOCK_SIZE(fs); sbp->f_blocks = fs->e2fs_bcount - overhead; sbp->f_bfree = fs->e2fs_fbcount; sbp->f_bavail = sbp->f_bfree - fs->e2fs_rbcount; - sbp->f_files = fs->e2fs->e2fs_icount; - sbp->f_ffree = fs->e2fs->e2fs_ficount; + sbp->f_files = le32toh(fs->e2fs->e2fs_icount); + sbp->f_ffree = fs->e2fs_ficount; return (0); } /* * Go through the disk queues to initiate sandbagged IO; * go through the inodes to write those that have been modified; * initiate the writing of the super block if it has been modified. * * Note: we are always called with the filesystem marked `MPBUSY'. */ static int ext2_sync(struct mount *mp, int waitfor) { struct vnode *mvp, *vp; struct thread *td; struct inode *ip; struct ext2mount *ump = VFSTOEXT2(mp); struct m_ext2fs *fs; int error, allerror = 0; td = curthread; fs = ump->um_e2fs; if (fs->e2fs_fmod != 0 && fs->e2fs_ronly != 0) { /* XXX */ panic("ext2_sync: rofs mod fs=%s", fs->e2fs_fsmnt); } /* * Write back each (modified) inode. */ loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } ip = VTOI(vp); if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && (vp->v_bufobj.bo_dirty.bv_cnt == 0 || waitfor == MNT_LAZY)) { VI_UNLOCK(vp); continue; } error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td); if (error) { if (error == ENOENT) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } continue; } if ((error = VOP_FSYNC(vp, waitfor, td)) != 0) allerror = error; VOP_UNLOCK(vp); vrele(vp); } /* * Force stale filesystem control information to be flushed. */ if (waitfor != MNT_LAZY) { vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); if ((error = VOP_FSYNC(ump->um_devvp, waitfor, td)) != 0) allerror = error; VOP_UNLOCK(ump->um_devvp); } /* * Write back modified superblock. */ if (fs->e2fs_fmod != 0) { fs->e2fs_fmod = 0; - fs->e2fs->e2fs_wtime = time_second; + fs->e2fs->e2fs_wtime = htole32(time_second); if ((error = ext2_cgupdate(ump, waitfor)) != 0) allerror = error; } return (allerror); } /* * Look up an EXT2FS dinode number to find its incore vnode, otherwise read it * in from disk. 
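/*
 * Editor's sketch (not part of the patch): ext2_statfs() above now pulls
 * e2fs_first_dblock, the feature words and e2fs_reserved_ngdb through
 * le32toh()/le16toh() before doing its overhead arithmetic.  The
 * function below restates that arithmetic with plain host-order
 * parameters: every group pays for a block bitmap, an inode bitmap and
 * its inode table; groups that carry a superblock backup also pay for
 * the superblock and the group-descriptor blocks (plus reserved GDT
 * blocks when the resize feature is enabled -- pass 0 otherwise).
 */
#include <stdint.h>

uint64_t
sketch_statfs_overhead(uint32_t first_dblock, uint32_t groups,
    uint32_t inode_table_blocks, uint32_t groups_with_sb,
    uint32_t gd_blocks, uint32_t reserved_gd_blocks)
{
	uint64_t per_group, overhead;

	per_group = 1 /* block bitmap */ + 1 /* inode bitmap */ +
	    inode_table_blocks;
	overhead = first_dblock + (uint64_t)groups * per_group;
	overhead += (uint64_t)groups_with_sb *
	    (1 /* superblock */ + gd_blocks + reserved_gd_blocks);
	return (overhead);
}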
If it is in core, wait for the lock bit to clear, then * return the inode locked. Detection and handling of mount points must be * done by the calling routine. */ static int ext2_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp) { struct m_ext2fs *fs; struct inode *ip; struct ext2mount *ump; struct buf *bp; struct vnode *vp; struct thread *td; unsigned int i, used_blocks; int error; td = curthread; error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); ump = VFSTOEXT2(mp); ip = malloc(sizeof(struct inode), M_EXT2NODE, M_WAITOK | M_ZERO); /* Allocate a new vnode/inode. */ if ((error = getnewvnode("ext2fs", mp, &ext2_vnodeops, &vp)) != 0) { *vpp = NULL; free(ip, M_EXT2NODE); return (error); } vp->v_data = ip; ip->i_vnode = vp; ip->i_e2fs = fs = ump->um_e2fs; ip->i_ump = ump; ip->i_number = ino; lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); error = insmntque(vp, mp); if (error != 0) { free(ip, M_EXT2NODE); *vpp = NULL; return (error); } error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); /* Read in the disk contents for the inode, copy into the inode. */ if ((error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { /* * The inode does not contain anything useful, so it would * be misleading to leave it on its hash chain. With mode * still zero, it will be unlinked and returned to the free * list by vput(). */ brelse(bp); vput(vp); *vpp = NULL; return (error); } /* convert ext2 inode to dinode */ error = ext2_ei2i((struct ext2fs_dinode *)((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ino)), ip); if (error) { brelse(bp); vput(vp); *vpp = NULL; return (error); } ip->i_block_group = ino_to_cg(fs, ino); ip->i_next_alloc_block = 0; ip->i_next_alloc_goal = 0; /* * Now we want to make sure that block pointers for unused * blocks are zeroed out - ext2_balloc depends on this * although for regular files and directories only * * If IN_E4EXTENTS is enabled, unused blocks are not zeroed * out because we could corrupt the extent tree. */ if (!(ip->i_flag & IN_E4EXTENTS) && (S_ISDIR(ip->i_mode) || S_ISREG(ip->i_mode))) { used_blocks = howmany(ip->i_size, fs->e2fs_bsize); for (i = used_blocks; i < EXT2_NDIR_BLOCKS; i++) ip->i_db[i] = 0; } #ifdef EXT2FS_PRINT_EXTENTS ext2_print_inode(ip); ext4_ext_print_extent_tree_status(ip); #endif bqrelse(bp); /* * Initialize the vnode from the inode, check for aliases. * Note that the underlying vnode may have changed. */ if ((error = ext2_vinit(mp, &ext2_fifoops, &vp)) != 0) { vput(vp); *vpp = NULL; return (error); } /* * Finish inode initialization. */ *vpp = vp; return (0); } /* * File handle to vnode * * Have to be really careful about stale file handles: * - check that the inode number is valid * - call ext2_vget() to get the locked inode * - check for an unallocated inode (i_mode == 0) * - check that the given client host has export rights and return * those rights via. 
exflagsp and credanonp */ static int ext2_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp) { struct inode *ip; struct ufid *ufhp; struct vnode *nvp; struct m_ext2fs *fs; int error; ufhp = (struct ufid *)fhp; fs = VFSTOEXT2(mp)->um_e2fs; if (ufhp->ufid_ino < EXT2_ROOTINO || - ufhp->ufid_ino > fs->e2fs_gcount * fs->e2fs->e2fs_ipg) + ufhp->ufid_ino > fs->e2fs_gcount * fs->e2fs_ipg) return (ESTALE); error = VFS_VGET(mp, ufhp->ufid_ino, LK_EXCLUSIVE, &nvp); if (error) { *vpp = NULLVP; return (error); } ip = VTOI(nvp); if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen || ip->i_nlink <= 0) { vput(nvp); *vpp = NULLVP; return (ESTALE); } *vpp = nvp; vnode_create_vobject(*vpp, 0, curthread); return (0); } /* * Write a superblock and associated information back to disk. */ static int ext2_sbupdate(struct ext2mount *mp, int waitfor) { struct m_ext2fs *fs = mp->um_e2fs; struct ext2fs *es = fs->e2fs; struct buf *bp; int error = 0; - es->e2fs_bcount = fs->e2fs_bcount & 0xffffffff; - es->e2fs_rbcount = fs->e2fs_rbcount & 0xffffffff; - es->e2fs_fbcount = fs->e2fs_fbcount & 0xffffffff; + es->e2fs_bcount = htole32(fs->e2fs_bcount & 0xffffffff); + es->e2fs_rbcount = htole32(fs->e2fs_rbcount & 0xffffffff); + es->e2fs_fbcount = htole32(fs->e2fs_fbcount & 0xffffffff); if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { - es->e4fs_bcount_hi = fs->e2fs_bcount >> 32; - es->e4fs_rbcount_hi = fs->e2fs_rbcount >> 32; - es->e4fs_fbcount_hi = fs->e2fs_fbcount >> 32; + es->e4fs_bcount_hi = htole32(fs->e2fs_bcount >> 32); + es->e4fs_rbcount_hi = htole32(fs->e2fs_rbcount >> 32); + es->e4fs_fbcount_hi = htole32(fs->e2fs_fbcount >> 32); } + + es->e2fs_ficount = htole32(fs->e2fs_ficount); if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) ext2_sb_csum_set(fs); bp = getblk(mp->um_devvp, SBLOCK, SBSIZE, 0, 0, 0); bcopy((caddr_t)es, bp->b_data, (u_int)sizeof(struct ext2fs)); if (waitfor == MNT_WAIT) error = bwrite(bp); else bawrite(bp); /* * The buffers for group descriptors, inode bitmaps and block bitmaps * are not busy at this point and are (hopefully) written by the * usual sync mechanism. No need to write them here. */ return (error); } int ext2_cgupdate(struct ext2mount *mp, int waitfor) { struct m_ext2fs *fs = mp->um_e2fs; struct buf *bp; int i, j, g_count = 0, error = 0, allerror = 0; allerror = ext2_sbupdate(mp, waitfor); /* Update gd csums */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) || EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) ext2_gd_csum_set(fs); for (i = 0; i < fs->e2fs_gdbcount; i++) { bp = getblk(mp->um_devvp, fsbtodb(fs, ext2_cg_location(fs, i)), fs->e2fs_bsize, 0, 0, 0); if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { memcpy(bp->b_data, &fs->e2fs_gd[ i * fs->e2fs_bsize / sizeof(struct ext2_gd)], fs->e2fs_bsize); } else { for (j = 0; j < fs->e2fs_bsize / E2FS_REV0_GD_SIZE && g_count < fs->e2fs_gcount; j++, g_count++) memcpy(bp->b_data + j * E2FS_REV0_GD_SIZE, &fs->e2fs_gd[g_count], E2FS_REV0_GD_SIZE); } if (waitfor == MNT_WAIT) error = bwrite(bp); else bawrite(bp); } if (!allerror && error) allerror = error; return (allerror); } /* * Return the root of a filesystem. 
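/*
 * Editor's sketch (not part of the patch): with the 64BIT feature,
 * ext2_sbupdate() above stores each in-memory 64-bit count as a
 * little-endian low word in the classic superblock field and a
 * little-endian high word in the e4fs_*_hi companion.  The helpers
 * below model that split with explicit byte stores; the real code
 * expresses the same thing with htole32() on the two fields.
 */
#include <stdint.h>

/* Store a 32-bit value little-endian at p. */
void
sketch_put_le32(uint8_t p[4], uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Split a 64-bit block count into its on-disk lo/hi pair. */
void
sketch_put_bcount(uint8_t lo[4], uint8_t hi[4], uint64_t bcount)
{
	sketch_put_le32(lo, (uint32_t)(bcount & 0xffffffff));
	sketch_put_le32(hi, (uint32_t)(bcount >> 32));
}

/* Reassemble the pair into the in-memory 64-bit count. */
uint64_t
sketch_get_bcount(uint32_t lo, uint32_t hi)
{
	return (((uint64_t)hi << 32) | lo);
}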
*/ static int ext2_root(struct mount *mp, int flags, struct vnode **vpp) { struct vnode *nvp; int error; error = VFS_VGET(mp, EXT2_ROOTINO, LK_EXCLUSIVE, &nvp); if (error) return (error); *vpp = nvp; return (0); } Index: head/sys/fs/ext2fs/ext2_vnops.c =================================================================== --- head/sys/fs/ext2fs/ext2_vnops.c (revision 361135) +++ head/sys/fs/ext2fs/ext2_vnops.c (revision 361136) @@ -1,2363 +1,2365 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ufs_vnops.c 8.7 (Berkeley) 2/3/94 * @(#)ufs_vnops.c 8.27 (Berkeley) 5/27/95 * $FreeBSD$ */ #include "opt_suiddir.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_directio.h" #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(ext2fs); /* * ext2fs trace probe: * arg0: verbosity. 
Higher numbers give more verbose messages * arg1: Textual message */ SDT_PROBE_DEFINE2(ext2fs, , vnops, trace, "int", "char*"); static int ext2_makeinode(int mode, struct vnode *, struct vnode **, struct componentname *); static void ext2_itimes_locked(struct vnode *); static vop_access_t ext2_access; static int ext2_chmod(struct vnode *, int, struct ucred *, struct thread *); static int ext2_chown(struct vnode *, uid_t, gid_t, struct ucred *, struct thread *); static vop_close_t ext2_close; static vop_create_t ext2_create; static vop_fsync_t ext2_fsync; static vop_getattr_t ext2_getattr; static vop_ioctl_t ext2_ioctl; static vop_link_t ext2_link; static vop_mkdir_t ext2_mkdir; static vop_mknod_t ext2_mknod; static vop_open_t ext2_open; static vop_pathconf_t ext2_pathconf; static vop_print_t ext2_print; static vop_read_t ext2_read; static vop_readlink_t ext2_readlink; static vop_remove_t ext2_remove; static vop_rename_t ext2_rename; static vop_rmdir_t ext2_rmdir; static vop_setattr_t ext2_setattr; static vop_strategy_t ext2_strategy; static vop_symlink_t ext2_symlink; static vop_write_t ext2_write; static vop_deleteextattr_t ext2_deleteextattr; static vop_getextattr_t ext2_getextattr; static vop_listextattr_t ext2_listextattr; static vop_setextattr_t ext2_setextattr; static vop_vptofh_t ext2_vptofh; static vop_close_t ext2fifo_close; static vop_kqfilter_t ext2fifo_kqfilter; /* Global vfs data structures for ext2. */ struct vop_vector ext2_vnodeops = { .vop_default = &default_vnodeops, .vop_access = ext2_access, .vop_bmap = ext2_bmap, .vop_cachedlookup = ext2_lookup, .vop_close = ext2_close, .vop_create = ext2_create, .vop_fsync = ext2_fsync, .vop_getpages = vnode_pager_local_getpages, .vop_getpages_async = vnode_pager_local_getpages_async, .vop_getattr = ext2_getattr, .vop_inactive = ext2_inactive, .vop_ioctl = ext2_ioctl, .vop_link = ext2_link, .vop_lookup = vfs_cache_lookup, .vop_mkdir = ext2_mkdir, .vop_mknod = ext2_mknod, .vop_open = ext2_open, .vop_pathconf = ext2_pathconf, .vop_poll = vop_stdpoll, .vop_print = ext2_print, .vop_read = ext2_read, .vop_readdir = ext2_readdir, .vop_readlink = ext2_readlink, .vop_reallocblks = ext2_reallocblks, .vop_reclaim = ext2_reclaim, .vop_remove = ext2_remove, .vop_rename = ext2_rename, .vop_rmdir = ext2_rmdir, .vop_setattr = ext2_setattr, .vop_strategy = ext2_strategy, .vop_symlink = ext2_symlink, .vop_write = ext2_write, .vop_deleteextattr = ext2_deleteextattr, .vop_getextattr = ext2_getextattr, .vop_listextattr = ext2_listextattr, .vop_setextattr = ext2_setextattr, #ifdef UFS_ACL .vop_getacl = ext2_getacl, .vop_setacl = ext2_setacl, .vop_aclcheck = ext2_aclcheck, #endif /* UFS_ACL */ .vop_vptofh = ext2_vptofh, }; VFS_VOP_VECTOR_REGISTER(ext2_vnodeops); struct vop_vector ext2_fifoops = { .vop_default = &fifo_specops, .vop_access = ext2_access, .vop_close = ext2fifo_close, .vop_fsync = ext2_fsync, .vop_getattr = ext2_getattr, .vop_inactive = ext2_inactive, .vop_kqfilter = ext2fifo_kqfilter, .vop_pathconf = ext2_pathconf, .vop_print = ext2_print, .vop_read = VOP_PANIC, .vop_reclaim = ext2_reclaim, .vop_setattr = ext2_setattr, .vop_write = VOP_PANIC, .vop_vptofh = ext2_vptofh, }; VFS_VOP_VECTOR_REGISTER(ext2_fifoops); /* * A virgin directory (no blushing please). * Note that the type and namlen fields are reversed relative to ext2. * Also, we don't use `struct odirtemplate', since it would just cause * endianness problems. */ static struct dirtemplate mastertemplate = { - 0, 12, 1, EXT2_FT_DIR, ".", - 0, DIRBLKSIZ - 12, 2, EXT2_FT_DIR, ".." 
+ 0, htole16(12), 1, EXT2_FT_DIR, ".", + 0, htole16(DIRBLKSIZ - 12), 2, EXT2_FT_DIR, ".." }; static struct dirtemplate omastertemplate = { - 0, 12, 1, EXT2_FT_UNKNOWN, ".", - 0, DIRBLKSIZ - 12, 2, EXT2_FT_UNKNOWN, ".." + 0, htole16(12), 1, EXT2_FT_UNKNOWN, ".", + 0, htole16(DIRBLKSIZ - 12), 2, EXT2_FT_UNKNOWN, ".." }; static void ext2_itimes_locked(struct vnode *vp) { struct inode *ip; struct timespec ts; ASSERT_VI_LOCKED(vp, __func__); ip = VTOI(vp); if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) == 0) return; if ((vp->v_type == VBLK || vp->v_type == VCHR)) ip->i_flag |= IN_LAZYMOD; else ip->i_flag |= IN_MODIFIED; if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { vfs_timestamp(&ts); if (ip->i_flag & IN_ACCESS) { ip->i_atime = ts.tv_sec; ip->i_atimensec = ts.tv_nsec; } if (ip->i_flag & IN_UPDATE) { ip->i_mtime = ts.tv_sec; ip->i_mtimensec = ts.tv_nsec; ip->i_modrev++; } if (ip->i_flag & IN_CHANGE) { ip->i_ctime = ts.tv_sec; ip->i_ctimensec = ts.tv_nsec; } } ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE); } void ext2_itimes(struct vnode *vp) { VI_LOCK(vp); ext2_itimes_locked(vp); VI_UNLOCK(vp); } /* * Create a regular file */ static int ext2_create(struct vop_create_args *ap) { int error; error = ext2_makeinode(MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode), ap->a_dvp, ap->a_vpp, ap->a_cnp); if (error != 0) return (error); if ((ap->a_cnp->cn_flags & MAKEENTRY) != 0) cache_enter(ap->a_dvp, *ap->a_vpp, ap->a_cnp); return (0); } static int ext2_open(struct vop_open_args *ap) { if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR) return (EOPNOTSUPP); /* * Files marked append-only must be opened for appending. */ if ((VTOI(ap->a_vp)->i_flags & APPEND) && (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE) return (EPERM); vnode_create_vobject(ap->a_vp, VTOI(ap->a_vp)->i_size, ap->a_td); return (0); } /* * Close called. * * Update the times on the inode. */ static int ext2_close(struct vop_close_args *ap) { struct vnode *vp = ap->a_vp; VI_LOCK(vp); if (vp->v_usecount > 1) ext2_itimes_locked(vp); VI_UNLOCK(vp); return (0); } static int ext2_access(struct vop_access_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); accmode_t accmode = ap->a_accmode; int error; if (vp->v_type == VBLK || vp->v_type == VCHR) return (EOPNOTSUPP); /* * Disallow write attempts on read-only file systems; * unless the file is a socket, fifo, or a block or * character device resident on the file system. */ if (accmode & VWRITE) { switch (vp->v_type) { case VDIR: case VLNK: case VREG: if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: break; } } /* If immutable bit set, nobody gets to write it. */ if ((accmode & VWRITE) && (ip->i_flags & (SF_IMMUTABLE | SF_SNAPSHOT))) return (EPERM); error = vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid, ap->a_accmode, ap->a_cred, NULL); return (error); } static int ext2_getattr(struct vop_getattr_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); struct vattr *vap = ap->a_vap; ext2_itimes(vp); /* * Copy from inode table */ vap->va_fsid = dev2udev(ip->i_devvp->v_rdev); vap->va_fileid = ip->i_number; vap->va_mode = ip->i_mode & ~IFMT; vap->va_nlink = ip->i_nlink; vap->va_uid = ip->i_uid; vap->va_gid = ip->i_gid; vap->va_rdev = ip->i_rdev; vap->va_size = ip->i_size; vap->va_atime.tv_sec = ip->i_atime; vap->va_atime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_atimensec : 0; vap->va_mtime.tv_sec = ip->i_mtime; vap->va_mtime.tv_nsec = E2DI_HAS_XTIME(ip) ? 
ip->i_mtimensec : 0; vap->va_ctime.tv_sec = ip->i_ctime; vap->va_ctime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_ctimensec : 0; if E2DI_HAS_XTIME(ip) { vap->va_birthtime.tv_sec = ip->i_birthtime; vap->va_birthtime.tv_nsec = ip->i_birthnsec; } vap->va_flags = ip->i_flags; vap->va_gen = ip->i_gen; vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize; vap->va_bytes = dbtob((u_quad_t)ip->i_blocks); vap->va_type = IFTOVT(ip->i_mode); vap->va_filerev = ip->i_modrev; return (0); } /* * Set attribute vnode op. called from several syscalls */ static int ext2_setattr(struct vop_setattr_args *ap) { struct vattr *vap = ap->a_vap; struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); struct ucred *cred = ap->a_cred; struct thread *td = curthread; int error; /* * Check for unsettable attributes. */ if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) || (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) || (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) || ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) { return (EINVAL); } if (vap->va_flags != VNOVAL) { /* Disallow flags not supported by ext2fs. */ if (vap->va_flags & ~(SF_APPEND | SF_IMMUTABLE | UF_NODUMP)) return (EOPNOTSUPP); if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); /* * Callers may only modify the file flags on objects they * have VADMIN rights for. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td))) return (error); /* * Unprivileged processes and privileged processes in * jail() are not permitted to unset system flags, or * modify flags if any system flags are set. * Privileged non-jail processes may not modify system flags * if securelevel > 0 and any existing system flags are set. */ if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS)) { if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND)) { error = securelevel_gt(cred, 0); if (error) return (error); } } else { if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND) || ((vap->va_flags ^ ip->i_flags) & SF_SETTABLE)) return (EPERM); } ip->i_flags = vap->va_flags; ip->i_flag |= IN_CHANGE; if (ip->i_flags & (IMMUTABLE | APPEND)) return (0); } if (ip->i_flags & (IMMUTABLE | APPEND)) return (EPERM); /* * Go through the fields and update iff not VNOVAL. */ if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); if ((error = ext2_chown(vp, vap->va_uid, vap->va_gid, cred, td)) != 0) return (error); } if (vap->va_size != VNOVAL) { /* * Disallow write attempts on read-only file systems; * unless the file is a socket, fifo, or a block or * character device resident on the file system. */ switch (vp->v_type) { case VDIR: return (EISDIR); case VLNK: case VREG: if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: break; } if ((error = ext2_truncate(vp, vap->va_size, 0, cred, td)) != 0) return (error); } if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); /* * From utimes(2): * If times is NULL, ... The caller must be the owner of * the file, have permission to write the file, or be the * super-user. * If times is non-NULL, ... The caller must be the owner of * the file or be the super-user. 
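/*
 * Editor's sketch (not part of the patch): the utimes(2) rule quoted
 * above, and enforced by the VOP_ACCESS() calls that follow, reduces to
 * a small predicate -- explicit timestamps need ownership, while a NULL
 * times pointer (VA_UTIMES_NULL) is also satisfied by write permission.
 */
#include <stdbool.h>

bool
sketch_may_set_times(bool is_owner_or_root, bool times_is_null,
    bool may_write)
{
	return (is_owner_or_root || (times_is_null && may_write));
}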
*/ if ((error = VOP_ACCESS(vp, VADMIN, cred, td)) && ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || (error = VOP_ACCESS(vp, VWRITE, cred, td)))) return (error); ip->i_flag |= IN_CHANGE | IN_MODIFIED; if (vap->va_atime.tv_sec != VNOVAL) { ip->i_flag &= ~IN_ACCESS; ip->i_atime = vap->va_atime.tv_sec; ip->i_atimensec = vap->va_atime.tv_nsec; } if (vap->va_mtime.tv_sec != VNOVAL) { ip->i_flag &= ~IN_UPDATE; ip->i_mtime = vap->va_mtime.tv_sec; ip->i_mtimensec = vap->va_mtime.tv_nsec; } ip->i_birthtime = vap->va_birthtime.tv_sec; ip->i_birthnsec = vap->va_birthtime.tv_nsec; error = ext2_update(vp, 0); if (error) return (error); } error = 0; if (vap->va_mode != (mode_t)VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); error = ext2_chmod(vp, (int)vap->va_mode, cred, td); } return (error); } /* * Change the mode on a file. * Inode must be locked before calling. */ static int ext2_chmod(struct vnode *vp, int mode, struct ucred *cred, struct thread *td) { struct inode *ip = VTOI(vp); int error; /* * To modify the permissions on a file, must possess VADMIN * for that file. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td))) return (error); /* * Privileged processes may set the sticky bit on non-directories, * as well as set the setgid bit on a file with a group that the * process is not a member of. */ if (vp->v_type != VDIR && (mode & S_ISTXT)) { error = priv_check_cred(cred, PRIV_VFS_STICKYFILE); if (error) return (EFTYPE); } if (!groupmember(ip->i_gid, cred) && (mode & ISGID)) { error = priv_check_cred(cred, PRIV_VFS_SETGID); if (error) return (error); } ip->i_mode &= ~ALLPERMS; ip->i_mode |= (mode & ALLPERMS); ip->i_flag |= IN_CHANGE; return (0); } /* * Perform chown operation on inode ip; * inode must be locked prior to call. */ static int ext2_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred, struct thread *td) { struct inode *ip = VTOI(vp); uid_t ouid; gid_t ogid; int error = 0; if (uid == (uid_t)VNOVAL) uid = ip->i_uid; if (gid == (gid_t)VNOVAL) gid = ip->i_gid; /* * To modify the ownership of a file, must possess VADMIN * for that file. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td))) return (error); /* * To change the owner of a file, or change the group of a file * to a group of which we are not a member, the caller must * have privilege. */ if (uid != ip->i_uid || (gid != ip->i_gid && !groupmember(gid, cred))) { error = priv_check_cred(cred, PRIV_VFS_CHOWN); if (error) return (error); } ogid = ip->i_gid; ouid = ip->i_uid; ip->i_gid = gid; ip->i_uid = uid; ip->i_flag |= IN_CHANGE; if ((ip->i_mode & (ISUID | ISGID)) && (ouid != uid || ogid != gid)) { if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID) != 0) ip->i_mode &= ~(ISUID | ISGID); } return (0); } /* * Synch an open file. */ /* ARGSUSED */ static int ext2_fsync(struct vop_fsync_args *ap) { /* * Flush all dirty buffers associated with a vnode. */ vop_stdfsync(ap); return (ext2_update(ap->a_vp, ap->a_waitfor == MNT_WAIT)); } /* * Mknod vnode call */ /* ARGSUSED */ static int ext2_mknod(struct vop_mknod_args *ap) { struct vattr *vap = ap->a_vap; struct vnode **vpp = ap->a_vpp; struct inode *ip; ino_t ino; int error; error = ext2_makeinode(MAKEIMODE(vap->va_type, vap->va_mode), ap->a_dvp, vpp, ap->a_cnp); if (error) return (error); ip = VTOI(*vpp); ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; if (vap->va_rdev != VNOVAL) { /* * Want to be able to use this to make badblock * inodes, so don't truncate the dev number. 
*/ if (!(ip->i_flag & IN_E4EXTENTS)) ip->i_rdev = vap->va_rdev; } /* * Remove inode, then reload it through VFS_VGET so it is * checked to see if it is an alias of an existing entry in * the inode cache. XXX I don't believe this is necessary now. */ (*vpp)->v_type = VNON; ino = ip->i_number; /* Save this before vgone() invalidates ip. */ vgone(*vpp); vput(*vpp); error = VFS_VGET(ap->a_dvp->v_mount, ino, LK_EXCLUSIVE, vpp); if (error) { *vpp = NULL; return (error); } return (0); } static int ext2_remove(struct vop_remove_args *ap) { struct inode *ip; struct vnode *vp = ap->a_vp; struct vnode *dvp = ap->a_dvp; int error; ip = VTOI(vp); if ((ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) || (VTOI(dvp)->i_flags & APPEND)) { error = EPERM; goto out; } error = ext2_dirremove(dvp, ap->a_cnp); if (error == 0) { ip->i_nlink--; ip->i_flag |= IN_CHANGE; } out: return (error); } /* * link vnode call */ static int ext2_link(struct vop_link_args *ap) { struct vnode *vp = ap->a_vp; struct vnode *tdvp = ap->a_tdvp; struct componentname *cnp = ap->a_cnp; struct inode *ip; int error; #ifdef INVARIANTS if ((cnp->cn_flags & HASBUF) == 0) panic("ext2_link: no name"); #endif ip = VTOI(vp); if ((nlink_t)ip->i_nlink >= EXT4_LINK_MAX) { error = EMLINK; goto out; } if (ip->i_flags & (IMMUTABLE | APPEND)) { error = EPERM; goto out; } ip->i_nlink++; ip->i_flag |= IN_CHANGE; error = ext2_update(vp, !DOINGASYNC(vp)); if (!error) error = ext2_direnter(ip, tdvp, cnp); if (error) { ip->i_nlink--; ip->i_flag |= IN_CHANGE; } out: return (error); } static int ext2_inc_nlink(struct inode *ip) { ip->i_nlink++; if (S_ISDIR(ip->i_mode) && EXT2_HAS_RO_COMPAT_FEATURE(ip->i_e2fs, EXT2F_ROCOMPAT_DIR_NLINK) && ip->i_nlink > 1) { if (ip->i_nlink >= EXT4_LINK_MAX || ip->i_nlink == 2) ip->i_nlink = 1; } else if (ip->i_nlink > EXT4_LINK_MAX) { ip->i_nlink--; return (EMLINK); } return (0); } static void ext2_dec_nlink(struct inode *ip) { if (!S_ISDIR(ip->i_mode) || ip->i_nlink > 2) ip->i_nlink--; } /* * Rename system call. * rename("foo", "bar"); * is essentially * unlink("bar"); * link("foo", "bar"); * unlink("foo"); * but ``atomically''. Can't do full commit without saving state in the * inode on disk which isn't feasible at this time. Best we can do is * always guarantee the target exists. * * Basic algorithm is: * * 1) Bump link count on source while we're linking it to the * target. This also ensure the inode won't be deleted out * from underneath us while we work (it may be truncated by * a concurrent `trunc' or `open' for creation). * 2) Link source to destination. If destination already exists, * delete it first. * 3) Unlink source reference to inode if still around. If a * directory was moved and the parent of the destination * is different from the source, patch the ".." entry in the * directory. */ static int ext2_rename(struct vop_rename_args *ap) { struct vnode *tvp = ap->a_tvp; struct vnode *tdvp = ap->a_tdvp; struct vnode *fvp = ap->a_fvp; struct vnode *fdvp = ap->a_fdvp; struct componentname *tcnp = ap->a_tcnp; struct componentname *fcnp = ap->a_fcnp; struct inode *ip, *xp, *dp; struct dirtemplate *dirbuf; int doingdirectory = 0, oldparent = 0, newparent = 0; int error = 0; u_char namlen; #ifdef INVARIANTS if ((tcnp->cn_flags & HASBUF) == 0 || (fcnp->cn_flags & HASBUF) == 0) panic("ext2_rename: no name"); #endif /* * Check for cross-device rename. 
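/*
 * Editor's sketch (not part of the patch): a simplified model of the
 * link-count policy behind ext2_inc_nlink()/ext2_dec_nlink() above.
 * With the RO_COMPAT_DIR_NLINK feature a directory whose link count
 * would overflow is parked at 1, meaning "not tracked"; the == 2 test
 * keeps an already-parked directory parked when another subdirectory
 * is added.  Without the feature the increment simply fails.  The
 * limit below is an arbitrary stand-in for EXT4_LINK_MAX.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define	SKETCH_LINK_MAX	65000		/* stand-in for EXT4_LINK_MAX */

int
sketch_inc_nlink(uint32_t *nlink, bool is_dir, bool has_dir_nlink)
{
	(*nlink)++;
	if (is_dir && has_dir_nlink && *nlink > 1) {
		if (*nlink >= SKETCH_LINK_MAX || *nlink == 2)
			*nlink = 1;	/* park: "many links" */
	} else if (*nlink > SKETCH_LINK_MAX) {
		(*nlink)--;
		return (EMLINK);
	}
	return (0);
}

void
sketch_dec_nlink(uint32_t *nlink, bool is_dir)
{
	/* A directory at its 2-link baseline (or parked at 1) keeps its count. */
	if (!is_dir || *nlink > 2)
		(*nlink)--;
}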
*/ if ((fvp->v_mount != tdvp->v_mount) || (tvp && (fvp->v_mount != tvp->v_mount))) { error = EXDEV; abortit: if (tdvp == tvp) vrele(tdvp); else vput(tdvp); if (tvp) vput(tvp); vrele(fdvp); vrele(fvp); return (error); } if (tvp && ((VTOI(tvp)->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) || (VTOI(tdvp)->i_flags & APPEND))) { error = EPERM; goto abortit; } /* * Renaming a file to itself has no effect. The upper layers should * not call us in that case. Temporarily just warn if they do. */ if (fvp == tvp) { SDT_PROBE2(ext2fs, , vnops, trace, 1, "rename: fvp == tvp (can't happen)"); error = 0; goto abortit; } if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0) goto abortit; dp = VTOI(fdvp); ip = VTOI(fvp); if (ip->i_nlink >= EXT4_LINK_MAX && !EXT2_HAS_RO_COMPAT_FEATURE(ip->i_e2fs, EXT2F_ROCOMPAT_DIR_NLINK)) { VOP_UNLOCK(fvp); error = EMLINK; goto abortit; } if ((ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) || (dp->i_flags & APPEND)) { VOP_UNLOCK(fvp); error = EPERM; goto abortit; } if ((ip->i_mode & IFMT) == IFDIR) { /* * Avoid ".", "..", and aliases of "." for obvious reasons. */ if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') || dp == ip || (fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT || (ip->i_flag & IN_RENAME)) { VOP_UNLOCK(fvp); error = EINVAL; goto abortit; } ip->i_flag |= IN_RENAME; oldparent = dp->i_number; doingdirectory++; } vrele(fdvp); /* * When the target exists, both the directory * and target vnodes are returned locked. */ dp = VTOI(tdvp); xp = NULL; if (tvp) xp = VTOI(tvp); /* * 1) Bump link count while we're moving stuff * around. If we crash somewhere before * completing our work, the link count * may be wrong, but correctable. */ ext2_inc_nlink(ip); ip->i_flag |= IN_CHANGE; if ((error = ext2_update(fvp, !DOINGASYNC(fvp))) != 0) { VOP_UNLOCK(fvp); goto bad; } /* * If ".." must be changed (ie the directory gets a new * parent) then the source directory must not be in the * directory hierarchy above the target, as this would * orphan everything below the source directory. Also * the user must have write permission in the source so * as to be able to change "..". We must repeat the call * to namei, as the parent directory is unlocked by the * call to checkpath(). */ error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_thread); VOP_UNLOCK(fvp); if (oldparent != dp->i_number) newparent = dp->i_number; if (doingdirectory && newparent) { if (error) /* write access check above */ goto bad; if (xp != NULL) vput(tvp); error = ext2_checkpath(ip, dp, tcnp->cn_cred); if (error) goto out; VREF(tdvp); error = relookup(tdvp, &tvp, tcnp); if (error) goto out; vrele(tdvp); dp = VTOI(tdvp); xp = NULL; if (tvp) xp = VTOI(tvp); } /* * 2) If target doesn't exist, link the target * to the source and unlink the source. * Otherwise, rewrite the target directory * entry to reference the source inode and * expunge the original entry's existence. */ if (xp == NULL) { if (dp->i_devvp != ip->i_devvp) panic("ext2_rename: EXDEV"); /* * Account for ".." in new directory. * When source and destination have the same * parent we don't fool with the link count. 
*/ if (doingdirectory && newparent) { error = ext2_inc_nlink(dp); if (error) goto bad; dp->i_flag |= IN_CHANGE; error = ext2_update(tdvp, !DOINGASYNC(tdvp)); if (error) goto bad; } error = ext2_direnter(ip, tdvp, tcnp); if (error) { if (doingdirectory && newparent) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; (void)ext2_update(tdvp, 1); } goto bad; } vput(tdvp); } else { if (xp->i_devvp != dp->i_devvp || xp->i_devvp != ip->i_devvp) panic("ext2_rename: EXDEV"); /* * Short circuit rename(foo, foo). */ if (xp->i_number == ip->i_number) panic("ext2_rename: same file"); /* * If the parent directory is "sticky", then the user must * own the parent directory, or the destination of the rename, * otherwise the destination may not be changed (except by * root). This implements append-only directories. */ if ((dp->i_mode & S_ISTXT) && tcnp->cn_cred->cr_uid != 0 && tcnp->cn_cred->cr_uid != dp->i_uid && xp->i_uid != tcnp->cn_cred->cr_uid) { error = EPERM; goto bad; } /* * Target must be empty if a directory and have no links * to it. Also, ensure source and target are compatible * (both directories, or both not directories). */ if ((xp->i_mode & IFMT) == IFDIR) { if (!ext2_dirempty(xp, dp->i_number, tcnp->cn_cred)) { error = ENOTEMPTY; goto bad; } if (!doingdirectory) { error = ENOTDIR; goto bad; } cache_purge(tdvp); } else if (doingdirectory) { error = EISDIR; goto bad; } error = ext2_dirrewrite(dp, ip, tcnp); if (error) goto bad; /* * If the target directory is in the same * directory as the source directory, * decrement the link count on the parent * of the target directory. */ if (doingdirectory && !newparent) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; } vput(tdvp); /* * Adjust the link count of the target to * reflect the dirrewrite above. If this is * a directory it is empty and there are * no links to it, so we can squash the inode and * any space associated with it. We disallowed * renaming over top of a directory with links to * it above, as the remaining link would point to * a directory without "." or ".." entries. */ ext2_dec_nlink(xp); if (doingdirectory) { if (xp->i_nlink > 2) panic("ext2_rename: linked directory"); error = ext2_truncate(tvp, (off_t)0, IO_SYNC, tcnp->cn_cred, tcnp->cn_thread); xp->i_nlink = 0; } xp->i_flag |= IN_CHANGE; vput(tvp); xp = NULL; } /* * 3) Unlink the source. */ fcnp->cn_flags &= ~MODMASK; fcnp->cn_flags |= LOCKPARENT | LOCKLEAF; VREF(fdvp); error = relookup(fdvp, &fvp, fcnp); if (error == 0) vrele(fdvp); if (fvp != NULL) { xp = VTOI(fvp); dp = VTOI(fdvp); } else { /* * From name has disappeared. IN_RENAME is not sufficient * to protect against directory races due to timing windows, * so we can't panic here. */ vrele(ap->a_fvp); return (0); } /* * Ensure that the directory entry still exists and has not * changed while the new name has been entered. If the source is * a file then the entry may have been unlinked or renamed. In * either case there is no further work to be done. If the source * is a directory then it cannot have been rmdir'ed; its link * count of three would cause a rmdir to fail with ENOTEMPTY. * The IN_RENAME flag ensures that it cannot be moved by another * rename. */ if (xp != ip) { /* * From name resolves to a different inode. IN_RENAME is * not sufficient protection against timing window races * so we can't panic here. */ } else { /* * If the source is a directory with a * new parent, the link count of the old * parent directory must be decremented * and ".." set to point to the new parent. 
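/*
 * Editor's sketch (not part of the patch): the hunk just below now
 * stores the new parent's inode number into the ".." entry with
 * htole32().  In a classic ext2 directory block the 12-byte "." entry
 * comes first, so ".." starts at offset 12 (the same offset handed to
 * ext2_dirbad() in the code that follows), and its first four bytes
 * are the little-endian inode number.  A fuller layout sketch
 * accompanies the mkdir template further down.
 */
#include <stdint.h>

void
sketch_patch_dotdot_ino(uint8_t *dirblock, uint32_t newparent)
{
	uint8_t *dotdot = dirblock + 12;	/* past the "." entry */

	dotdot[0] = newparent & 0xff;		/* little-endian inode */
	dotdot[1] = (newparent >> 8) & 0xff;
	dotdot[2] = (newparent >> 16) & 0xff;
	dotdot[3] = (newparent >> 24) & 0xff;
}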
*/ if (doingdirectory && newparent) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; dirbuf = malloc(dp->i_e2fs->e2fs_bsize, M_TEMP, M_WAITOK | M_ZERO); if (!dirbuf) { error = ENOMEM; goto bad; } error = vn_rdwr(UIO_READ, fvp, (caddr_t)dirbuf, ip->i_e2fs->e2fs_bsize, (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_NOMACCHECK, tcnp->cn_cred, NOCRED, NULL, NULL); if (error == 0) { /* Like ufs little-endian: */ namlen = dirbuf->dotdot_type; if (namlen != 2 || dirbuf->dotdot_name[0] != '.' || dirbuf->dotdot_name[1] != '.') { ext2_dirbad(xp, (doff_t)12, "rename: mangled dir"); } else { - dirbuf->dotdot_ino = newparent; + dirbuf->dotdot_ino = htole32(newparent); /* * dirblock 0 could be htree root, * try both csum update functions. */ ext2_dirent_csum_set(ip, (struct ext2fs_direct_2 *)dirbuf); ext2_dx_csum_set(ip, (struct ext2fs_direct_2 *)dirbuf); (void)vn_rdwr(UIO_WRITE, fvp, (caddr_t)dirbuf, ip->i_e2fs->e2fs_bsize, (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_SYNC | IO_NOMACCHECK, tcnp->cn_cred, NOCRED, NULL, NULL); cache_purge(fdvp); } } free(dirbuf, M_TEMP); } error = ext2_dirremove(fdvp, fcnp); if (!error) { ext2_dec_nlink(xp); xp->i_flag |= IN_CHANGE; } xp->i_flag &= ~IN_RENAME; } if (dp) vput(fdvp); if (xp) vput(fvp); vrele(ap->a_fvp); return (error); bad: if (xp) vput(ITOV(xp)); vput(ITOV(dp)); out: if (doingdirectory) ip->i_flag &= ~IN_RENAME; if (vn_lock(fvp, LK_EXCLUSIVE) == 0) { ext2_dec_nlink(ip); ip->i_flag |= IN_CHANGE; ip->i_flag &= ~IN_RENAME; vput(fvp); } else vrele(fvp); return (error); } #ifdef UFS_ACL static int ext2_do_posix1e_acl_inheritance_dir(struct vnode *dvp, struct vnode *tvp, mode_t dmode, struct ucred *cred, struct thread *td) { int error; struct inode *ip = VTOI(tvp); struct acl *dacl, *acl; acl = acl_alloc(M_WAITOK); dacl = acl_alloc(M_WAITOK); /* * Retrieve default ACL from parent, if any. */ error = VOP_GETACL(dvp, ACL_TYPE_DEFAULT, acl, cred, td); switch (error) { case 0: /* * Retrieved a default ACL, so merge mode and ACL if * necessary. If the ACL is empty, fall through to * the "not defined or available" case. */ if (acl->acl_cnt != 0) { dmode = acl_posix1e_newfilemode(dmode, acl); ip->i_mode = dmode; *dacl = *acl; ext2_sync_acl_from_inode(ip, acl); break; } /* FALLTHROUGH */ case EOPNOTSUPP: /* * Just use the mode as-is. */ ip->i_mode = dmode; error = 0; goto out; default: goto out; } error = VOP_SETACL(tvp, ACL_TYPE_ACCESS, acl, cred, td); if (error == 0) error = VOP_SETACL(tvp, ACL_TYPE_DEFAULT, dacl, cred, td); switch (error) { case 0: break; case EOPNOTSUPP: /* * XXX: This should not happen, as EOPNOTSUPP above * was supposed to free acl. */ #ifdef DEBUG printf("ext2_mkdir: VOP_GETACL() but no VOP_SETACL()\n"); #endif /* DEBUG */ break; default: goto out; } out: acl_free(acl); acl_free(dacl); return (error); } static int ext2_do_posix1e_acl_inheritance_file(struct vnode *dvp, struct vnode *tvp, mode_t mode, struct ucred *cred, struct thread *td) { int error; struct inode *ip = VTOI(tvp); struct acl *acl; acl = acl_alloc(M_WAITOK); /* * Retrieve default ACL for parent, if any. */ error = VOP_GETACL(dvp, ACL_TYPE_DEFAULT, acl, cred, td); switch (error) { case 0: /* * Retrieved a default ACL, so merge mode and ACL if * necessary. */ if (acl->acl_cnt != 0) { /* * Two possible ways for default ACL to not * be present. First, the EA can be * undefined, or second, the default ACL can * be blank. If it's blank, fall through to * the it's not defined case. 
*/ mode = acl_posix1e_newfilemode(mode, acl); ip->i_mode = mode; ext2_sync_acl_from_inode(ip, acl); break; } /* FALLTHROUGH */ case EOPNOTSUPP: /* * Just use the mode as-is. */ ip->i_mode = mode; error = 0; goto out; default: goto out; } error = VOP_SETACL(tvp, ACL_TYPE_ACCESS, acl, cred, td); switch (error) { case 0: break; case EOPNOTSUPP: /* * XXX: This should not happen, as EOPNOTSUPP above was * supposed to free acl. */ printf("ufs_do_posix1e_acl_inheritance_file: VOP_GETACL() " "but no VOP_SETACL()\n"); /* panic("ufs_do_posix1e_acl_inheritance_file: VOP_GETACL() " "but no VOP_SETACL()"); */ break; default: goto out; } out: acl_free(acl); return (error); } #endif /* UFS_ACL */ /* * Mkdir system call */ static int ext2_mkdir(struct vop_mkdir_args *ap) { struct m_ext2fs *fs; struct vnode *dvp = ap->a_dvp; struct vattr *vap = ap->a_vap; struct componentname *cnp = ap->a_cnp; struct inode *ip, *dp; struct vnode *tvp; struct dirtemplate dirtemplate, *dtp; char *buf = NULL; int error, dmode; #ifdef INVARIANTS if ((cnp->cn_flags & HASBUF) == 0) panic("ext2_mkdir: no name"); #endif dp = VTOI(dvp); if ((nlink_t)dp->i_nlink >= EXT4_LINK_MAX && !EXT2_HAS_RO_COMPAT_FEATURE(dp->i_e2fs, EXT2F_ROCOMPAT_DIR_NLINK)) { error = EMLINK; goto out; } dmode = vap->va_mode & 0777; dmode |= IFDIR; /* * Must simulate part of ext2_makeinode here to acquire the inode, * but not have it entered in the parent directory. The entry is * made later after writing "." and ".." entries. */ error = ext2_valloc(dvp, dmode, cnp->cn_cred, &tvp); if (error) goto out; ip = VTOI(tvp); fs = ip->i_e2fs; ip->i_gid = dp->i_gid; #ifdef SUIDDIR { /* * if we are hacking owners here, (only do this where told to) * and we are not giving it TOO root, (would subvert quotas) * then go ahead and give it to the other user. * The new directory also inherits the SUID bit. * If user's UID and dir UID are the same, * 'give it away' so that the SUID is still forced on. */ if ((dvp->v_mount->mnt_flag & MNT_SUIDDIR) && (dp->i_mode & ISUID) && dp->i_uid) { dmode |= ISUID; ip->i_uid = dp->i_uid; } else { ip->i_uid = cnp->cn_cred->cr_uid; } } #else ip->i_uid = cnp->cn_cred->cr_uid; #endif ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; ip->i_mode = dmode; tvp->v_type = VDIR; /* Rest init'd in getnewvnode(). */ ip->i_nlink = 2; if (cnp->cn_flags & ISWHITEOUT) ip->i_flags |= UF_OPAQUE; error = ext2_update(tvp, 1); /* * Bump link count in parent directory * to reflect work done below. Should * be done before reference is created * so reparation is possible if we crash. */ ext2_inc_nlink(dp); dp->i_flag |= IN_CHANGE; error = ext2_update(dvp, !DOINGASYNC(dvp)); if (error) goto bad; /* Initialize directory with "." and ".." from static template. 
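/*
 * Editor's sketch (not part of the patch): the hunks below convert the
 * static "."/".." template to on-disk byte order up front (htole32()
 * inode numbers, htole16() record lengths) and, with METADATA_CKSUM,
 * shorten the ".." record so a checksum tail fits at the end of the
 * block.  The function below lays out the same initial block with
 * explicit little-endian stores; "ftype" would be EXT2_FT_DIR when the
 * FTYPE feature is on and EXT2_FT_UNKNOWN otherwise, mirroring
 * mastertemplate/omastertemplate.
 */
#include <stdint.h>
#include <string.h>

static void
sketch_put16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static void
sketch_put32(uint8_t *p, uint32_t v)
{
	sketch_put16(p, v & 0xffff);
	sketch_put16(p + 2, v >> 16);
}

void
sketch_init_dirblock(uint8_t *block, uint16_t blocksize, uint32_t self_ino,
    uint32_t parent_ino, uint8_t ftype, uint16_t csum_tail_size)
{
	memset(block, 0, blocksize);

	/* ".": a fixed 12-byte entry. */
	sketch_put32(block + 0, self_ino);		/* inode */
	sketch_put16(block + 4, 12);			/* rec_len */
	block[6] = 1;					/* name_len */
	block[7] = ftype;				/* file_type */
	block[8] = '.';

	/*
	 * "..": its record length spans the rest of the block, minus any
	 * space reserved for the checksum tail.
	 */
	sketch_put32(block + 12, parent_ino);
	sketch_put16(block + 16, blocksize - 12 - csum_tail_size);
	block[18] = 2;
	block[19] = ftype;
	block[20] = '.';
	block[21] = '.';
}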
*/ if (EXT2_HAS_INCOMPAT_FEATURE(ip->i_e2fs, EXT2F_INCOMPAT_FTYPE)) dtp = &mastertemplate; else dtp = &omastertemplate; dirtemplate = *dtp; - dirtemplate.dot_ino = ip->i_number; - dirtemplate.dotdot_ino = dp->i_number; + dirtemplate.dot_ino = htole32(ip->i_number); + dirtemplate.dotdot_ino = htole32(dp->i_number); /* * note that in ext2 DIRBLKSIZ == blocksize, not DEV_BSIZE so let's * just redefine it - for this function only */ #undef DIRBLKSIZ #define DIRBLKSIZ VTOI(dvp)->i_e2fs->e2fs_bsize - dirtemplate.dotdot_reclen = DIRBLKSIZ - 12; + dirtemplate.dotdot_reclen = htole16(DIRBLKSIZ - 12); buf = malloc(DIRBLKSIZ, M_TEMP, M_WAITOK | M_ZERO); if (!buf) { error = ENOMEM; ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; goto bad; } if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_METADATA_CKSUM)) { - dirtemplate.dotdot_reclen -= sizeof(struct ext2fs_direct_tail); + dirtemplate.dotdot_reclen = + htole16(le16toh(dirtemplate.dotdot_reclen) - + sizeof(struct ext2fs_direct_tail)); ext2_init_dirent_tail(EXT2_DIRENT_TAIL(buf, DIRBLKSIZ)); } memcpy(buf, &dirtemplate, sizeof(dirtemplate)); ext2_dirent_csum_set(ip, (struct ext2fs_direct_2 *)buf); error = vn_rdwr(UIO_WRITE, tvp, (caddr_t)buf, DIRBLKSIZ, (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_SYNC | IO_NOMACCHECK, cnp->cn_cred, NOCRED, NULL, NULL); if (error) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; goto bad; } if (DIRBLKSIZ > VFSTOEXT2(dvp->v_mount)->um_mountp->mnt_stat.f_bsize) /* XXX should grow with balloc() */ panic("ext2_mkdir: blksize"); else { ip->i_size = DIRBLKSIZ; ip->i_flag |= IN_CHANGE; } #ifdef UFS_ACL if (dvp->v_mount->mnt_flag & MNT_ACLS) { error = ext2_do_posix1e_acl_inheritance_dir(dvp, tvp, dmode, cnp->cn_cred, cnp->cn_thread); if (error) goto bad; } #endif /* UFS_ACL */ /* Directory set up, now install its entry in the parent directory. */ error = ext2_direnter(ip, dvp, cnp); if (error) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; } bad: /* * No need to do an explicit VOP_TRUNCATE here, vrele will do this * for us because we set the link count to 0. */ if (error) { ip->i_nlink = 0; ip->i_flag |= IN_CHANGE; vput(tvp); } else *ap->a_vpp = tvp; out: free(buf, M_TEMP); return (error); #undef DIRBLKSIZ #define DIRBLKSIZ DEV_BSIZE } /* * Rmdir system call. */ static int ext2_rmdir(struct vop_rmdir_args *ap) { struct vnode *vp = ap->a_vp; struct vnode *dvp = ap->a_dvp; struct componentname *cnp = ap->a_cnp; struct inode *ip, *dp; int error; ip = VTOI(vp); dp = VTOI(dvp); /* * Verify the directory is empty (and valid). * (Rmdir ".." won't be valid since * ".." will contain a reference to * the current directory and thus be * non-empty.) */ if (!ext2_dirempty(ip, dp->i_number, cnp->cn_cred)) { error = ENOTEMPTY; goto out; } if ((dp->i_flags & APPEND) || (ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND))) { error = EPERM; goto out; } /* * Delete reference to directory before purging * inode. If we crash in between, the directory * will be reattached to lost+found, */ error = ext2_dirremove(dvp, cnp); if (error) goto out; ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; cache_purge(dvp); VOP_UNLOCK(dvp); /* * Truncate inode. The only stuff left * in the directory is "." and "..". 
*/ ip->i_nlink = 0; error = ext2_truncate(vp, (off_t)0, IO_SYNC, cnp->cn_cred, cnp->cn_thread); cache_purge(ITOV(ip)); if (vn_lock(dvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { VOP_UNLOCK(vp); vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); } out: return (error); } /* * symlink -- make a symbolic link */ static int ext2_symlink(struct vop_symlink_args *ap) { struct vnode *vp, **vpp = ap->a_vpp; struct inode *ip; int len, error; error = ext2_makeinode(IFLNK | ap->a_vap->va_mode, ap->a_dvp, vpp, ap->a_cnp); if (error) return (error); vp = *vpp; len = strlen(ap->a_target); if (len < vp->v_mount->mnt_maxsymlinklen) { ip = VTOI(vp); bcopy(ap->a_target, (char *)ip->i_shortlink, len); ip->i_size = len; ip->i_flag |= IN_CHANGE | IN_UPDATE; } else error = vn_rdwr(UIO_WRITE, vp, __DECONST(void *, ap->a_target), len, (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_NOMACCHECK, ap->a_cnp->cn_cred, NOCRED, NULL, NULL); if (error) vput(vp); return (error); } /* * Return target name of a symbolic link */ static int ext2_readlink(struct vop_readlink_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); int isize; isize = ip->i_size; if (isize < vp->v_mount->mnt_maxsymlinklen) { uiomove((char *)ip->i_shortlink, isize, ap->a_uio); return (0); } return (VOP_READ(vp, ap->a_uio, 0, ap->a_cred)); } /* * Calculate the logical to physical mapping if not done already, * then call the device strategy routine. * * In order to be able to swap to a file, the ext2_bmaparray() operation may not * deadlock on memory. See ext2_bmap() for details. */ static int ext2_strategy(struct vop_strategy_args *ap) { struct buf *bp = ap->a_bp; struct vnode *vp = ap->a_vp; struct bufobj *bo; daddr_t blkno; int error; if (vp->v_type == VBLK || vp->v_type == VCHR) panic("ext2_strategy: spec"); if (bp->b_blkno == bp->b_lblkno) { if (VTOI(ap->a_vp)->i_flag & IN_E4EXTENTS) error = ext4_bmapext(vp, bp->b_lblkno, &blkno, NULL, NULL); else error = ext2_bmaparray(vp, bp->b_lblkno, &blkno, NULL, NULL); bp->b_blkno = blkno; if (error) { bp->b_error = error; bp->b_ioflags |= BIO_ERROR; bufdone(bp); return (0); } if ((long)bp->b_blkno == -1) vfs_bio_clrbuf(bp); } if ((long)bp->b_blkno == -1) { bufdone(bp); return (0); } bp->b_iooffset = dbtob(bp->b_blkno); bo = VFSTOEXT2(vp->v_mount)->um_bo; BO_STRATEGY(bo, bp); return (0); } /* * Print out the contents of an inode. */ static int ext2_print(struct vop_print_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); vn_printf(ip->i_devvp, "\tino %ju", (uintmax_t)ip->i_number); if (vp->v_type == VFIFO) fifo_printinfo(vp); printf("\n"); return (0); } /* * Close wrapper for fifos. * * Update the times on the inode then do device close. */ static int ext2fifo_close(struct vop_close_args *ap) { struct vnode *vp = ap->a_vp; VI_LOCK(vp); if (vp->v_usecount > 1) ext2_itimes_locked(vp); VI_UNLOCK(vp); return (fifo_specops.vop_close(ap)); } /* * Kqfilter wrapper for fifos. * * Fall through to ext2 kqfilter routines if needed */ static int ext2fifo_kqfilter(struct vop_kqfilter_args *ap) { int error; error = fifo_specops.vop_kqfilter(ap); if (error) error = vfs_kqfilter(ap); return (error); } /* * Return POSIX pathconf information applicable to ext2 filesystems. 
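/*
 * Editor's sketch (not part of the patch): ext2_symlink() and
 * ext2_readlink() above share one policy -- a target shorter than the
 * mount's maxsymlinklen lives directly in the inode's block-pointer
 * area (i_shortlink), anything longer is written through ordinary file
 * data.  "inline_area" below stands in for that in-inode space.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

bool
sketch_store_symlink_inline(char *inline_area, size_t maxsymlinklen,
    const char *target, size_t len)
{
	if (len < maxsymlinklen) {
		memcpy(inline_area, target, len);	/* fast symlink */
		return (true);
	}
	return (false);		/* caller writes regular file data */
}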
*/ static int ext2_pathconf(struct vop_pathconf_args *ap) { int error = 0; switch (ap->a_name) { case _PC_LINK_MAX: if (EXT2_HAS_RO_COMPAT_FEATURE(VTOI(ap->a_vp)->i_e2fs, EXT2F_ROCOMPAT_DIR_NLINK)) *ap->a_retval = INT_MAX; else *ap->a_retval = EXT4_LINK_MAX; break; case _PC_NAME_MAX: *ap->a_retval = NAME_MAX; break; case _PC_PIPE_BUF: if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO) *ap->a_retval = PIPE_BUF; else error = EINVAL; break; case _PC_CHOWN_RESTRICTED: *ap->a_retval = 1; break; case _PC_NO_TRUNC: *ap->a_retval = 1; break; #ifdef UFS_ACL case _PC_ACL_EXTENDED: if (ap->a_vp->v_mount->mnt_flag & MNT_ACLS) *ap->a_retval = 1; else *ap->a_retval = 0; break; case _PC_ACL_PATH_MAX: if (ap->a_vp->v_mount->mnt_flag & MNT_ACLS) *ap->a_retval = ACL_MAX_ENTRIES; else *ap->a_retval = 3; break; #endif /* UFS_ACL */ case _PC_MIN_HOLE_SIZE: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize; break; case _PC_PRIO_IO: *ap->a_retval = 0; break; case _PC_SYNC_IO: *ap->a_retval = 0; break; case _PC_ALLOC_SIZE_MIN: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_bsize; break; case _PC_FILESIZEBITS: *ap->a_retval = 64; break; case _PC_REC_INCR_XFER_SIZE: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize; break; case _PC_REC_MAX_XFER_SIZE: *ap->a_retval = -1; /* means ``unlimited'' */ break; case _PC_REC_MIN_XFER_SIZE: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize; break; case _PC_REC_XFER_ALIGN: *ap->a_retval = PAGE_SIZE; break; case _PC_SYMLINK_MAX: *ap->a_retval = MAXPATHLEN; break; default: error = vop_stdpathconf(ap); break; } return (error); } /* * Vnode operation to remove a named attribute. */ static int ext2_deleteextattr(struct vop_deleteextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VWRITE); if (error) return (error); error = ENOATTR; if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_delete(ip, ap->a_attrnamespace, ap->a_name); if (error != ENOATTR) return (error); } if (ip->i_facl) error = ext2_extattr_block_delete(ip, ap->a_attrnamespace, ap->a_name); return (error); } /* * Vnode operation to retrieve a named extended attribute. */ static int ext2_getextattr(struct vop_getextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VREAD); if (error) return (error); if (ap->a_size != NULL) *ap->a_size = 0; error = ENOATTR; if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_get(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio, ap->a_size); if (error != ENOATTR) return (error); } if (ip->i_facl) error = ext2_extattr_block_get(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio, ap->a_size); return (error); } /* * Vnode operation to retrieve extended attributes on a vnode. 
*/ static int ext2_listextattr(struct vop_listextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VREAD); if (error) return (error); if (ap->a_size != NULL) *ap->a_size = 0; if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_list(ip, ap->a_attrnamespace, ap->a_uio, ap->a_size); if (error) return (error); } if (ip->i_facl) error = ext2_extattr_block_list(ip, ap->a_attrnamespace, ap->a_uio, ap->a_size); return (error); } /* * Vnode operation to set a named attribute. */ static int ext2_setextattr(struct vop_setextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VWRITE); if (error) return (error); error = ext2_extattr_valid_attrname(ap->a_attrnamespace, ap->a_name); if (error) return (error); if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_set(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio); if (error != ENOSPC) return (error); } error = ext2_extattr_block_set(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio); return (error); } /* * Vnode pointer to File handle */ /* ARGSUSED */ static int ext2_vptofh(struct vop_vptofh_args *ap) { struct inode *ip; struct ufid *ufhp; ip = VTOI(ap->a_vp); ufhp = (struct ufid *)ap->a_fhp; ufhp->ufid_len = sizeof(struct ufid); ufhp->ufid_ino = ip->i_number; ufhp->ufid_gen = ip->i_gen; return (0); } /* * Initialize the vnode associated with a new inode, handle aliased * vnodes. */ int ext2_vinit(struct mount *mntp, struct vop_vector *fifoops, struct vnode **vpp) { struct inode *ip; struct vnode *vp; vp = *vpp; ip = VTOI(vp); vp->v_type = IFTOVT(ip->i_mode); /* * Only unallocated inodes should be of type VNON. */ if (ip->i_mode != 0 && vp->v_type == VNON) return (EINVAL); if (vp->v_type == VFIFO) vp->v_op = fifoops; if (ip->i_number == EXT2_ROOTINO) vp->v_vflag |= VV_ROOT; ip->i_modrev = init_va_filerev(); *vpp = vp; return (0); } /* * Allocate a new inode. */ static int ext2_makeinode(int mode, struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) { struct inode *ip, *pdir; struct vnode *tvp; int error; pdir = VTOI(dvp); #ifdef INVARIANTS if ((cnp->cn_flags & HASBUF) == 0) panic("ext2_makeinode: no name"); #endif *vpp = NULL; if ((mode & IFMT) == 0) mode |= IFREG; error = ext2_valloc(dvp, mode, cnp->cn_cred, &tvp); if (error) { return (error); } ip = VTOI(tvp); ip->i_gid = pdir->i_gid; #ifdef SUIDDIR { /* * if we are * not the owner of the directory, * and we are hacking owners here, (only do this where told to) * and we are not giving it TOO root, (would subvert quotas) * then go ahead and give it to the other user. * Note that this drops off the execute bits for security. 
*/ if ((dvp->v_mount->mnt_flag & MNT_SUIDDIR) && (pdir->i_mode & ISUID) && (pdir->i_uid != cnp->cn_cred->cr_uid) && pdir->i_uid) { ip->i_uid = pdir->i_uid; mode &= ~07111; } else { ip->i_uid = cnp->cn_cred->cr_uid; } } #else ip->i_uid = cnp->cn_cred->cr_uid; #endif ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; ip->i_mode = mode; tvp->v_type = IFTOVT(mode); /* Rest init'd in getnewvnode(). */ ip->i_nlink = 1; if ((ip->i_mode & ISGID) && !groupmember(ip->i_gid, cnp->cn_cred)) { if (priv_check_cred(cnp->cn_cred, PRIV_VFS_RETAINSUGID)) ip->i_mode &= ~ISGID; } if (cnp->cn_flags & ISWHITEOUT) ip->i_flags |= UF_OPAQUE; /* * Make sure inode goes to disk before directory entry. */ error = ext2_update(tvp, !DOINGASYNC(tvp)); if (error) goto bad; #ifdef UFS_ACL if (dvp->v_mount->mnt_flag & MNT_ACLS) { error = ext2_do_posix1e_acl_inheritance_file(dvp, tvp, mode, cnp->cn_cred, cnp->cn_thread); if (error) goto bad; } #endif /* UFS_ACL */ error = ext2_direnter(ip, dvp, cnp); if (error) goto bad; *vpp = tvp; return (0); bad: /* * Write error occurred trying to update the inode * or the directory so must deallocate the inode. */ ip->i_nlink = 0; ip->i_flag |= IN_CHANGE; vput(tvp); return (error); } /* * Vnode op for reading. */ static int ext2_read(struct vop_read_args *ap) { struct vnode *vp; struct inode *ip; struct uio *uio; struct m_ext2fs *fs; struct buf *bp; daddr_t lbn, nextlbn; off_t bytesinfile; long size, xfersize, blkoffset; int error, orig_resid, seqcount; int ioflag; vp = ap->a_vp; uio = ap->a_uio; ioflag = ap->a_ioflag; seqcount = ap->a_ioflag >> IO_SEQSHIFT; ip = VTOI(vp); #ifdef INVARIANTS if (uio->uio_rw != UIO_READ) panic("%s: mode", "ext2_read"); if (vp->v_type == VLNK) { if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen) panic("%s: short symlink", "ext2_read"); } else if (vp->v_type != VREG && vp->v_type != VDIR) panic("%s: type %d", "ext2_read", vp->v_type); #endif orig_resid = uio->uio_resid; KASSERT(orig_resid >= 0, ("ext2_read: uio->uio_resid < 0")); if (orig_resid == 0) return (0); KASSERT(uio->uio_offset >= 0, ("ext2_read: uio->uio_offset < 0")); fs = ip->i_e2fs; if (uio->uio_offset < ip->i_size && uio->uio_offset >= fs->e2fs_maxfilesize) return (EOVERFLOW); for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) { if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0) break; lbn = lblkno(fs, uio->uio_offset); nextlbn = lbn + 1; size = blksize(fs, ip, lbn); blkoffset = blkoff(fs, uio->uio_offset); xfersize = fs->e2fs_fsize - blkoffset; if (uio->uio_resid < xfersize) xfersize = uio->uio_resid; if (bytesinfile < xfersize) xfersize = bytesinfile; if (lblktosize(fs, nextlbn) >= ip->i_size) error = bread(vp, lbn, size, NOCRED, &bp); else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { error = cluster_read(vp, ip->i_size, lbn, size, NOCRED, blkoffset + uio->uio_resid, seqcount, 0, &bp); } else if (seqcount > 1) { u_int nextsize = blksize(fs, ip, nextlbn); error = breadn(vp, lbn, size, &nextlbn, &nextsize, 1, NOCRED, &bp); } else error = bread(vp, lbn, size, NOCRED, &bp); if (error) { brelse(bp); bp = NULL; break; } /* * We should only get non-zero b_resid when an I/O error * has occurred, which should cause us to break above. * However, if the short read did not cause an error, * then we want to ensure that we do not uiomove bad * or uninitialized data. 
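/*
 * Editor's sketch (not part of the patch): the ext2_read() loop above
 * sizes each transfer by starting from the bytes left in the current
 * block and clamping to the caller's residual, to the bytes left in
 * the file, and finally (in the lines that follow) to what a short
 * read actually returned.  The simplified function below assumes a
 * full-sized block buffer; "bsize" stands in for the fragment size the
 * loop uses.
 */
#include <stddef.h>
#include <stdint.h>

size_t
sketch_read_xfersize(uint64_t offset, uint64_t filesize, uint32_t bsize,
    size_t uio_resid, size_t resid_after_read)
{
	uint64_t bytesinfile;
	size_t blkoffset, xfersize, delivered;

	if (offset >= filesize)
		return (0);				/* at or past EOF */
	bytesinfile = filesize - offset;
	blkoffset = (size_t)(offset % bsize);
	xfersize = bsize - blkoffset;			/* rest of block */
	if (uio_resid < xfersize)
		xfersize = uio_resid;
	if (bytesinfile < xfersize)
		xfersize = (size_t)bytesinfile;
	delivered = bsize - resid_after_read;		/* short read? */
	if (delivered < xfersize)
		xfersize = delivered;
	return (xfersize);
}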
*/ size -= bp->b_resid; if (size < xfersize) { if (size == 0) break; xfersize = size; } error = uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio); if (error) break; vfs_bio_brelse(bp, ioflag); } /* * This can only happen in the case of an error because the loop * above resets bp to NULL on each iteration and on normal * completion has not set a new value into it. so it must have come * from a 'break' statement */ if (bp != NULL) vfs_bio_brelse(bp, ioflag); if ((error == 0 || uio->uio_resid != orig_resid) && (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) ip->i_flag |= IN_ACCESS; return (error); } static int ext2_ioctl(struct vop_ioctl_args *ap) { struct vnode *vp; int error; vp = ap->a_vp; switch (ap->a_command) { case FIOSEEKDATA: if (!(VTOI(vp)->i_flag & IN_E4EXTENTS)) { error = vn_lock(vp, LK_SHARED); if (error == 0) { error = ext2_bmap_seekdata(vp, (off_t *)ap->a_data); VOP_UNLOCK(vp); } else error = EBADF; return (error); } case FIOSEEKHOLE: return (vn_bmap_seekhole(vp, ap->a_command, (off_t *)ap->a_data, ap->a_cred)); default: return (ENOTTY); } } /* * Vnode op for writing. */ static int ext2_write(struct vop_write_args *ap) { struct vnode *vp; struct uio *uio; struct inode *ip; struct m_ext2fs *fs; struct buf *bp; daddr_t lbn; off_t osize; int blkoffset, error, flags, ioflag, resid, size, seqcount, xfersize; ioflag = ap->a_ioflag; uio = ap->a_uio; vp = ap->a_vp; seqcount = ioflag >> IO_SEQSHIFT; ip = VTOI(vp); #ifdef INVARIANTS if (uio->uio_rw != UIO_WRITE) panic("%s: mode", "ext2_write"); #endif switch (vp->v_type) { case VREG: if (ioflag & IO_APPEND) uio->uio_offset = ip->i_size; if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) return (EPERM); /* FALLTHROUGH */ case VLNK: break; case VDIR: /* XXX differs from ffs -- this is called from ext2_mkdir(). */ if ((ioflag & IO_SYNC) == 0) panic("ext2_write: nonsync dir write"); break; default: panic("ext2_write: type %p %d (%jd,%jd)", (void *)vp, vp->v_type, (intmax_t)uio->uio_offset, (intmax_t)uio->uio_resid); } KASSERT(uio->uio_resid >= 0, ("ext2_write: uio->uio_resid < 0")); KASSERT(uio->uio_offset >= 0, ("ext2_write: uio->uio_offset < 0")); fs = ip->i_e2fs; if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->e2fs_maxfilesize) return (EFBIG); /* * Maybe this should be above the vnode op call, but so long as * file servers have no limits, I don't think it matters. */ if (vn_rlimit_fsize(vp, uio, uio->uio_td)) return (EFBIG); resid = uio->uio_resid; osize = ip->i_size; if (seqcount > BA_SEQMAX) flags = BA_SEQMAX << BA_SEQSHIFT; else flags = seqcount << BA_SEQSHIFT; if ((ioflag & IO_SYNC) && !DOINGASYNC(vp)) flags |= IO_SYNC; for (error = 0; uio->uio_resid > 0;) { lbn = lblkno(fs, uio->uio_offset); blkoffset = blkoff(fs, uio->uio_offset); xfersize = fs->e2fs_fsize - blkoffset; if (uio->uio_resid < xfersize) xfersize = uio->uio_resid; if (uio->uio_offset + xfersize > ip->i_size) vnode_pager_setsize(vp, uio->uio_offset + xfersize); /* * We must perform a read-before-write if the transfer size * does not cover the entire buffer. 
*/ if (fs->e2fs_bsize > xfersize) flags |= BA_CLRBUF; else flags &= ~BA_CLRBUF; error = ext2_balloc(ip, lbn, blkoffset + xfersize, ap->a_cred, &bp, flags); if (error != 0) break; if ((ioflag & (IO_SYNC | IO_INVAL)) == (IO_SYNC | IO_INVAL)) bp->b_flags |= B_NOCACHE; if (uio->uio_offset + xfersize > ip->i_size) ip->i_size = uio->uio_offset + xfersize; size = blksize(fs, ip, lbn) - bp->b_resid; if (size < xfersize) xfersize = size; error = uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio); /* * If the buffer is not already filled and we encounter an * error while trying to fill it, we have to clear out any * garbage data from the pages instantiated for the buffer. * If we do not, a failed uiomove() during a write can leave * the prior contents of the pages exposed to a userland mmap. * * Note that we need only clear buffers with a transfer size * equal to the block size because buffers with a shorter * transfer size were cleared above by the call to ext2_balloc() * with the BA_CLRBUF flag set. * * If the source region for uiomove identically mmaps the * buffer, uiomove() performed the NOP copy, and the buffer * content remains valid because the page fault handler * validated the pages. */ if (error != 0 && (bp->b_flags & B_CACHE) == 0 && fs->e2fs_bsize == xfersize) vfs_bio_clrbuf(bp); vfs_bio_set_flags(bp, ioflag); /* * If IO_SYNC each buffer is written synchronously. Otherwise * if we have a severe page deficiency write the buffer * asynchronously. Otherwise try to cluster, and if that * doesn't do it then either do an async write (if O_DIRECT), * or a delayed write (if not). */ if (ioflag & IO_SYNC) { (void)bwrite(bp); } else if (vm_page_count_severe() || buf_dirty_count_severe() || (ioflag & IO_ASYNC)) { bp->b_flags |= B_CLUSTEROK; bawrite(bp); } else if (xfersize + blkoffset == fs->e2fs_fsize) { if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) { bp->b_flags |= B_CLUSTEROK; cluster_write(vp, bp, ip->i_size, seqcount, 0); } else { bawrite(bp); } } else if (ioflag & IO_DIRECT) { bp->b_flags |= B_CLUSTEROK; bawrite(bp); } else { bp->b_flags |= B_CLUSTEROK; bdwrite(bp); } if (error || xfersize == 0) break; } /* * If we successfully wrote any data, and we are not the superuser * we clear the setuid and setgid bits as a precaution against * tampering. */ if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ap->a_cred) { if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID)) ip->i_mode &= ~(ISUID | ISGID); } if (error) { if (ioflag & IO_UNIT) { (void)ext2_truncate(vp, osize, ioflag & IO_SYNC, ap->a_cred, uio->uio_td); uio->uio_offset -= resid - uio->uio_resid; uio->uio_resid = resid; } } if (uio->uio_resid != resid) { ip->i_flag |= IN_CHANGE | IN_UPDATE; if (ioflag & IO_SYNC) error = ext2_update(vp, 1); } return (error); } Index: head/sys/fs/ext2fs/ext2fs.h =================================================================== --- head/sys/fs/ext2fs/ext2fs.h (revision 361135) +++ head/sys/fs/ext2fs/ext2fs.h (revision 361136) @@ -1,432 +1,432 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science * * $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Aditya Sarawgi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * */ #ifndef _FS_EXT2FS_EXT2FS_H_ #define _FS_EXT2FS_EXT2FS_H_ #include /* * Super block for an ext2fs file system. */ struct ext2fs { uint32_t e2fs_icount; /* Inode count */ uint32_t e2fs_bcount; /* blocks count */ uint32_t e2fs_rbcount; /* reserved blocks count */ uint32_t e2fs_fbcount; /* free blocks count */ uint32_t e2fs_ficount; /* free inodes count */ uint32_t e2fs_first_dblock; /* first data block */ uint32_t e2fs_log_bsize; /* block size = 1024*(2^e2fs_log_bsize) */ uint32_t e2fs_log_fsize; /* fragment size */ uint32_t e2fs_bpg; /* blocks per group */ uint32_t e2fs_fpg; /* frags per group */ uint32_t e2fs_ipg; /* inodes per group */ uint32_t e2fs_mtime; /* mount time */ uint32_t e2fs_wtime; /* write time */ uint16_t e2fs_mnt_count; /* mount count */ uint16_t e2fs_max_mnt_count; /* max mount count */ uint16_t e2fs_magic; /* magic number */ uint16_t e2fs_state; /* file system state */ uint16_t e2fs_beh; /* behavior on errors */ uint16_t e2fs_minrev; /* minor revision level */ uint32_t e2fs_lastfsck; /* time of last fsck */ uint32_t e2fs_fsckintv; /* max time between fscks */ uint32_t e2fs_creator; /* creator OS */ uint32_t e2fs_rev; /* revision level */ uint16_t e2fs_ruid; /* default uid for reserved blocks */ uint16_t e2fs_rgid; /* default gid for reserved blocks */ /* EXT2_DYNAMIC_REV superblocks */ uint32_t e2fs_first_ino; /* first non-reserved inode */ uint16_t e2fs_inode_size; /* size of inode structure */ uint16_t e2fs_block_group_nr; /* block grp number of this sblk*/ uint32_t e2fs_features_compat; /* compatible feature set */ uint32_t e2fs_features_incompat; /* incompatible feature set */ uint32_t e2fs_features_rocompat; /* RO-compatible feature set */ uint8_t e2fs_uuid[16]; /* 128-bit uuid for volume */ char e2fs_vname[16]; /* volume name */ char e2fs_fsmnt[64]; /* name mounted on */ uint32_t e2fs_algo; /* For compression */ uint8_t e2fs_prealloc; /* # of blocks for old prealloc */ uint8_t e2fs_dir_prealloc; /* # of blocks for old prealloc dirs */ uint16_t e2fs_reserved_ngdb; /* # of reserved gd blocks for resize */ char e3fs_journal_uuid[16]; /* uuid of journal superblock */ uint32_t e3fs_journal_inum; /* inode number of journal file */ uint32_t e3fs_journal_dev; /* device number of journal file */ uint32_t e3fs_last_orphan; /* start of list of inodes to delete */ uint32_t e3fs_hash_seed[4]; /* HTREE hash seed */ char e3fs_def_hash_version;/* Default hash version to use */ char 
e3fs_jnl_backup_type; uint16_t e3fs_desc_size; /* size of group descriptor */ uint32_t e3fs_default_mount_opts; uint32_t e3fs_first_meta_bg; /* First metablock block group */ uint32_t e3fs_mkfs_time; /* when the fs was created */ uint32_t e3fs_jnl_blks[17]; /* backup of the journal inode */ uint32_t e4fs_bcount_hi; /* high bits of blocks count */ uint32_t e4fs_rbcount_hi; /* high bits of reserved blocks count */ uint32_t e4fs_fbcount_hi; /* high bits of free blocks count */ uint16_t e4fs_min_extra_isize; /* all inodes have some bytes */ uint16_t e4fs_want_extra_isize;/* inodes must reserve some bytes */ uint32_t e4fs_flags; /* miscellaneous flags */ uint16_t e4fs_raid_stride; /* RAID stride */ uint16_t e4fs_mmpintv; /* seconds to wait in MMP checking */ uint64_t e4fs_mmpblk; /* block for multi-mount protection */ uint32_t e4fs_raid_stripe_wid; /* blocks on data disks (N * stride) */ uint8_t e4fs_log_gpf; /* FLEX_BG group size */ uint8_t e4fs_chksum_type; /* metadata checksum algorithm used */ uint8_t e4fs_encrypt; /* versioning level for encryption */ uint8_t e4fs_reserved_pad; uint64_t e4fs_kbytes_written; /* number of lifetime kilobytes */ uint32_t e4fs_snapinum; /* inode number of active snapshot */ uint32_t e4fs_snapid; /* sequential ID of active snapshot */ uint64_t e4fs_snaprbcount; /* reserved blocks for active snapshot */ uint32_t e4fs_snaplist; /* inode number for on-disk snapshot */ uint32_t e4fs_errcount; /* number of file system errors */ uint32_t e4fs_first_errtime; /* first time an error happened */ uint32_t e4fs_first_errino; /* inode involved in first error */ uint64_t e4fs_first_errblk; /* block involved of first error */ uint8_t e4fs_first_errfunc[32];/* function where error happened */ uint32_t e4fs_first_errline; /* line number where error happened */ uint32_t e4fs_last_errtime; /* most recent time of an error */ uint32_t e4fs_last_errino; /* inode involved in last error */ uint32_t e4fs_last_errline; /* line number where error happened */ uint64_t e4fs_last_errblk; /* block involved of last error */ uint8_t e4fs_last_errfunc[32]; /* function where error happened */ uint8_t e4fs_mount_opts[64]; uint32_t e4fs_usrquota_inum; /* inode for tracking user quota */ uint32_t e4fs_grpquota_inum; /* inode for tracking group quota */ uint32_t e4fs_overhead_clusters;/* overhead blocks/clusters */ uint32_t e4fs_backup_bgs[2]; /* groups with sparse_super2 SBs */ uint8_t e4fs_encrypt_algos[4];/* encryption algorithms in use */ uint8_t e4fs_encrypt_pw_salt[16];/* salt used for string2key */ uint32_t e4fs_lpf_ino; /* location of the lost+found inode */ uint32_t e4fs_proj_quota_inum; /* inode for tracking project quota */ uint32_t e4fs_chksum_seed; /* checksum seed */ uint32_t e4fs_reserved[98]; /* padding to the end of the block */ uint32_t e4fs_sbchksum; /* superblock checksum */ }; /* * The path name on which the file system is mounted is maintained * in fs_fsmnt. MAXMNTLEN defines the amount of space allocated in * the super block for this name. 
*/ #define MAXMNTLEN 512 /* * In-Memory Superblock */ struct m_ext2fs { struct ext2fs * e2fs; char e2fs_fsmnt[MAXMNTLEN];/* name mounted on */ char e2fs_ronly; /* mounted read-only flag */ char e2fs_fmod; /* super block modified flag */ uint64_t e2fs_bcount; /* blocks count */ uint64_t e2fs_rbcount; /* reserved blocks count */ uint64_t e2fs_fbcount; /* free blocks count */ + uint32_t e2fs_ficount; /* free inodes count */ uint32_t e2fs_bsize; /* Block size */ uint32_t e2fs_bshift; /* calc of logical block no */ uint32_t e2fs_bpg; /* Number of blocks per group */ int64_t e2fs_qbmask; /* = s_blocksize -1 */ uint32_t e2fs_fsbtodb; /* Shift to get disk block */ uint32_t e2fs_ipg; /* Number of inodes per group */ uint32_t e2fs_ipb; /* Number of inodes per block */ uint32_t e2fs_itpg; /* Number of inode table per group */ uint32_t e2fs_fsize; /* Size of fragments per block */ uint32_t e2fs_fpb; /* Number of fragments per block */ uint32_t e2fs_fpg; /* Number of fragments per group */ uint32_t e2fs_gdbcount; /* Number of group descriptors */ uint32_t e2fs_gcount; /* Number of groups */ uint32_t e2fs_isize; /* Size of inode */ uint32_t e2fs_total_dir; /* Total number of directories */ uint8_t *e2fs_contigdirs; /* (u) # of contig. allocated dirs */ char e2fs_wasvalid; /* valid at mount time */ off_t e2fs_maxfilesize; struct ext2_gd *e2fs_gd; /* Group Descriptors */ int32_t e2fs_contigsumsize; /* size of cluster summary array */ int32_t *e2fs_maxcluster; /* max cluster in each cyl group */ struct csum *e2fs_clustersum; /* cluster summary in each cyl group */ int32_t e2fs_uhash; /* 3 if hash should be signed, 0 if not */ uint32_t e2fs_csum_seed; /* sb checksum seed */ }; /* cluster summary information */ struct csum { int8_t cs_init; /* cluster summary has been initialized */ int32_t *cs_sum; /* cluster summary array */ }; /* * The second extended file system magic number */ #define E2FS_MAGIC 0xEF53 /* * Revision levels */ #define E2FS_REV0 0 /* The good old (original) format */ #define E2FS_REV1 1 /* V2 format w/ dynamic inode sizes */ #define E2FS_REV0_INODE_SIZE 128 /* * Metadata checksum algorithm codes */ #define EXT4_CRC32C_CHKSUM 1 /* * compatible/incompatible features */ #define EXT2F_COMPAT_PREALLOC 0x0001 #define EXT2F_COMPAT_IMAGIC_INODES 0x0002 #define EXT2F_COMPAT_HASJOURNAL 0x0004 #define EXT2F_COMPAT_EXT_ATTR 0x0008 #define EXT2F_COMPAT_RESIZE 0x0010 #define EXT2F_COMPAT_DIRHASHINDEX 0x0020 #define EXT2F_COMPAT_LAZY_BG 0x0040 #define EXT2F_COMPAT_EXCLUDE_BITMAP 0x0100 #define EXT2F_COMPAT_SPARSESUPER2 0x0200 #define EXT2F_ROCOMPAT_SPARSESUPER 0x0001 #define EXT2F_ROCOMPAT_LARGEFILE 0x0002 #define EXT2F_ROCOMPAT_BTREE_DIR 0x0004 #define EXT2F_ROCOMPAT_HUGE_FILE 0x0008 #define EXT2F_ROCOMPAT_GDT_CSUM 0x0010 #define EXT2F_ROCOMPAT_DIR_NLINK 0x0020 #define EXT2F_ROCOMPAT_EXTRA_ISIZE 0x0040 #define EXT2F_ROCOMPAT_HAS_SNAPSHOT 0x0080 #define EXT2F_ROCOMPAT_QUOTA 0x0100 #define EXT2F_ROCOMPAT_BIGALLOC 0x0200 #define EXT2F_ROCOMPAT_METADATA_CKSUM 0x0400 #define EXT2F_ROCOMPAT_REPLICA 0x0800 #define EXT2F_ROCOMPAT_READONLY 0x1000 #define EXT2F_ROCOMPAT_PROJECT 0x2000 #define EXT2F_INCOMPAT_COMP 0x0001 #define EXT2F_INCOMPAT_FTYPE 0x0002 #define EXT2F_INCOMPAT_RECOVER 0x0004 #define EXT2F_INCOMPAT_JOURNAL_DEV 0x0008 #define EXT2F_INCOMPAT_META_BG 0x0010 #define EXT2F_INCOMPAT_EXTENTS 0x0040 #define EXT2F_INCOMPAT_64BIT 0x0080 #define EXT2F_INCOMPAT_MMP 0x0100 #define EXT2F_INCOMPAT_FLEX_BG 0x0200 #define EXT2F_INCOMPAT_EA_INODE 0x0400 #define EXT2F_INCOMPAT_DIRDATA 0x1000 #define 
EXT2F_INCOMPAT_CSUM_SEED 0x2000 #define EXT2F_INCOMPAT_LARGEDIR 0x4000 #define EXT2F_INCOMPAT_INLINE_DATA 0x8000 #define EXT2F_INCOMPAT_ENCRYPT 0x10000 struct ext2_feature { int mask; const char *name; }; static const struct ext2_feature compat[] = { { EXT2F_COMPAT_PREALLOC, "dir_prealloc" }, { EXT2F_COMPAT_IMAGIC_INODES, "imagic_inodes" }, { EXT2F_COMPAT_HASJOURNAL, "has_journal" }, { EXT2F_COMPAT_EXT_ATTR, "ext_attr" }, { EXT2F_COMPAT_RESIZE, "resize_inode" }, { EXT2F_COMPAT_DIRHASHINDEX, "dir_index" }, { EXT2F_COMPAT_EXCLUDE_BITMAP, "snapshot_bitmap" }, { EXT2F_COMPAT_SPARSESUPER2, "sparse_super2" } }; static const struct ext2_feature ro_compat[] = { { EXT2F_ROCOMPAT_SPARSESUPER, "sparse_super" }, { EXT2F_ROCOMPAT_LARGEFILE, "large_file" }, { EXT2F_ROCOMPAT_BTREE_DIR, "btree_dir" }, { EXT2F_ROCOMPAT_HUGE_FILE, "huge_file" }, { EXT2F_ROCOMPAT_GDT_CSUM, "uninit_groups" }, { EXT2F_ROCOMPAT_DIR_NLINK, "dir_nlink" }, { EXT2F_ROCOMPAT_EXTRA_ISIZE, "extra_isize" }, { EXT2F_ROCOMPAT_HAS_SNAPSHOT, "snapshot" }, { EXT2F_ROCOMPAT_QUOTA, "quota" }, { EXT2F_ROCOMPAT_BIGALLOC, "bigalloc" }, { EXT2F_ROCOMPAT_METADATA_CKSUM, "metadata_csum" }, { EXT2F_ROCOMPAT_REPLICA, "replica" }, { EXT2F_ROCOMPAT_READONLY, "ro" }, { EXT2F_ROCOMPAT_PROJECT, "project" } }; static const struct ext2_feature incompat[] = { { EXT2F_INCOMPAT_COMP, "compression" }, { EXT2F_INCOMPAT_FTYPE, "filetype" }, { EXT2F_INCOMPAT_RECOVER, "needs_recovery" }, { EXT2F_INCOMPAT_JOURNAL_DEV, "journal_dev" }, { EXT2F_INCOMPAT_META_BG, "meta_bg" }, { EXT2F_INCOMPAT_EXTENTS, "extents" }, { EXT2F_INCOMPAT_64BIT, "64bit" }, { EXT2F_INCOMPAT_MMP, "mmp" }, { EXT2F_INCOMPAT_FLEX_BG, "flex_bg" }, { EXT2F_INCOMPAT_EA_INODE, "ea_inode" }, { EXT2F_INCOMPAT_DIRDATA, "dirdata" }, { EXT2F_INCOMPAT_CSUM_SEED, "metadata_csum_seed" }, { EXT2F_INCOMPAT_LARGEDIR, "large_dir" }, { EXT2F_INCOMPAT_INLINE_DATA, "inline_data" }, { EXT2F_INCOMPAT_ENCRYPT, "encrypt" } }; /* * Features supported in this implementation * * We support the following REV1 features: * - EXT2F_ROCOMPAT_SPARSESUPER * - EXT2F_ROCOMPAT_LARGEFILE * - EXT2F_ROCOMPAT_EXTRA_ISIZE * - EXT2F_INCOMPAT_FTYPE * * We partially (read-only) support the following EXT4 features: * - EXT2F_ROCOMPAT_HUGE_FILE * - EXT2F_INCOMPAT_EXTENTS * */ #define EXT2F_COMPAT_SUPP EXT2F_COMPAT_DIRHASHINDEX #define EXT2F_ROCOMPAT_SUPP (EXT2F_ROCOMPAT_SPARSESUPER | \ EXT2F_ROCOMPAT_LARGEFILE | \ EXT2F_ROCOMPAT_GDT_CSUM | \ EXT2F_ROCOMPAT_METADATA_CKSUM | \ EXT2F_ROCOMPAT_DIR_NLINK | \ EXT2F_ROCOMPAT_HUGE_FILE | \ EXT2F_ROCOMPAT_EXTRA_ISIZE) #define EXT2F_INCOMPAT_SUPP (EXT2F_INCOMPAT_FTYPE | \ EXT2F_INCOMPAT_META_BG | \ EXT2F_INCOMPAT_EXTENTS | \ EXT2F_INCOMPAT_64BIT | \ EXT2F_INCOMPAT_FLEX_BG | \ EXT2F_INCOMPAT_CSUM_SEED) /* Assume that user mode programs are passing in an ext2fs superblock, not * a kernel struct super_block. This will allow us to call the feature-test * macros from user land. 
*/ #define EXT2_SB(sb) (sb) /* * Feature set definitions */ #define EXT2_HAS_COMPAT_FEATURE(sb,mask) \ - ( EXT2_SB(sb)->e2fs->e2fs_features_compat & htole32(mask) ) + ( le32toh(EXT2_SB(sb)->e2fs->e2fs_features_compat) & mask) #define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask) \ - ( EXT2_SB(sb)->e2fs->e2fs_features_rocompat & htole32(mask) ) + ( le32toh(EXT2_SB(sb)->e2fs->e2fs_features_rocompat) & mask) #define EXT2_HAS_INCOMPAT_FEATURE(sb,mask) \ - ( EXT2_SB(sb)->e2fs->e2fs_features_incompat & htole32(mask) ) + ( le32toh(EXT2_SB(sb)->e2fs->e2fs_features_incompat) & mask) /* * File clean flags */ #define E2FS_ISCLEAN 0x0001 /* Unmounted cleanly */ #define E2FS_ERRORS 0x0002 /* Errors detected */ /* * Filesystem miscellaneous flags */ #define E2FS_SIGNED_HASH 0x0001 #define E2FS_UNSIGNED_HASH 0x0002 #define EXT2_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ #define EXT2_BG_BLOCK_UNINIT 0x0002 /* Block bitmap not in use */ #define EXT2_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */ /* ext2 file system block group descriptor */ struct ext2_gd { uint32_t ext2bgd_b_bitmap; /* blocks bitmap block */ uint32_t ext2bgd_i_bitmap; /* inodes bitmap block */ uint32_t ext2bgd_i_tables; /* inodes table block */ uint16_t ext2bgd_nbfree; /* number of free blocks */ uint16_t ext2bgd_nifree; /* number of free inodes */ uint16_t ext2bgd_ndirs; /* number of directories */ uint16_t ext4bgd_flags; /* block group flags */ uint32_t ext4bgd_x_bitmap; /* snapshot exclusion bitmap loc. */ uint16_t ext4bgd_b_bmap_csum; /* block bitmap checksum */ uint16_t ext4bgd_i_bmap_csum; /* inode bitmap checksum */ uint16_t ext4bgd_i_unused; /* unused inode count */ uint16_t ext4bgd_csum; /* group descriptor checksum */ uint32_t ext4bgd_b_bitmap_hi; /* high bits of blocks bitmap block */ uint32_t ext4bgd_i_bitmap_hi; /* high bits of inodes bitmap block */ uint32_t ext4bgd_i_tables_hi; /* high bits of inodes table block */ uint16_t ext4bgd_nbfree_hi; /* high bits of number of free blocks */ uint16_t ext4bgd_nifree_hi; /* high bits of number of free inodes */ uint16_t ext4bgd_ndirs_hi; /* high bits of number of directories */ uint16_t ext4bgd_i_unused_hi; /* high bits of unused inode count */ uint32_t ext4bgd_x_bitmap_hi; /* high bits of snapshot exclusion */ uint16_t ext4bgd_b_bmap_csum_hi;/* high bits of block bitmap checksum */ uint16_t ext4bgd_i_bmap_csum_hi;/* high bits of inode bitmap checksum */ uint32_t ext4bgd_reserved; }; #define E2FS_REV0_GD_SIZE (sizeof(struct ext2_gd) / 2) #define E2FS_64BIT_GD_SIZE (sizeof(struct ext2_gd)) /* * Macro-instructions used to manage several block sizes */ #define EXT2_MIN_BLOCK_LOG_SIZE 10 #define EXT2_BLOCK_SIZE(s) ((s)->e2fs_bsize) #define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof(uint32_t)) #define EXT2_INODE_SIZE(s) (EXT2_SB(s)->e2fs_isize) /* * Macro-instructions used to manage fragments */ #define EXT2_MIN_FRAG_SIZE 1024 #define EXT2_MIN_FRAG_LOG_SIZE 10 #define EXT2_MAX_FRAG_LOG_SIZE 30 #define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->e2fs_fsize) #define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->e2fs_fpb) /* * Macro-instructions used to manage group descriptors */ #define EXT2_BLOCKS_PER_GROUP(s) (EXT2_SB(s)->e2fs_bpg) -#define EXT2_DESCS_PER_BLOCK(s) (EXT2_HAS_INCOMPAT_FEATURE((s), \ - EXT2F_INCOMPAT_64BIT) ? ((s)->e2fs_bsize / sizeof(struct ext2_gd)) : \ - ((s)->e2fs_bsize / E2FS_REV0_GD_SIZE)) +#define EXT2_DESCS_PER_BLOCK(s) (EXT2_HAS_INCOMPAT_FEATURE((s), \ + EXT2F_INCOMPAT_64BIT) ? 
((s)->e2fs_bsize / sizeof(struct ext2_gd)) : \ + ((s)->e2fs_bsize / E2FS_REV0_GD_SIZE)) /* * Macro-instructions used to manage inodes */ -#define EXT2_FIRST_INO(s) ((EXT2_SB(s)->e2fs->e2fs_rev == E2FS_REV0) ? \ - EXT2_FIRSTINO : \ - EXT2_SB(s)->e2fs->e2fs_first_ino) +#define EXT2_FIRST_INO(s) (le32toh((EXT2_SB(s)->e2fs->e2fs_rev) == \ + E2FS_REV0) ? EXT2_FIRSTINO : le32toh(EXT2_SB(s)->e2fs->e2fs_first_ino)) #endif /* !_FS_EXT2FS_EXT2FS_H_ */ Index: head/sys/fs/ext2fs/fs.h =================================================================== --- head/sys/fs/ext2fs/fs.h (revision 361135) +++ head/sys/fs/ext2fs/fs.h (revision 361136) @@ -1,169 +1,169 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)fs.h 8.7 (Berkeley) 4/19/94 * $FreeBSD$ */ #ifndef _FS_EXT2FS_FS_H_ #define _FS_EXT2FS_FS_H_ /* * Each disk drive contains some number of file systems. * A file system consists of a number of cylinder groups. * Each cylinder group has inodes and data. * * A file system is described by its super-block, which in turn * describes the cylinder groups. The super-block is critical * data and is replicated in each cylinder group to protect against * catastrophic loss. This is done at `newfs' time and the critical * super-block data does not change, so the copies need not be * referenced further unless disaster strikes. * * The first boot and super blocks are given in absolute disk addresses. * The byte-offset forms are preferred, as they don't imply a sector size. */ #define SBSIZE 1024 #define SBLOCK 2 /* * The path name on which the file system is mounted is maintained * in fs_fsmnt. MAXMNTLEN defines the amount of space allocated in * the super block for this name. */ #define MAXMNTLEN 512 /* * A summary of contiguous blocks of various sizes is maintained * in each cylinder group. 
Normally this is set by the initial * value of fs_maxcontig. * * XXX:FS_MAXCONTIG is set to 16 to conserve space. Here we set * EXT2_MAXCONTIG to 32 for better performance. */ #define EXT2_MAXCONTIG 32 /* * Grigoriy Orlov has done some extensive work to fine * tune the layout preferences for directories within a filesystem. * His algorithm can be tuned by adjusting the following parameters * which tell the system the average file size and the average number * of files per directory. These defaults are well selected for typical * filesystems, but may need to be tuned for odd cases like filesystems * being used for squid caches or news spools. * AVFPDIR is the expected number of files per directory. AVGDIRSIZE is * obtained by multiplying AVFPDIR and AVFILESIZ which is assumed to be * 16384. */ #define AFPDIR 64 #define AVGDIRSIZE 1048576 /* * Macros for access to superblock array structures */ /* * Turn file system block numbers into disk block addresses. * This maps file system blocks to device size blocks. */ #define fsbtodb(fs, b) ((daddr_t)(b) << (fs)->e2fs_fsbtodb) #define dbtofsb(fs, b) ((b) >> (fs)->e2fs_fsbtodb) /* get group containing inode */ #define ino_to_cg(fs, x) (((x) - 1) / (fs->e2fs_ipg)) /* get block containing inode from its number x */ #define ino_to_fsba(fs, x) \ - (e2fs_gd_get_i_tables(&(fs)->e2fs_gd[ino_to_cg((fs), (x))]) + \ - (((x) - 1) % (fs)->e2fs->e2fs_ipg) / (fs)->e2fs_ipb) + (e2fs_gd_get_i_tables(&(fs)->e2fs_gd[ino_to_cg((fs), (x))]) + \ + (((x) - 1) % (fs)->e2fs_ipg) / (fs)->e2fs_ipb) /* get offset for inode in block */ #define ino_to_fsbo(fs, x) ((x-1) % (fs->e2fs_ipb)) /* * Give cylinder group number for a file system block. * Give cylinder group block number for a file system block. */ -#define dtog(fs, d) (((d) - fs->e2fs->e2fs_first_dblock) / \ - EXT2_BLOCKS_PER_GROUP(fs)) -#define dtogd(fs, d) (((d) - fs->e2fs->e2fs_first_dblock) % \ - EXT2_BLOCKS_PER_GROUP(fs)) +#define dtog(fs, d) (((d) - le32toh(fs->e2fs->e2fs_first_dblock)) / \ + EXT2_BLOCKS_PER_GROUP(fs)) +#define dtogd(fs, d) (((d) - le32toh(fs->e2fs->e2fs_first_dblock)) % \ + EXT2_BLOCKS_PER_GROUP(fs)) /* * The following macros optimize certain frequently calculated * quantities by using shifts and masks in place of divisions * modulos and multiplications. */ #define blkoff(fs, loc) /* calculates (loc % fs->fs_bsize) */ \ ((loc) & (fs)->e2fs_qbmask) #define lblktosize(fs, blk) /* calculates (blk * fs->fs_bsize) */ \ ((blk) << (fs->e2fs_bshift)) #define lblkno(fs, loc) /* calculates (loc / fs->fs_bsize) */ \ ((loc) >> (fs->e2fs_bshift)) /* no fragments -> logical block number equal # of frags */ #define numfrags(fs, loc) /* calculates (loc / fs->fs_fsize) */ \ ((loc) >> (fs->e2fs_bshift)) #define fragroundup(fs, size) /* calculates roundup(size, fs->fs_fsize) */ \ roundup(size, fs->e2fs_fsize) /* was (((size) + (fs)->fs_qfmask) & (fs)->fs_fmask) */ /* * Determining the size of a file block in the file system. * easy w/o fragments */ #define blksize(fs, ip, lbn) ((fs)->e2fs_fsize) /* * INOPB is the number of inodes in a secondary storage block. */ #define INOPB(fs) (fs->e2fs_ipb) /* * NINDIR is the number of indirects in a file system block. */ #define NINDIR(fs) (EXT2_ADDR_PER_BLOCK(fs)) /* * Use if additional debug logging is required. */ /* #define EXT2FS_PRINT_EXTENTS */ #endif /* !_FS_EXT2FS_FS_H_ */
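The extended-attribute handlers earlier in this diff keep attributes in two places: the spare space of a large on-disk inode, and a separate xattr block referenced through i_facl. ext2_setextattr() tries the in-inode space first and only falls back to the block path when that attempt returns ENOSPC. The following userland sketch mirrors that fallback order; toy_inode and the set_in_inode()/set_in_block() helpers are hypothetical stand-ins for the in-kernel ext2_extattr_inode_set()/ext2_extattr_block_set(), not real APIs.

/*
 * Userland sketch of the fallback order used by ext2_setextattr():
 * try the in-inode extra space first (only present when the on-disk
 * inode is larger than the 128-byte rev 0 size), then fall back to
 * the external xattr block when the inode area reports ENOSPC.
 */
#include <errno.h>
#include <stdio.h>

struct toy_inode {
	int	inode_size;	/* on-disk inode size in bytes */
	int	inode_full;	/* pretend the in-inode area is full */
};

static int
set_in_inode(struct toy_inode *ip, const char *name)
{
	(void)name;
	return (ip->inode_full ? ENOSPC : 0);
}

static int
set_in_block(struct toy_inode *ip, const char *name)
{
	(void)ip;
	(void)name;
	return (0);
}

static int
set_xattr(struct toy_inode *ip, const char *name)
{
	int error;

	if (ip->inode_size != 128) {	/* E2FS_REV0_INODE_SIZE */
		error = set_in_inode(ip, name);
		if (error != ENOSPC)	/* success or a hard error */
			return (error);
	}
	return (set_in_block(ip, name));
}

int
main(void)
{
	struct toy_inode ip = { 256, 1 };

	printf("set_xattr -> %d\n", set_xattr(&ip, "user.test"));
	return (0);
}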
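ext2_write() above clips each transfer to the remainder of the current block (e2fs_fsize, which equals the block size since ext2 carries no fragments) and to the remaining user data, and requests a read-before-write (BA_CLRBUF) whenever the transfer does not cover the whole block. A minimal standalone sketch of that sizing decision, with made-up numbers:

/*
 * Minimal sketch of the per-iteration sizing in ext2_write(): the
 * transfer is clipped to the rest of the block and to the remaining
 * user data, and only a full-block transfer may skip the
 * read-before-write (BA_CLRBUF) step.  Numbers are made up.
 */
#include <stdio.h>

int
main(void)
{
	long bsize = 4096;	/* fs->e2fs_bsize */
	long blkoffset = 1000;	/* blkoff(fs, uio->uio_offset) */
	long resid = 1500;	/* uio->uio_resid */

	long xfersize = bsize - blkoffset;
	if (resid < xfersize)
		xfersize = resid;

	printf("xfersize=%ld, read-before-write: %s\n", xfersize,
	    bsize > xfersize ? "yes (BA_CLRBUF)" : "no");
	return (0);
}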
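This revision changes the feature-test macros in ext2fs.h from masking the raw little-endian superblock field with htole32(mask) to converting the field with le32toh() and masking in host byte order. Both forms detect the bit, but the converted form leaves the result in host order for any further use. A small standalone illustration, assuming FreeBSD's <sys/endian.h> (this is not kernel code):

/*
 * Standalone illustration of the two feature-test styles.  The on-disk
 * superblock fields are little-endian; converting them once with
 * le32toh() lets the rest of the code use plain host-order masks.
 */
#include <sys/endian.h>
#include <stdint.h>
#include <stdio.h>

#define EXT2F_INCOMPAT_EXTENTS	0x0040

int
main(void)
{
	uint32_t ondisk = htole32(EXT2F_INCOMPAT_EXTENTS); /* raw field */

	/* old form: swap the mask to match the raw field */
	int old_hit = (ondisk & htole32(EXT2F_INCOMPAT_EXTENTS)) != 0;

	/* new form: convert the field, mask in host order */
	int new_hit = (le32toh(ondisk) & EXT2F_INCOMPAT_EXTENTS) != 0;

	printf("old=%d new=%d\n", old_hit, new_hit);
	return (0);
}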
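The fs.h hunk likewise converts dtog()/dtogd() to read e2fs_first_dblock through le32toh() and points ino_to_fsba() at the host-order e2fs_ipg copy instead of the raw superblock field. The underlying math is plain group/offset division; an illustrative sketch with made-up geometry (the real macros take these values from the in-memory superblock):

/*
 * Illustration of the dtog()/dtogd()/ino_to_cg() arithmetic with
 * made-up geometry; the real macros read these values from the
 * in-memory superblock.
 */
#include <stdio.h>

int
main(void)
{
	unsigned first_dblock = 1;	/* 1 for 1k blocks, else 0 */
	unsigned bpg = 8192;		/* blocks per group */
	unsigned ipg = 2048;		/* inodes per group */

	unsigned blkno = 20000, ino = 5000;

	unsigned cg    = (blkno - first_dblock) / bpg;	/* dtog()  */
	unsigned cgoff = (blkno - first_dblock) % bpg;	/* dtogd() */
	unsigned icg   = (ino - 1) / ipg;		/* ino_to_cg() */

	printf("block %u -> group %u, offset %u; inode %u -> group %u\n",
	    blkno, cg, cgoff, ino, icg);
	return (0);
}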
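EXT2_DESCS_PER_BLOCK() in ext2fs.h picks the descriptor size from the 64bit incompat feature: the full struct ext2_gd when 64bit is set, otherwise the rev 0 half-size layout (E2FS_REV0_GD_SIZE). A sketch of that computation; the 64-byte figure below is hard-coded only for illustration and stands in for sizeof(struct ext2_gd).

/*
 * Sketch of EXT2_DESCS_PER_BLOCK(): a 64bit filesystem uses the full
 * group descriptor, otherwise only the first half (the rev 0 layout).
 */
#include <stdio.h>

int
main(void)
{
	unsigned bsize = 4096;			/* e2fs_bsize */
	unsigned full_gd = 64;			/* sizeof(struct ext2_gd) */
	unsigned rev0_gd = full_gd / 2;		/* E2FS_REV0_GD_SIZE */
	int has_64bit = 1;			/* EXT2F_INCOMPAT_64BIT? */

	printf("%u descriptors per block\n",
	    bsize / (has_64bit ? full_gd : rev0_gd));
	return (0);
}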