Index: head/sys/fs/ext2fs/ext2_alloc.c =================================================================== --- head/sys/fs/ext2fs/ext2_alloc.c (revision 327583) +++ head/sys/fs/ext2fs/ext2_alloc.c (revision 327584) @@ -1,1346 +1,1432 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static daddr_t ext2_alloccg(struct inode *, int, daddr_t, int); static daddr_t ext2_clusteralloc(struct inode *, int, daddr_t, int); static u_long ext2_dirpref(struct inode *); static e4fs_daddr_t ext2_hashalloc(struct inode *, int, long, int, daddr_t (*)(struct inode *, int, daddr_t, int)); static daddr_t ext2_nodealloccg(struct inode *, int, daddr_t, int); static daddr_t ext2_mapsearch(struct m_ext2fs *, char *, daddr_t); /* * Allocate a block in the filesystem. * * A preference may be optionally specified. If a preference is given * the following hierarchy is used to allocate a block: * 1) allocate the requested block. * 2) allocate a rotationally optimal block in the same cylinder. * 3) allocate a block in the same cylinder group. * 4) quadradically rehash into other cylinder groups, until an * available block is located. * If no block preference is given the following hierarchy is used * to allocate a block: * 1) allocate a block in the cylinder group that contains the * inode for the file. * 2) quadradically rehash into other cylinder groups, until an * available block is located. 
*/ int ext2_alloc(struct inode *ip, daddr_t lbn, e4fs_daddr_t bpref, int size, struct ucred *cred, e4fs_daddr_t *bnp) { struct m_ext2fs *fs; struct ext2mount *ump; e4fs_daddr_t bno; int cg; *bnp = 0; fs = ip->i_e2fs; ump = ip->i_ump; mtx_assert(EXT2_MTX(ump), MA_OWNED); #ifdef INVARIANTS if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) { vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n", (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt); panic("ext2_alloc: bad size"); } if (cred == NOCRED) panic("ext2_alloc: missing credential"); #endif /* INVARIANTS */ - if (size == fs->e2fs_bsize && fs->e2fs->e2fs_fbcount == 0) + if (size == fs->e2fs_bsize && fs->e2fs_fbcount == 0) goto nospace; if (cred->cr_uid != 0 && - fs->e2fs->e2fs_fbcount < fs->e2fs->e2fs_rbcount) + fs->e2fs_fbcount < fs->e2fs_rbcount) goto nospace; - if (bpref >= fs->e2fs->e2fs_bcount) + if (bpref >= fs->e2fs_bcount) bpref = 0; if (bpref == 0) cg = ino_to_cg(fs, ip->i_number); else cg = dtog(fs, bpref); bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize, ext2_alloccg); if (bno > 0) { /* set next_alloc fields as done in block_getblk */ ip->i_next_alloc_block = lbn; ip->i_next_alloc_goal = bno; ip->i_blocks += btodb(fs->e2fs_bsize); ip->i_flag |= IN_CHANGE | IN_UPDATE; *bnp = bno; return (0); } nospace: EXT2_UNLOCK(ump); ext2_fserr(fs, cred->cr_uid, "filesystem full"); uprintf("\n%s: write failed, filesystem is full\n", fs->e2fs_fsmnt); return (ENOSPC); } /* * Allocate EA's block for inode. */ e4fs_daddr_t ext2_alloc_meta(struct inode *ip) { struct m_ext2fs *fs; daddr_t blk; fs = ip->i_e2fs; EXT2_LOCK(ip->i_ump); blk = ext2_hashalloc(ip, ino_to_cg(fs, ip->i_number), 0, fs->e2fs_bsize, ext2_alloccg); if (0 == blk) EXT2_UNLOCK(ip->i_ump); return (blk); } /* * Reallocate a sequence of blocks into a contiguous sequence of blocks. * * The vnode and an array of buffer pointers for a range of sequential * logical blocks to be made contiguous is given. The allocator attempts * to find a range of sequential blocks starting as close as possible to * an fs_rotdelay offset from the end of the allocation for the logical * block immediately preceding the current range. If successful, the * physical block numbers in the buffer pointers and in the inode are * changed to reflect the new allocation. If unsuccessful, the allocation * is left unchanged. The success in doing the reallocation is returned. * Note that the error return is not reflected back to the user. Rather * the previous block allocation will be used. 
*/ static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW, 0, "EXT2FS filesystem"); static int doasyncfree = 1; SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "Use asychronous writes to update block pointers when freeing blocks"); static int doreallocblks = 0; SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, ""); int ext2_reallocblks(struct vop_reallocblks_args *ap) { struct m_ext2fs *fs; struct inode *ip; struct vnode *vp; struct buf *sbp, *ebp; uint32_t *bap, *sbap, *ebap; struct ext2mount *ump; struct cluster_save *buflist; struct indir start_ap[EXT2_NIADDR + 1], end_ap[EXT2_NIADDR + 1], *idp; e2fs_lbn_t start_lbn, end_lbn; int soff; e2fs_daddr_t newblk, blkno; int i, len, start_lvl, end_lvl, pref, ssize; if (doreallocblks == 0) return (ENOSPC); vp = ap->a_vp; ip = VTOI(vp); fs = ip->i_e2fs; ump = ip->i_ump; if (fs->e2fs_contigsumsize <= 0 || ip->i_flag & IN_E4EXTENTS) return (ENOSPC); buflist = ap->a_buflist; len = buflist->bs_nchildren; start_lbn = buflist->bs_children[0]->b_lblkno; end_lbn = start_lbn + len - 1; #ifdef INVARIANTS for (i = 1; i < len; i++) if (buflist->bs_children[i]->b_lblkno != start_lbn + i) panic("ext2_reallocblks: non-cluster"); #endif /* * If the cluster crosses the boundary for the first indirect * block, leave space for the indirect block. Indirect blocks * are initially laid out in a position after the last direct * block. Block reallocation would usually destroy locality by * moving the indirect block out of the way to make room for * data blocks if we didn't compensate here. We should also do * this for other indirect block boundaries, but it is only * important for the first one. */ if (start_lbn < EXT2_NDADDR && end_lbn >= EXT2_NDADDR) return (ENOSPC); /* * If the latest allocation is in a new cylinder group, assume that * the filesystem has decided to move and do not force it back to * the previous cylinder group. */ if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) != dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno))) return (ENOSPC); if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) || ext2_getlbns(vp, end_lbn, end_ap, &end_lvl)) return (ENOSPC); /* * Get the starting offset and block map for the first block. */ if (start_lvl == 0) { sbap = &ip->i_db[0]; soff = start_lbn; } else { idp = &start_ap[start_lvl - 1]; if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) { brelse(sbp); return (ENOSPC); } sbap = (u_int *)sbp->b_data; soff = idp->in_off; } /* * If the block range spans two block maps, get the second map. */ ebap = NULL; if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) { ssize = len; } else { #ifdef INVARIANTS if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn) panic("ext2_reallocblks: start == end"); #endif ssize = len - (idp->in_off + 1); if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp)) goto fail; ebap = (u_int *)ebp->b_data; } /* * Find the preferred location for the cluster. */ EXT2_LOCK(ump); pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0); /* * Search the block map looking for an allocation of the desired size. */ if ((newblk = (e2fs_daddr_t)ext2_hashalloc(ip, dtog(fs, pref), pref, len, ext2_clusteralloc)) == 0) { EXT2_UNLOCK(ump); goto fail; } /* * We have found a new contiguous block. * * First we have to replace the old block pointers with the new * block pointers in the inode and indirect blocks associated * with the file. 
*/ #ifdef DEBUG printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number, (intmax_t)start_lbn, (intmax_t)end_lbn); #endif /* DEBUG */ blkno = newblk; for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) { if (i == ssize) { bap = ebap; soff = -i; } #ifdef INVARIANTS if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap)) panic("ext2_reallocblks: alloc mismatch"); #endif #ifdef DEBUG printf(" %d,", *bap); #endif /* DEBUG */ *bap++ = blkno; } /* * Next we must write out the modified inode and indirect blocks. * For strict correctness, the writes should be synchronous since * the old block values may have been written to disk. In practise * they are almost never written, but if we are concerned about * strict correctness, the `doasyncfree' flag should be set to zero. * * The test on `doasyncfree' should be changed to test a flag * that shows whether the associated buffers and inodes have * been written. The flag should be set when the cluster is * started and cleared whenever the buffer or inode is flushed. * We can then check below to see if it is set, and do the * synchronous write only when it has been cleared. */ if (sbap != &ip->i_db[0]) { if (doasyncfree) bdwrite(sbp); else bwrite(sbp); } else { ip->i_flag |= IN_CHANGE | IN_UPDATE; if (!doasyncfree) ext2_update(vp, 1); } if (ssize < len) { if (doasyncfree) bdwrite(ebp); else bwrite(ebp); } /* * Last, free the old blocks and assign the new blocks to the buffers. */ #ifdef DEBUG printf("\n\tnew:"); #endif /* DEBUG */ for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) { ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->e2fs_bsize); buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno); #ifdef DEBUG printf(" %d,", blkno); #endif /* DEBUG */ } #ifdef DEBUG printf("\n"); #endif /* DEBUG */ return (0); fail: if (ssize < len) brelse(ebp); if (sbap != &ip->i_db[0]) brelse(sbp); return (ENOSPC); } /* * Allocate an inode in the filesystem. * */ int ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp) { struct timespec ts; struct inode *pip; struct m_ext2fs *fs; struct inode *ip; struct ext2mount *ump; ino_t ino, ipref; int error, cg; *vpp = NULL; pip = VTOI(pvp); fs = pip->i_e2fs; ump = pip->i_ump; EXT2_LOCK(ump); if (fs->e2fs->e2fs_ficount == 0) goto noinodes; /* * If it is a directory then obtain a cylinder group based on * ext2_dirpref else obtain it using ino_to_cg. The preferred inode is * always the next inode. */ if ((mode & IFMT) == IFDIR) { cg = ext2_dirpref(pip); if (fs->e2fs_contigdirs[cg] < 255) fs->e2fs_contigdirs[cg]++; } else { cg = ino_to_cg(fs, pip->i_number); if (fs->e2fs_contigdirs[cg] > 0) fs->e2fs_contigdirs[cg]--; } ipref = cg * fs->e2fs->e2fs_ipg + 1; ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg); if (ino == 0) goto noinodes; error = VFS_VGET(pvp->v_mount, ino, LK_EXCLUSIVE, vpp); if (error) { ext2_vfree(pvp, ino, mode); return (error); } ip = VTOI(*vpp); /* * The question is whether using VGET was such good idea at all: * Linux doesn't read the old inode in when it is allocating a * new one. I will set at least i_size and i_blocks to zero. */ ip->i_flag = 0; ip->i_size = 0; ip->i_blocks = 0; ip->i_mode = 0; ip->i_flags = 0; if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_EXTENTS) && (S_ISREG(mode) || S_ISDIR(mode))) ext4_ext_tree_init(ip); else memset(ip->i_data, 0, sizeof(ip->i_data)); /* * Set up a new generation number for this inode. * Avoid zero values. 
*/ do { ip->i_gen = arc4random(); } while (ip->i_gen == 0); vfs_timestamp(&ts); ip->i_birthtime = ts.tv_sec; ip->i_birthnsec = ts.tv_nsec; /* printf("ext2_valloc: allocated inode %d\n", ino); */ return (0); noinodes: EXT2_UNLOCK(ump); ext2_fserr(fs, cred->cr_uid, "out of inodes"); uprintf("\n%s: create/symlink failed, no inodes free\n", fs->e2fs_fsmnt); return (ENOSPC); } /* + * 64-bit compatible getters and setters for struct ext2_gd from ext2fs.h + */ +static uint64_t +e2fs_gd_get_b_bitmap(struct ext2_gd *gd) +{ + + return (((uint64_t)(gd->ext4bgd_b_bitmap_hi) << 32) | + gd->ext2bgd_b_bitmap); +} + +static uint64_t +e2fs_gd_get_i_bitmap(struct ext2_gd *gd) +{ + + return (((uint64_t)(gd->ext4bgd_i_bitmap_hi) << 32) | + gd->ext2bgd_i_bitmap); +} + +uint64_t +e2fs_gd_get_i_tables(struct ext2_gd *gd) +{ + + return (((uint64_t)(gd->ext4bgd_i_tables_hi) << 32) | + gd->ext2bgd_i_tables); +} + +static uint32_t +e2fs_gd_get_nbfree(struct ext2_gd *gd) +{ + + return (((uint32_t)(gd->ext4bgd_nbfree_hi) << 16) | + gd->ext2bgd_nbfree); +} + +static void +e2fs_gd_set_nbfree(struct ext2_gd *gd, uint32_t val) +{ + + gd->ext2bgd_nbfree = val & 0xffff; + gd->ext4bgd_nbfree_hi = val >> 16; +} + +static uint32_t +e2fs_gd_get_nifree(struct ext2_gd *gd) +{ + + return (((uint32_t)(gd->ext4bgd_nifree_hi) << 16) | + gd->ext2bgd_nifree); +} + +static void +e2fs_gd_set_nifree(struct ext2_gd *gd, uint32_t val) +{ + + gd->ext2bgd_nifree = val & 0xffff; + gd->ext4bgd_nifree_hi = val >> 16; +} + +uint32_t +e2fs_gd_get_ndirs(struct ext2_gd *gd) +{ + + return (((uint32_t)(gd->ext4bgd_ndirs_hi) << 16) | + gd->ext2bgd_ndirs); +} + +static void +e2fs_gd_set_ndirs(struct ext2_gd *gd, uint32_t val) +{ + + gd->ext2bgd_ndirs = val & 0xffff; + gd->ext4bgd_ndirs_hi = val >> 16; +} + +static uint32_t +e2fs_gd_get_i_unused(struct ext2_gd *gd) +{ + return (((uint32_t)(gd->ext4bgd_i_unused_hi) << 16) | + gd->ext4bgd_i_unused); +} + +static void +e2fs_gd_set_i_unused(struct ext2_gd *gd, uint32_t val) +{ + + gd->ext4bgd_i_unused = val & 0xffff; + gd->ext4bgd_i_unused_hi = val >> 16; +} + +/* * Find a cylinder to place a directory. * * The policy implemented by this algorithm is to allocate a * directory inode in the same cylinder group as its parent * directory, but also to reserve space for its files inodes * and data. Restrict the number of directories which may be * allocated one after another in the same cylinder group * without intervening allocation of files. * * If we allocate a first level directory then force allocation * in another cylinder group. * */ static u_long ext2_dirpref(struct inode *pip) { struct m_ext2fs *fs; int cg, prefcg, cgsize; - u_int avgifree, avgbfree, avgndir, curdirsize; - u_int minifree, minbfree, maxndir; + uint64_t avgbfree, minbfree; + u_int avgifree, avgndir, curdirsize; + u_int minifree, maxndir; u_int mincg, minndir; u_int dirsize, maxcontigdirs; mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED); fs = pip->i_e2fs; avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount; - avgbfree = fs->e2fs->e2fs_fbcount / fs->e2fs_gcount; + avgbfree = fs->e2fs_fbcount / fs->e2fs_gcount; avgndir = fs->e2fs_total_dir / fs->e2fs_gcount; /* * Force allocation in another cg if creating a first level dir. 
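The group-descriptor accessors added in this hunk all follow the same pattern: the legacy 16- or 32-bit on-disk counter carries the low half, and the matching *_hi field, meaningful only with the 64BIT feature, carries the upper half. A minimal, self-contained sketch of that split for a 32-bit free-block counter, using stand-in names rather than the real struct ext2_gd fields:

#include <assert.h>
#include <stdint.h>

/* Stand-ins for the low/high halves of one group-descriptor counter. */
struct gd_sketch {
        uint16_t nbfree_lo;     /* legacy on-disk field */
        uint16_t nbfree_hi;     /* upper half, valid with the 64BIT feature */
};

static uint32_t
gd_get_nbfree(const struct gd_sketch *gd)
{

        return (((uint32_t)gd->nbfree_hi << 16) | gd->nbfree_lo);
}

static void
gd_set_nbfree(struct gd_sketch *gd, uint32_t val)
{

        gd->nbfree_lo = val & 0xffff;
        gd->nbfree_hi = val >> 16;
}

int
main(void)
{
        struct gd_sketch gd;

        gd_set_nbfree(&gd, 0x12345);    /* does not fit in 16 bits */
        assert(gd_get_nbfree(&gd) == 0x12345);
        return (0);
}

The bitmap and inode-table addresses are combined the same way, only with 32-bit halves and a 32-bit shift, as in e2fs_gd_get_b_bitmap() and e2fs_gd_get_i_tables() above.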
*/ ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref"); if (ITOV(pip)->v_vflag & VV_ROOT) { prefcg = arc4random() % fs->e2fs_gcount; mincg = prefcg; minndir = fs->e2fs_ipg; for (cg = prefcg; cg < fs->e2fs_gcount; cg++) - if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir && - fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree && - fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) { + if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < minndir && + e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree && + e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= avgbfree) { mincg = cg; - minndir = fs->e2fs_gd[cg].ext2bgd_ndirs; + minndir = e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]); } for (cg = 0; cg < prefcg; cg++) - if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir && - fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree && - fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) { + if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < minndir && + e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree && + e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= avgbfree) { mincg = cg; - minndir = fs->e2fs_gd[cg].ext2bgd_ndirs; + minndir = e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]); } return (mincg); } /* * Count various limits which used for * optimal allocation of a directory inode. */ maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg); minifree = avgifree - avgifree / 4; if (minifree < 1) minifree = 1; minbfree = avgbfree - avgbfree / 4; if (minbfree < 1) minbfree = 1; cgsize = fs->e2fs_fsize * fs->e2fs_fpg; dirsize = AVGDIRSIZE; curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0; if (dirsize < curdirsize) dirsize = curdirsize; maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255); maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR); if (maxcontigdirs == 0) maxcontigdirs = 1; /* * Limit number of dirs in one cg and reserve space for * regular files, but only if we have no deficit in * inodes or space. */ prefcg = ino_to_cg(fs, pip->i_number); for (cg = prefcg; cg < fs->e2fs_gcount; cg++) - if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir && - fs->e2fs_gd[cg].ext2bgd_nifree >= minifree && - fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) { + if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < maxndir && + e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= minifree && + e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= minbfree) { if (fs->e2fs_contigdirs[cg] < maxcontigdirs) return (cg); } for (cg = 0; cg < prefcg; cg++) - if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir && - fs->e2fs_gd[cg].ext2bgd_nifree >= minifree && - fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) { + if (e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) < maxndir && + e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= minifree && + e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) >= minbfree) { if (fs->e2fs_contigdirs[cg] < maxcontigdirs) return (cg); } /* * This is a backstop when we have deficit in space. */ for (cg = prefcg; cg < fs->e2fs_gcount; cg++) - if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree) + if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree) return (cg); for (cg = 0; cg < prefcg; cg++) - if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree) + if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) >= avgifree) break; return (cg); } /* * Select the desired position for the next block in a file. * * we try to mimic what Remy does in inode_getblk/block_getblk * * we note: blocknr == 0 means that we're about to allocate either * a direct block or a pointer block at the first level of indirection * (In other words, stuff that will go in i_db[] or i_ib[]) * * blocknr != 0 means that we're allocating a block that is none * of the above. 
Then, blocknr tells us the number of the block * that will hold the pointer */ e4fs_daddr_t ext2_blkpref(struct inode *ip, e2fs_lbn_t lbn, int indx, e2fs_daddr_t *bap, e2fs_daddr_t blocknr) { struct m_ext2fs *fs; int tmp; fs = ip->i_e2fs; mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED); /* * If the next block is actually what we thought it is, then set the * goal to what we thought it should be. */ if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0) return ip->i_next_alloc_goal; /* * Now check whether we were provided with an array that basically * tells us previous blocks to which we want to stay close. */ if (bap) for (tmp = indx - 1; tmp >= 0; tmp--) if (bap[tmp]) return bap[tmp]; /* * Else lets fall back to the blocknr or, if there is none, follow * the rule that a block should be allocated near its inode. */ return (blocknr ? blocknr : (e2fs_daddr_t)(ip->i_block_group * EXT2_BLOCKS_PER_GROUP(fs)) + fs->e2fs->e2fs_first_dblock); } /* * Implement the cylinder overflow algorithm. * * The policy implemented by this algorithm is: * 1) allocate the block in its requested cylinder group. * 2) quadradically rehash on the cylinder group number. * 3) brute force search for a free block. */ static e4fs_daddr_t ext2_hashalloc(struct inode *ip, int cg, long pref, int size, daddr_t (*allocator) (struct inode *, int, daddr_t, int)) { struct m_ext2fs *fs; e4fs_daddr_t result; int i, icg = cg; mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED); fs = ip->i_e2fs; /* * 1: preferred cylinder group */ result = (*allocator)(ip, cg, pref, size); if (result) return (result); /* * 2: quadratic rehash */ for (i = 1; i < fs->e2fs_gcount; i *= 2) { cg += i; if (cg >= fs->e2fs_gcount) cg -= fs->e2fs_gcount; result = (*allocator)(ip, cg, 0, size); if (result) return (result); } /* * 3: brute force search * Note that we start at i == 2, since 0 was checked initially, * and 1 is always checked in the quadratic rehash. 
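A standalone sketch of the cylinder-group probe order that ext2_hashalloc() produces, assuming nothing beyond the loop structure shown in this function; gcount and icg in main() are arbitrary example values:

#include <stdio.h>

/*
 * Emit the groups in the order ext2_hashalloc() would try them:
 * the preferred group, then the quadratic rehash, then the
 * brute-force sweep that starts at icg + 2.
 */
static void
probe_order(int gcount, int icg)
{
        int cg, i;

        printf("%d", icg);                      /* 1: preferred group */
        cg = icg;
        for (i = 1; i < gcount; i *= 2) {       /* 2: quadratic rehash */
                cg += i;
                if (cg >= gcount)
                        cg -= gcount;
                printf(" %d", cg);
        }
        cg = (icg + 2) % gcount;                /* 3: brute-force sweep */
        for (i = 2; i < gcount; i++) {
                printf(" %d", cg);
                if (++cg == gcount)
                        cg = 0;
        }
        printf("\n");
}

int
main(void)
{

        probe_order(16, 5);     /* 5, then 6 8 12 4, then 7 8 ... 4 */
        return (0);
}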
*/ cg = (icg + 2) % fs->e2fs_gcount; for (i = 2; i < fs->e2fs_gcount; i++) { result = (*allocator)(ip, cg, 0, size); if (result) return (result); cg++; if (cg == fs->e2fs_gcount) cg = 0; } return (0); } static unsigned long ext2_cg_num_gdb(struct m_ext2fs *fs, int cg) { int gd_per_block, metagroup, first, last; gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd); metagroup = cg / gd_per_block; first = metagroup * gd_per_block; last = first + gd_per_block - 1; if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) || metagroup < fs->e2fs->e3fs_first_meta_bg) { if (!ext2_cg_has_sb(fs, cg)) return (0); if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG)) return (fs->e2fs->e3fs_first_meta_bg); return (fs->e2fs_gdbcount); } if (cg == first || cg == first + 1 || cg == last) return (1); return (0); } static int ext2_num_base_meta_blocks(struct m_ext2fs *fs, int cg) { int num, gd_per_block; gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd); num = ext2_cg_has_sb(fs, cg); if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) || cg < fs->e2fs->e3fs_first_meta_bg * gd_per_block) { if (num) { num += ext2_cg_num_gdb(fs, cg); num += fs->e2fs->e2fs_reserved_ngdb; } } else { num += ext2_cg_num_gdb(fs, cg); } return (num); } -static int -ext2_get_cg_number(struct m_ext2fs *fs, daddr_t blk) -{ - int cg; - - if (fs->e2fs->e2fs_bpg == fs->e2fs_bsize * 8) - cg = (blk - fs->e2fs->e2fs_first_dblock) / (fs->e2fs_bsize * 8); - else - cg = blk - fs->e2fs->e2fs_first_dblock; - - return (cg); -} - static void ext2_mark_bitmap_end(int start_bit, int end_bit, char *bitmap) { int i; if (start_bit >= end_bit) return; for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++) setbit(bitmap, i); if (i < end_bit) memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); } static int ext2_cg_block_bitmap_init(struct m_ext2fs *fs, int cg, struct buf *bp) { int bit, bit_max, inodes_per_block; - uint32_t start, tmp; + uint64_t start, tmp; - if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) || - !(fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_BLOCK_UNINIT)) + if (!(fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_BLOCK_UNINIT)) return (0); memset(bp->b_data, 0, fs->e2fs_bsize); bit_max = ext2_num_base_meta_blocks(fs, cg); if ((bit_max >> 3) >= fs->e2fs_bsize) return (EINVAL); for (bit = 0; bit < bit_max; bit++) setbit(bp->b_data, bit); - start = cg * fs->e2fs->e2fs_bpg + fs->e2fs->e2fs_first_dblock; + start = (uint64_t)cg * fs->e2fs->e2fs_bpg + fs->e2fs->e2fs_first_dblock; - /* Set bits for block and inode bitmaps, and inode table */ - tmp = fs->e2fs_gd[cg].ext2bgd_b_bitmap; + /* Set bits for block and inode bitmaps, and inode table. 
*/ + tmp = e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg]); if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || - tmp == ext2_get_cg_number(fs, cg)) + cg == dtogd(fs, tmp)) setbit(bp->b_data, tmp - start); - tmp = fs->e2fs_gd[cg].ext2bgd_i_bitmap; + tmp = e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg]); if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || - tmp == ext2_get_cg_number(fs, cg)) + cg == dtogd(fs, tmp)) setbit(bp->b_data, tmp - start); - tmp = fs->e2fs_gd[cg].ext2bgd_i_tables; + tmp = e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]); inodes_per_block = fs->e2fs_bsize/EXT2_INODE_SIZE(fs); - while( tmp < fs->e2fs_gd[cg].ext2bgd_i_tables + + while( tmp < e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]) + fs->e2fs->e2fs_ipg / inodes_per_block ) { if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || - tmp == ext2_get_cg_number(fs, cg)) + cg == dtogd(fs, tmp)) setbit(bp->b_data, tmp - start); tmp++; } /* * Also if the number of blocks within the group is less than * the blocksize * 8 ( which is the size of bitmap ), set rest * of the block bitmap to 1 */ ext2_mark_bitmap_end(fs->e2fs->e2fs_bpg, fs->e2fs_bsize * 8, bp->b_data); /* Clean the flag */ fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_BLOCK_UNINIT; return (0); } /* * Determine whether a block can be allocated. * * Check to see if a block of the appropriate size is available, * and if it is, allocate it. */ static daddr_t ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size) { struct m_ext2fs *fs; struct buf *bp; struct ext2mount *ump; daddr_t bno, runstart, runlen; int bit, loc, end, error, start; char *bbp; /* XXX ondisk32 */ fs = ip->i_e2fs; ump = ip->i_ump; - if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) + if (e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) == 0) return (0); EXT2_UNLOCK(ump); error = bread(ip->i_devvp, fsbtodb(fs, - fs->e2fs_gd[cg].ext2bgd_b_bitmap), + e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) { error = ext2_cg_block_bitmap_init(fs, cg, bp); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } } - if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) { + if (e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) == 0) { /* * Another thread allocated the last block in this * group while we were waiting for the buffer. */ brelse(bp); EXT2_LOCK(ump); return (0); } bbp = (char *)bp->b_data; if (dtog(fs, bpref) != cg) bpref = 0; if (bpref != 0) { bpref = dtogd(fs, bpref); /* * if the requested block is available, use it */ if (isclr(bbp, bpref)) { bno = bpref; goto gotit; } } /* * no blocks in the requested cylinder, so take next * available one in this cylinder group. * first try to get 8 contigous blocks, then fall back to a single * block. */ if (bpref) start = dtogd(fs, bpref) / NBBY; else start = 0; end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start; retry: runlen = 0; runstart = 0; for (loc = start; loc < end; loc++) { if (bbp[loc] == (char)0xff) { runlen = 0; continue; } /* Start of a run, find the number of high clear bits. */ if (runlen == 0) { bit = fls(bbp[loc]); runlen = NBBY - bit; runstart = loc * NBBY + bit; } else if (bbp[loc] == 0) { /* Continue a run. */ runlen += NBBY; } else { /* * Finish the current run. If it isn't long * enough, start a new one. */ bit = ffs(bbp[loc]) - 1; runlen += bit; if (runlen >= 8) { bno = runstart; goto gotit; } /* Run was too short, start a new one. 
*/ bit = fls(bbp[loc]); runlen = NBBY - bit; runstart = loc * NBBY + bit; } /* If the current run is long enough, use it. */ if (runlen >= 8) { bno = runstart; goto gotit; } } if (start != 0) { end = start; start = 0; goto retry; } bno = ext2_mapsearch(fs, bbp, bpref); if (bno < 0) { brelse(bp); EXT2_LOCK(ump); return (0); } gotit: #ifdef INVARIANTS if (isset(bbp, bno)) { printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n", cg, (intmax_t)bno, fs->e2fs_fsmnt); panic("ext2fs_alloccg: dup alloc"); } #endif setbit(bbp, bno); EXT2_LOCK(ump); ext2_clusteracct(fs, bbp, cg, bno, -1); - fs->e2fs->e2fs_fbcount--; - fs->e2fs_gd[cg].ext2bgd_nbfree--; + fs->e2fs_fbcount--; + e2fs_gd_set_nbfree(&fs->e2fs_gd[cg], + e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) - 1); fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); bdwrite(bp); - return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno); + return (((uint64_t)cg) * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno); } /* * Determine whether a cluster can be allocated. */ static daddr_t ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len) { struct m_ext2fs *fs; struct ext2mount *ump; struct buf *bp; char *bbp; int bit, error, got, i, loc, run; int32_t *lp; daddr_t bno; fs = ip->i_e2fs; ump = ip->i_ump; if (fs->e2fs_maxcluster[cg] < len) return (0); EXT2_UNLOCK(ump); error = bread(ip->i_devvp, - fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap), + fsbtodb(fs, e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) goto fail_lock; bbp = (char *)bp->b_data; EXT2_LOCK(ump); /* * Check to see if a cluster of the needed size (or bigger) is * available in this cylinder group. */ lp = &fs->e2fs_clustersum[cg].cs_sum[len]; for (i = len; i <= fs->e2fs_contigsumsize; i++) if (*lp++ > 0) break; if (i > fs->e2fs_contigsumsize) { /* * Update the cluster summary information to reflect * the true maximum-sized cluster so that future cluster * allocation requests can avoid reading the bitmap only * to find no cluster. */ lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1]; for (i = len - 1; i > 0; i--) if (*lp-- > 0) break; fs->e2fs_maxcluster[cg] = i; goto fail; } EXT2_UNLOCK(ump); /* Search the bitmap to find a big enough cluster like in FFS. */ if (dtog(fs, bpref) != cg) bpref = 0; if (bpref != 0) bpref = dtogd(fs, bpref); loc = bpref / NBBY; bit = 1 << (bpref % NBBY); for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) { if ((bbp[loc] & bit) != 0) run = 0; else { run++; if (run == len) break; } if ((got & (NBBY - 1)) != (NBBY - 1)) bit <<= 1; else { loc++; bit = 1; } } if (got >= fs->e2fs->e2fs_fpg) goto fail_lock; /* Allocate the cluster that we found. 
*/ for (i = 1; i < len; i++) if (!isclr(bbp, got - run + i)) panic("ext2_clusteralloc: map mismatch"); bno = got - run + 1; if (bno >= fs->e2fs->e2fs_fpg) panic("ext2_clusteralloc: allocated out of group"); EXT2_LOCK(ump); for (i = 0; i < len; i += fs->e2fs_fpb) { setbit(bbp, bno + i); ext2_clusteracct(fs, bbp, cg, bno + i, -1); - fs->e2fs->e2fs_fbcount--; - fs->e2fs_gd[cg].ext2bgd_nbfree--; + fs->e2fs_fbcount--; + e2fs_gd_set_nbfree(&fs->e2fs_gd[cg], + e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) - 1); } fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); bdwrite(bp); return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno); fail_lock: EXT2_LOCK(ump); fail: brelse(bp); return (0); } static int ext2_zero_inode_table(struct inode *ip, int cg) { struct m_ext2fs *fs; struct buf *bp; int i, all_blks, used_blks; fs = ip->i_e2fs; if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_ZEROED) return (0); all_blks = fs->e2fs->e2fs_inode_size * fs->e2fs->e2fs_ipg / fs->e2fs_bsize; used_blks = howmany(fs->e2fs->e2fs_ipg - - fs->e2fs_gd[cg].ext4bgd_i_unused, + e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]), fs->e2fs_bsize / EXT2_INODE_SIZE(fs)); for (i = 0; i < all_blks - used_blks; i++) { bp = getblk(ip->i_devvp, fsbtodb(fs, - fs->e2fs_gd[cg].ext2bgd_i_tables + used_blks + i), + e2fs_gd_get_i_tables(&fs->e2fs_gd[cg]) + used_blks + i), fs->e2fs_bsize, 0, 0, 0); if (!bp) return (EIO); vfs_bio_bzero_buf(bp, 0, fs->e2fs_bsize); bawrite(bp); } fs->e2fs_gd[cg].ext4bgd_flags |= EXT2_BG_INODE_ZEROED; return (0); } /* * Determine whether an inode can be allocated. * * Check to see if an inode is available, and if it is, * allocate it using tode in the specified cylinder group. */ static daddr_t ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode) { struct m_ext2fs *fs; struct buf *bp; struct ext2mount *ump; int error, start, len; char *ibp, *loc; ipref--; /* to avoid a lot of (ipref -1) */ if (ipref == -1) ipref = 0; fs = ip->i_e2fs; ump = ip->i_ump; - if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) + if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) == 0) return (0); EXT2_UNLOCK(ump); error = bread(ip->i_devvp, fsbtodb(fs, - fs->e2fs_gd[cg].ext2bgd_i_bitmap), + e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) { if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_UNINIT) { memset(bp->b_data, 0, fs->e2fs_bsize); fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_INODE_UNINIT; } error = ext2_zero_inode_table(ip, cg); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } } - if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) { + if (e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) == 0) { /* * Another thread allocated the last i-node in this * group while we were waiting for the buffer. 
*/ brelse(bp); EXT2_LOCK(ump); return (0); } ibp = (char *)bp->b_data; if (ipref) { ipref %= fs->e2fs->e2fs_ipg; if (isclr(ibp, ipref)) goto gotit; } start = ipref / NBBY; len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY); loc = memcchr(&ibp[start], 0xff, len); if (loc == NULL) { len = start + 1; start = 0; loc = memcchr(&ibp[start], 0xff, len); if (loc == NULL) { printf("cg = %d, ipref = %lld, fs = %s\n", cg, (long long)ipref, fs->e2fs_fsmnt); panic("ext2fs_nodealloccg: map corrupted"); /* NOTREACHED */ } } ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1; gotit: setbit(ibp, ipref); EXT2_LOCK(ump); - fs->e2fs_gd[cg].ext2bgd_nifree--; + e2fs_gd_set_nifree(&fs->e2fs_gd[cg], + e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) - 1); if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) - fs->e2fs_gd[cg].ext4bgd_i_unused--; + e2fs_gd_set_i_unused(&fs->e2fs_gd[cg], + e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]) - 1); fs->e2fs->e2fs_ficount--; fs->e2fs_fmod = 1; if ((mode & IFMT) == IFDIR) { - fs->e2fs_gd[cg].ext2bgd_ndirs++; + e2fs_gd_set_ndirs(&fs->e2fs_gd[cg], + e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) + 1); fs->e2fs_total_dir++; } EXT2_UNLOCK(ump); bdwrite(bp); - return (cg * fs->e2fs->e2fs_ipg + ipref + 1); + return ((uint64_t)cg * fs->e2fs_ipg + ipref + 1); } /* * Free a block or fragment. * */ void ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size) { struct m_ext2fs *fs; struct buf *bp; struct ext2mount *ump; int cg, error; char *bbp; fs = ip->i_e2fs; ump = ip->i_ump; cg = dtog(fs, bno); - if (bno >= fs->e2fs->e2fs_bcount) { + if (bno >= fs->e2fs_bcount) { printf("bad block %lld, ino %ju\n", (long long)bno, (uintmax_t)ip->i_number); ext2_fserr(fs, ip->i_uid, "bad block"); return; } error = bread(ip->i_devvp, - fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap), + fsbtodb(fs, e2fs_gd_get_b_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return; } bbp = (char *)bp->b_data; bno = dtogd(fs, bno); if (isclr(bbp, bno)) { printf("block = %lld, fs = %s\n", (long long)bno, fs->e2fs_fsmnt); panic("ext2_blkfree: freeing free block"); } clrbit(bbp, bno); EXT2_LOCK(ump); ext2_clusteracct(fs, bbp, cg, bno, 1); - fs->e2fs->e2fs_fbcount++; - fs->e2fs_gd[cg].ext2bgd_nbfree++; + fs->e2fs_fbcount++; + e2fs_gd_set_nbfree(&fs->e2fs_gd[cg], + e2fs_gd_get_nbfree(&fs->e2fs_gd[cg]) + 1); fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); bdwrite(bp); } /* * Free an inode. 
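The inode-bitmap scan above (memcchr() for the first byte that is not 0xff, then ffs() on that byte's complement) reduces to the following portable sketch; memcchr() is replaced by a plain loop so the example stands alone:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>    /* ffs() */

#define NBBY    8

/*
 * Return the index of the first clear (free) bit in a byte bitmap,
 * or -1 if every bit is set.
 */
static int
first_free_bit(const uint8_t *map, size_t nbytes)
{
        size_t i;

        for (i = 0; i < nbytes; i++)
                if (map[i] != 0xff)
                        return ((int)(i * NBBY) + ffs((uint8_t)~map[i]) - 1);
        return (-1);
}

int
main(void)
{
        uint8_t map[2] = { 0xff, 0xf7 };        /* bit 11 is the first free bit */

        printf("%d\n", first_free_bit(map, sizeof(map)));       /* prints 11 */
        return (0);
}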
* */ int ext2_vfree(struct vnode *pvp, ino_t ino, int mode) { struct m_ext2fs *fs; struct inode *pip; struct buf *bp; struct ext2mount *ump; int error, cg; char *ibp; pip = VTOI(pvp); fs = pip->i_e2fs; ump = pip->i_ump; if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount) panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s", pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt); cg = ino_to_cg(fs, ino); error = bread(pip->i_devvp, - fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap), + fsbtodb(fs, e2fs_gd_get_i_bitmap(&fs->e2fs_gd[cg])), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (0); } ibp = (char *)bp->b_data; ino = (ino - 1) % fs->e2fs->e2fs_ipg; if (isclr(ibp, ino)) { printf("ino = %llu, fs = %s\n", (unsigned long long)ino, fs->e2fs_fsmnt); if (fs->e2fs_ronly == 0) panic("ext2_vfree: freeing free inode"); } clrbit(ibp, ino); EXT2_LOCK(ump); fs->e2fs->e2fs_ficount++; - fs->e2fs_gd[cg].ext2bgd_nifree++; + e2fs_gd_set_nifree(&fs->e2fs_gd[cg], + e2fs_gd_get_nifree(&fs->e2fs_gd[cg]) + 1); if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) - fs->e2fs_gd[cg].ext4bgd_i_unused++; + e2fs_gd_set_i_unused(&fs->e2fs_gd[cg], + e2fs_gd_get_i_unused(&fs->e2fs_gd[cg]) + 1); if ((mode & IFMT) == IFDIR) { - fs->e2fs_gd[cg].ext2bgd_ndirs--; + e2fs_gd_set_ndirs(&fs->e2fs_gd[cg], + e2fs_gd_get_ndirs(&fs->e2fs_gd[cg]) - 1); fs->e2fs_total_dir--; } fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); bdwrite(bp); return (0); } /* * Find a block in the specified cylinder group. * * It is a panic if a request is made to find a block if none are * available. */ static daddr_t ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref) { char *loc; int start, len; /* * find the fragment by searching through the free block * map for an appropriate bit pattern */ if (bpref) start = dtogd(fs, bpref) / NBBY; else start = 0; len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start; loc = memcchr(&bbp[start], 0xff, len); if (loc == NULL) { len = start + 1; start = 0; loc = memcchr(&bbp[start], 0xff, len); if (loc == NULL) { printf("start = %d, len = %d, fs = %s\n", start, len, fs->e2fs_fsmnt); panic("ext2_mapsearch: map corrupted"); /* NOTREACHED */ } } return ((loc - bbp) * NBBY + ffs(~*loc) - 1); } /* * Fserr prints the name of a filesystem with an error diagnostic. * * The form of the error message is: * fs: error message */ void ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp) { log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp); } int ext2_cg_has_sb(struct m_ext2fs *fs, int cg) { int a3, a5, a7; if (cg == 0) return (1); if (EXT2_HAS_COMPAT_FEATURE(fs, EXT2F_COMPAT_SPARSESUPER2)) { if (cg == fs->e2fs->e4fs_backup_bgs[0] || cg == fs->e2fs->e4fs_backup_bgs[1]) return (1); return (0); } if ((cg <= 1) || !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_SPARSESUPER)) return (1); if (!(cg & 1)) return (0); for (a3 = 3, a5 = 5, a7 = 7; a3 <= cg || a5 <= cg || a7 <= cg; a3 *= 3, a5 *= 5, a7 *= 7) if (cg == a3 || cg == a5 || cg == a7) return (1); return (0); } Index: head/sys/fs/ext2fs/ext2_balloc.c =================================================================== --- head/sys/fs/ext2fs/ext2_balloc.c (revision 327583) +++ head/sys/fs/ext2fs/ext2_balloc.c (revision 327584) @@ -1,376 +1,383 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_balloc.c 8.4 (Berkeley) 9/23/93 * $FreeBSD$ */ #include #include #include #include +#include #include #include #include #include #include #include #include #include #include static int ext2_ext_balloc(struct inode *ip, uint32_t lbn, int size, struct ucred *cred, struct buf **bpp, int flags) { struct m_ext2fs *fs; struct buf *bp = NULL; struct vnode *vp = ITOV(ip); - uint32_t nb; + daddr_t newblk; int osize, nsize, blks, error, allocated; fs = ip->i_e2fs; blks = howmany(size, fs->e2fs_bsize); - error = ext4_ext_get_blocks(ip, lbn, blks, cred, NULL, &allocated, &nb); + error = ext4_ext_get_blocks(ip, lbn, blks, cred, NULL, &allocated, &newblk); if (error) return (error); if (allocated) { if (ip->i_size < (lbn + 1) * fs->e2fs_bsize) nsize = fragroundup(fs, size); else nsize = fs->e2fs_bsize; bp = getblk(vp, lbn, nsize, 0, 0, 0); if(!bp) return (EIO); - bp->b_blkno = fsbtodb(fs, nb); + bp->b_blkno = fsbtodb(fs, newblk); if (flags & BA_CLRBUF) vfs_bio_clrbuf(bp); } else { if (ip->i_size >= (lbn + 1) * fs->e2fs_bsize) { error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } - bp->b_blkno = fsbtodb(fs, nb); + bp->b_blkno = fsbtodb(fs, newblk); *bpp = bp; return (0); } /* * Consider need to reallocate a fragment. */ osize = fragroundup(fs, blkoff(fs, ip->i_size)); nsize = fragroundup(fs, size); - if (nsize <= osize) { + if (nsize <= osize) error = bread(vp, lbn, osize, NOCRED, &bp); - if (error) { - brelse(bp); - return (error); - } - bp->b_blkno = fsbtodb(fs, nb); - } else { + else error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp); - if (error) { - brelse(bp); - return (error); - } - bp->b_blkno = fsbtodb(fs, nb); + if (error) { + brelse(bp); + return (error); } + bp->b_blkno = fsbtodb(fs, newblk); } *bpp = bp; return (error); } /* * Balloc defines the structure of filesystem storage * by allocating the physical blocks on a device given * the inode and the logical block number in a file. 
*/ int ext2_balloc(struct inode *ip, e2fs_lbn_t lbn, int size, struct ucred *cred, struct buf **bpp, int flags) { struct m_ext2fs *fs; struct ext2mount *ump; struct buf *bp, *nbp; struct vnode *vp = ITOV(ip); struct indir indirs[EXT2_NIADDR + 2]; e4fs_daddr_t nb, newb; e2fs_daddr_t *bap, pref; int osize, nsize, num, i, error; *bpp = NULL; if (lbn < 0) return (EFBIG); fs = ip->i_e2fs; ump = ip->i_ump; /* * check if this is a sequential block allocation. * If so, increment next_alloc fields to allow ext2_blkpref * to make a good guess */ if (lbn == ip->i_next_alloc_block + 1) { ip->i_next_alloc_block++; ip->i_next_alloc_goal++; } if (ip->i_flag & IN_E4EXTENTS) return (ext2_ext_balloc(ip, lbn, size, cred, bpp, flags)); /* * The first EXT2_NDADDR blocks are direct blocks */ if (lbn < EXT2_NDADDR) { nb = ip->i_db[lbn]; /* * no new block is to be allocated, and no need to expand * the file */ if (nb != 0 && ip->i_size >= (lbn + 1) * fs->e2fs_bsize) { error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } bp->b_blkno = fsbtodb(fs, nb); *bpp = bp; return (0); } if (nb != 0) { /* * Consider need to reallocate a fragment. */ osize = fragroundup(fs, blkoff(fs, ip->i_size)); nsize = fragroundup(fs, size); if (nsize <= osize) { error = bread(vp, lbn, osize, NOCRED, &bp); if (error) { brelse(bp); return (error); } bp->b_blkno = fsbtodb(fs, nb); } else { /* * Godmar thinks: this shouldn't happen w/o * fragments */ printf("nsize %d(%d) > osize %d(%d) nb %d\n", (int)nsize, (int)size, (int)osize, (int)ip->i_size, (int)nb); panic( "ext2_balloc: Something is terribly wrong"); /* * please note there haven't been any changes from here on - * FFS seems to work. */ } } else { if (ip->i_size < (lbn + 1) * fs->e2fs_bsize) nsize = fragroundup(fs, size); else nsize = fs->e2fs_bsize; EXT2_LOCK(ump); error = ext2_alloc(ip, lbn, ext2_blkpref(ip, lbn, (int)lbn, &ip->i_db[0], 0), nsize, cred, &newb); if (error) return (error); + /* + * If the newly allocated block exceeds 32-bit limit, + * we can not use it in file block maps. + */ + if (newb > UINT_MAX) + return (EFBIG); bp = getblk(vp, lbn, nsize, 0, 0, 0); bp->b_blkno = fsbtodb(fs, newb); if (flags & BA_CLRBUF) vfs_bio_clrbuf(bp); } ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno); ip->i_flag |= IN_CHANGE | IN_UPDATE; *bpp = bp; return (0); } /* * Determine the number of levels of indirection. */ pref = 0; if ((error = ext2_getlbns(vp, lbn, indirs, &num)) != 0) return (error); #ifdef INVARIANTS if (num < 1) panic("ext2_balloc: ext2_getlbns returned indirect block"); #endif /* * Fetch the first indirect block allocating if necessary. */ --num; nb = ip->i_ib[indirs[0].in_off]; if (nb == 0) { EXT2_LOCK(ump); pref = ext2_blkpref(ip, lbn, indirs[0].in_off + EXT2_NDIR_BLOCKS, &ip->i_db[0], 0); if ((error = ext2_alloc(ip, lbn, pref, fs->e2fs_bsize, cred, &newb))) return (error); + if (newb > UINT_MAX) + return (EFBIG); nb = newb; bp = getblk(vp, indirs[1].in_lbn, fs->e2fs_bsize, 0, 0, 0); bp->b_blkno = fsbtodb(fs, newb); vfs_bio_clrbuf(bp); /* * Write synchronously so that indirect blocks * never point at garbage. */ if ((error = bwrite(bp)) != 0) { ext2_blkfree(ip, nb, fs->e2fs_bsize); return (error); } ip->i_ib[indirs[0].in_off] = newb; ip->i_flag |= IN_CHANGE | IN_UPDATE; } /* * Fetch through the indirect blocks, allocating as necessary. 
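The EFBIG checks added in this function guard a simple invariant: the classic direct/indirect block map stores physical block numbers in 32-bit on-disk slots, so a block allocated above UINT_MAX (possible once the 64BIT feature allows block counts beyond 2^32) cannot be recorded there. A sketch of the same guard with stand-in names:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Store a newly allocated block in a 32-bit block-map slot, if it fits. */
static int
blkmap_store(uint32_t *slot, uint64_t newb)
{

        if (newb > UINT_MAX)
                return (-1);    /* ext2_balloc() returns EFBIG here */
        *slot = (uint32_t)newb;
        return (0);
}

int
main(void)
{
        uint32_t slot;

        printf("%d\n", blkmap_store(&slot, (uint64_t)1 << 33)); /* prints -1 */
        return (0);
}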
*/ for (i = 1;;) { error = bread(vp, indirs[i].in_lbn, (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } bap = (e2fs_daddr_t *)bp->b_data; nb = bap[indirs[i].in_off]; if (i == num) break; i += 1; if (nb != 0) { bqrelse(bp); continue; } EXT2_LOCK(ump); if (pref == 0) pref = ext2_blkpref(ip, lbn, indirs[i].in_off, bap, bp->b_lblkno); error = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newb); if (error) { brelse(bp); return (error); } + if (newb > UINT_MAX) + return (EFBIG); nb = newb; nbp = getblk(vp, indirs[i].in_lbn, fs->e2fs_bsize, 0, 0, 0); nbp->b_blkno = fsbtodb(fs, nb); vfs_bio_clrbuf(nbp); /* * Write synchronously so that indirect blocks * never point at garbage. */ if ((error = bwrite(nbp)) != 0) { ext2_blkfree(ip, nb, fs->e2fs_bsize); EXT2_UNLOCK(ump); brelse(bp); return (error); } bap[indirs[i - 1].in_off] = nb; /* * If required, write synchronously, otherwise use * delayed write. */ if (flags & IO_SYNC) { bwrite(bp); } else { if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; bdwrite(bp); } } /* * Get the data block, allocating if necessary. */ if (nb == 0) { EXT2_LOCK(ump); pref = ext2_blkpref(ip, lbn, indirs[i].in_off, &bap[0], bp->b_lblkno); if ((error = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newb)) != 0) { brelse(bp); return (error); } + if (newb > UINT_MAX) + return (EFBIG); nb = newb; nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0, 0); nbp->b_blkno = fsbtodb(fs, nb); if (flags & BA_CLRBUF) vfs_bio_clrbuf(nbp); bap[indirs[i].in_off] = nb; /* * If required, write synchronously, otherwise use * delayed write. */ if (flags & IO_SYNC) { bwrite(bp); } else { if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; bdwrite(bp); } *bpp = nbp; return (0); } brelse(bp); if (flags & BA_CLRBUF) { int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT; if (seqcount && (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { error = cluster_read(vp, ip->i_size, lbn, (int)fs->e2fs_bsize, NOCRED, MAXBSIZE, seqcount, 0, &nbp); } else { error = bread(vp, lbn, (int)fs->e2fs_bsize, NOCRED, &nbp); } if (error) { brelse(nbp); return (error); } } else { nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0, 0); nbp->b_blkno = fsbtodb(fs, nb); } *bpp = nbp; return (0); } Index: head/sys/fs/ext2fs/ext2_csum.c =================================================================== --- head/sys/fs/ext2fs/ext2_csum.c (revision 327583) +++ head/sys/fs/ext2fs/ext2_csum.c (revision 327584) @@ -1,149 +1,149 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017, Fedor Uporov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static uint16_t ext2_crc16(uint16_t crc, const void *buffer, unsigned int len) { const unsigned char *cp = buffer; /* CRC table for the CRC-16. The poly is 0x8005 (x16 + x15 + x2 + 1). */ static uint16_t const crc16_table[256] = { 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440, 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841, 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40, 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41, 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641, 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040, 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240, 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441, 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41, 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41, 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40, 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041, 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240, 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441, 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41, 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41, 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640, 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241, 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440, 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841, 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40, 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41, 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 }; while (len--) crc = (((crc >> 8) & 0xffU) ^ crc16_table[(crc ^ *cp++) & 0xffU]) & 0x0000ffffU; return crc; } static uint16_t ext2_gd_csum(struct m_ext2fs *fs, uint32_t block_group, struct ext2_gd *gd) { size_t offset; uint16_t crc; offset = offsetof(struct ext2_gd, ext4bgd_csum); if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) { crc = ext2_crc16(~0, fs->e2fs->e2fs_uuid, sizeof(fs->e2fs->e2fs_uuid)); crc = ext2_crc16(crc, (uint8_t *)&block_group, sizeof(block_group)); crc = ext2_crc16(crc, (uint8_t *)gd, offset); offset += sizeof(gd->ext4bgd_csum); /* skip checksum */ if 
(EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT) && offset < fs->e2fs->e3fs_desc_size) crc = ext2_crc16(crc, (uint8_t *)gd + offset, - fs->e2fs->e3fs_desc_size - offset); + fs->e2fs->e3fs_desc_size - offset); return (crc); } return (0); } int ext2_gd_csum_verify(struct m_ext2fs *fs, struct cdev *dev) { unsigned int i; int error = 0; for (i = 0; i < fs->e2fs_gcount; i++) { if (fs->e2fs_gd[i].ext4bgd_csum != ext2_gd_csum(fs, i, &fs->e2fs_gd[i])) { printf( "WARNING: mount of %s denied due bad gd=%d csum=0x%x, expected=0x%x - run fsck\n", devtoname(dev), i, fs->e2fs_gd[i].ext4bgd_csum, ext2_gd_csum(fs, i, &fs->e2fs_gd[i])); error = EINVAL; break; } } return (error); } void ext2_gd_csum_set(struct m_ext2fs *fs) { unsigned int i; for (i = 0; i < fs->e2fs_gcount; i++) fs->e2fs_gd[i].ext4bgd_csum = ext2_gd_csum(fs, i, &fs->e2fs_gd[i]); } Index: head/sys/fs/ext2fs/ext2_extents.c =================================================================== --- head/sys/fs/ext2fs/ext2_extents.c (revision 327583) +++ head/sys/fs/ext2fs/ext2_extents.c (revision 327584) @@ -1,1583 +1,1583 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Zheng Liu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
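ext2_gd_csum() above feeds the filesystem UUID, the group number, and the descriptor itself through a table-driven CRC-16; the table is the standard reflected table for polynomial 0x8005. A bit-at-a-time sketch that should produce the same per-byte update as ext2_crc16(), handy for sanity-checking the table:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* CRC-16, polynomial 0x8005 processed LSB-first (reflected form 0xA001). */
static uint16_t
crc16_bitwise(uint16_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;
        int bit;

        while (len--) {
                crc ^= *p++;
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
        }
        return (crc);
}

int
main(void)
{
        const char msg[] = "123456789";

        /* CRC-16/ARC check value for "123456789" is 0xBB3D. */
        printf("0x%04x\n", (unsigned)crc16_bitwise(0, msg, strlen(msg)));
        return (0);
}

Note that ext2_gd_csum() seeds the CRC with ~0 rather than 0 before hashing the UUID.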
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents"); #ifdef EXT2FS_DEBUG static void ext4_ext_print_extent(struct ext4_extent *ep) { printf(" ext %p => (blk %u len %u start %lu)\n", ep, ep->e_blk, ep->e_len, (uint64_t)ep->e_start_hi << 32 | ep->e_start_lo); } static void ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp); static void ext4_ext_print_index(struct inode *ip, struct ext4_extent_index *ex, int do_walk) { struct m_ext2fs *fs; struct buf *bp; int error; fs = ip->i_e2fs; printf(" index %p => (blk %u pblk %lu)\n", ex, ex->ei_blk, (uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo); if(!do_walk) return; if ((error = bread(ip->i_devvp, fsbtodb(fs, ((uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return; } ext4_ext_print_header(ip, (struct ext4_extent_header *)bp->b_data); brelse(bp); } static void ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp) { int i; printf("header %p => (magic 0x%x entries %d max %d depth %d gen %d)\n", ehp, ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth, ehp->eh_gen); for (i = 0; i < ehp->eh_ecount; i++) if (ehp->eh_depth != 0) ext4_ext_print_index(ip, (struct ext4_extent_index *)(ehp + 1 + i), 1); else ext4_ext_print_extent((struct ext4_extent *)(ehp + 1 + i)); } static void ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path) { int k, l; l = path->ep_depth printf("ip=%d, Path:\n", ip->i_number); for (k = 0; k <= l; k++, path++) { if (path->ep_index) { ext4_ext_print_index(ip, path->ep_index, 0); } else if (path->ep_ext) { ext4_ext_print_extent(path->ep_ext); } } } void ext4_ext_print_extent_tree_status(struct inode * ip) { struct m_ext2fs *fs; struct ext4_extent_header *ehp; fs = ip->i_e2fs; ehp = (struct ext4_extent_header *)(char *)ip->i_db; printf("Extent status:ip=%d\n", ip->i_number); if (!(ip->i_flag & IN_E4EXTENTS)) return; ext4_ext_print_header(ip, ehp); return; } #endif static inline struct ext4_extent_header * ext4_ext_inode_header(struct inode *ip) { return ((struct ext4_extent_header *)ip->i_db); } static inline struct ext4_extent_header * ext4_ext_block_header(char *bdata) { return ((struct ext4_extent_header *)bdata); } static inline unsigned short ext4_ext_inode_depth(struct inode *ip) { struct ext4_extent_header *ehp; ehp = (struct ext4_extent_header *)ip->i_data; return (ehp->eh_depth); } static inline e4fs_daddr_t ext4_ext_index_pblock(struct ext4_extent_index *index) { e4fs_daddr_t blk; blk = index->ei_leaf_lo; blk |= (e4fs_daddr_t)index->ei_leaf_hi << 32; return (blk); } static inline void ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb) { index->ei_leaf_lo = pb & 0xffffffff; index->ei_leaf_hi = (pb >> 32) & 0xffff; } static inline e4fs_daddr_t ext4_ext_extent_pblock(struct ext4_extent *extent) { e4fs_daddr_t blk; blk = extent->e_start_lo; blk |= (e4fs_daddr_t)extent->e_start_hi << 32; return (blk); } static inline void ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb) { ex->e_start_lo = pb & 0xffffffff; ex->e_start_hi = (pb >> 32) & 0xffff; } int ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep) { struct ext4_extent_cache *ecp; int ret = EXT4_EXT_CACHE_NO; ecp = &ip->i_ext_cache; if (ecp->ec_type == EXT4_EXT_CACHE_NO) return (ret); if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + 
ecp->ec_len) { ep->e_blk = ecp->ec_blk; ep->e_start_lo = ecp->ec_start & 0xffffffff; ep->e_start_hi = ecp->ec_start >> 32 & 0xffff; ep->e_len = ecp->ec_len; ret = ecp->ec_type; } return (ret); } static int ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh) { struct m_ext2fs *fs; char *error_msg; fs = ip->i_e2fs; if (eh->eh_magic != EXT4_EXT_MAGIC) { error_msg = "invalid magic"; goto corrupted; } if (eh->eh_max == 0) { error_msg = "invalid eh_max"; goto corrupted; } if (eh->eh_ecount > eh->eh_max) { error_msg = "invalid eh_entries"; goto corrupted; } return (0); corrupted: ext2_fserr(fs, ip->i_uid, error_msg); return (EIO); } static void ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk) { struct ext4_extent_header *eh; struct ext4_extent_index *r, *l, *m; eh = path->ep_header; KASSERT(eh->eh_ecount <= eh->eh_max && eh->eh_ecount > 0, ("ext4_ext_binsearch_index: bad args")); l = EXT_FIRST_INDEX(eh) + 1; r = EXT_FIRST_INDEX(eh) + eh->eh_ecount - 1; while (l <= r) { m = l + (r - l) / 2; if (blk < m->ei_blk) r = m - 1; else l = m + 1; } path->ep_index = l - 1; } static void ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk) { struct ext4_extent_header *eh; struct ext4_extent *r, *l, *m; eh = path->ep_header; KASSERT(eh->eh_ecount <= eh->eh_max, ("ext4_ext_binsearch_ext: bad args")); if (eh->eh_ecount == 0) return; l = EXT_FIRST_EXTENT(eh) + 1; r = EXT_FIRST_EXTENT(eh) + eh->eh_ecount - 1; while (l <= r) { m = l + (r - l) / 2; if (blk < m->e_blk) r = m - 1; else l = m + 1; } path->ep_ext = l - 1; } static int ext4_ext_fill_path_bdata(struct ext4_extent_path *path, struct buf *bp, uint64_t blk) { KASSERT(path->ep_data == NULL, ("ext4_ext_fill_path_bdata: bad ep_data")); path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK); if (!path->ep_data) return (ENOMEM); memcpy(path->ep_data, bp->b_data, bp->b_bufsize); path->ep_blk = blk; return (0); } static void ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp) { KASSERT(path->ep_data != NULL, ("ext4_ext_fill_path_buf: bad ep_data")); memcpy(bp->b_data, path->ep_data, bp->b_bufsize); } static void ext4_ext_drop_refs(struct ext4_extent_path *path) { int depth, i; if (!path) return; depth = path->ep_depth; for (i = 0; i <= depth; i++, path++) if (path->ep_data) { free(path->ep_data, M_EXT2EXTENTS); path->ep_data = NULL; } } void ext4_ext_path_free(struct ext4_extent_path *path) { if (!path) return; ext4_ext_drop_refs(path); free(path, M_EXT2EXTENTS); } int ext4_ext_find_extent(struct inode *ip, daddr_t block, struct ext4_extent_path **ppath) { struct m_ext2fs *fs; struct ext4_extent_header *eh; struct ext4_extent_path *path; struct buf *bp; uint64_t blk; int error, depth, i, ppos, alloc; fs = ip->i_e2fs; eh = ext4_ext_inode_header(ip); depth = ext4_ext_inode_depth(ip); ppos = 0; alloc = 0; error = ext4_ext_check_header(ip, eh); if (error) return (error); if (ppath == NULL) return (EINVAL); path = *ppath; if (path == NULL) { path = malloc(EXT4_EXT_DEPTH_MAX * sizeof(struct ext4_extent_path), M_EXT2EXTENTS, M_WAITOK | M_ZERO); if (!path) return (ENOMEM); *ppath = path; alloc = 1; } path[0].ep_header = eh; path[0].ep_data = NULL; /* Walk through the tree. 
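The pblock helpers above (ext4_ext_index_pblock(), ext4_ext_extent_pblock() and their store counterparts) all encode a 48-bit physical block number as a 32-bit low half plus a 16-bit high half. The following is a minimal user-space sketch of that packing; struct split48, join48() and store48() are stand-ins invented for illustration, not kernel names.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for the hi/lo split used by the extent records. */
struct split48 {
	uint32_t lo;	/* low 32 bits of the physical block */
	uint16_t hi;	/* high 16 bits of the physical block */
};

static uint64_t
join48(const struct split48 *s)
{
	return ((uint64_t)s->hi << 32 | s->lo);
}

static void
store48(struct split48 *s, uint64_t pb)
{
	s->lo = pb & 0xffffffff;
	s->hi = (pb >> 32) & 0xffff;
}

int
main(void)
{
	struct split48 s;

	store48(&s, 0x123456789abcULL);		/* any block below 2^48 */
	assert(join48(&s) == 0x123456789abcULL);
	return (0);
}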
*/ i = depth; while (i) { ext4_ext_binsearch_index(&path[ppos], block); blk = ext4_ext_index_pblock(path[ppos].ep_index); path[ppos].ep_depth = i; path[ppos].ep_ext = NULL; error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk), ip->i_e2fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); goto error; } ppos++; if (ppos > depth) { ext2_fserr(fs, ip->i_uid, "ppos > depth => extent corrupted"); error = EIO; brelse(bp); goto error; } ext4_ext_fill_path_bdata(&path[ppos], bp, blk); bqrelse(bp); eh = ext4_ext_block_header(path[ppos].ep_data); error = ext4_ext_check_header(ip, eh); if (error) goto error; path[ppos].ep_header = eh; i--; } error = ext4_ext_check_header(ip, eh); if (error) goto error; /* Find extent. */ path[ppos].ep_depth = i; path[ppos].ep_header = eh; path[ppos].ep_ext = NULL; path[ppos].ep_index = NULL; ext4_ext_binsearch_ext(&path[ppos], block); return (0); error: ext4_ext_drop_refs(path); if (alloc) free(path, M_EXT2EXTENTS); *ppath = NULL; return (error); } static inline int ext4_ext_space_root(struct inode *ip) { int size; size = sizeof(ip->i_data); size -= sizeof(struct ext4_extent_header); size /= sizeof(struct ext4_extent); return (size); } static inline int ext4_ext_space_block(struct inode *ip) { struct m_ext2fs *fs; int size; fs = ip->i_e2fs; size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent); return (size); } static inline int ext4_ext_space_block_index(struct inode *ip) { struct m_ext2fs *fs; int size; fs = ip->i_e2fs; size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent_index); return (size); } void ext4_ext_tree_init(struct inode *ip) { struct ext4_extent_header *ehp; ip->i_flag |= IN_E4EXTENTS; memset(ip->i_data, 0, EXT2_NDADDR + EXT2_NIADDR); ehp = (struct ext4_extent_header *)ip->i_data; ehp->eh_magic = EXT4_EXT_MAGIC; ehp->eh_max = ext4_ext_space_root(ip); ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO; ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_update(ip->i_vnode, 1); } static inline void ext4_ext_put_in_cache(struct inode *ip, uint32_t blk, uint32_t len, uint32_t start, int type) { KASSERT(len != 0, ("ext4_ext_put_in_cache: bad input")); ip->i_ext_cache.ec_type = type; ip->i_ext_cache.ec_blk = blk; ip->i_ext_cache.ec_len = len; ip->i_ext_cache.ec_start = start; } static e4fs_daddr_t ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path, e4fs_daddr_t block) { struct m_ext2fs *fs; struct ext4_extent *ex; e4fs_daddr_t bg_start; int depth; fs = ip->i_e2fs; if (path) { depth = path->ep_depth; ex = path[depth].ep_ext; if (ex) { e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex); e2fs_daddr_t blk = ex->e_blk; if (block > blk) return (pblk + (block - blk)); else return (pblk - (blk - block)); } /* Try to get block from index itself. */ if (path[depth].ep_data) return (path[depth].ep_blk); } /* Use inode's group. */ bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) + fs->e2fs->e2fs_first_dblock; return (bg_start + block); } static int inline ext4_can_extents_be_merged(struct ext4_extent *ex1, struct ext4_extent *ex2) { if (ex1->e_blk + ex1->e_len != ex2->e_blk) return (0); if (ex1->e_len + ex2->e_len > EXT4_MAX_LEN) return (0); if (ext4_ext_extent_pblock(ex1) + ex1->e_len == ext4_ext_extent_pblock(ex2)) return (1); return (0); } static unsigned ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path) { int depth = path->ep_depth; /* Empty tree */ if (depth == 0) return (EXT4_MAX_BLOCKS); /* Go to indexes. 
*/ depth--; while (depth >= 0) { if (path[depth].ep_index != EXT_LAST_INDEX(path[depth].ep_header)) return (path[depth].ep_index[1].ei_blk); depth--; } return (EXT4_MAX_BLOCKS); } static int ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path) { struct m_ext2fs *fs; struct buf *bp; uint64_t blk; int error; fs = ip->i_e2fs; if (!path) return (EINVAL); if (path->ep_data) { blk = path->ep_blk; bp = getblk(ip->i_devvp, fsbtodb(fs, blk), fs->e2fs_bsize, 0, 0, 0); if (!bp) return (EIO); ext4_ext_fill_path_buf(path, bp); error = bwrite(bp); } else { ip->i_flag |= IN_CHANGE | IN_UPDATE; error = ext2_update(ip->i_vnode, 1); } return (error); } static int ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path, uint32_t lblk, e4fs_daddr_t blk) { struct m_ext2fs *fs; struct ext4_extent_index *idx; int len; fs = ip->i_e2fs; if (lblk == path->ep_index->ei_blk) { ext2_fserr(fs, ip->i_uid, "lblk == index blk => extent corrupted"); return (EIO); } if (path->ep_header->eh_ecount >= path->ep_header->eh_max) { ext2_fserr(fs, ip->i_uid, "ecout > maxcount => extent corrupted"); return (EIO); } if (lblk > path->ep_index->ei_blk) { /* Insert after. */ idx = path->ep_index + 1; } else { /* Insert before. */ idx = path->ep_index; } len = EXT_LAST_INDEX(path->ep_header) - idx + 1; if (len > 0) memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index)); if (idx > EXT_MAX_INDEX(path->ep_header)) { ext2_fserr(fs, ip->i_uid, "index is out of range => extent corrupted"); return (EIO); } idx->ei_blk = lblk; ext4_index_store_pblock(idx, blk); path->ep_header->eh_ecount++; return (ext4_ext_dirty(ip, path)); } static e4fs_daddr_t ext4_ext_alloc_meta(struct inode *ip) { e4fs_daddr_t blk = ext2_alloc_meta(ip); if (blk) { ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize); ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_update(ip->i_vnode, 1); } return (blk); } static void ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags) { struct m_ext2fs *fs; int i, blocksreleased; fs = ip->i_e2fs; blocksreleased = count; for(i = 0; i < count; i++) ext2_blkfree(ip, blk + i, fs->e2fs_bsize); if (ip->i_blocks >= blocksreleased) ip->i_blocks -= (btodb(fs->e2fs_bsize)*blocksreleased); else ip->i_blocks = 0; ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_update(ip->i_vnode, 1); } static int ext4_ext_split(struct inode *ip, struct ext4_extent_path *path, struct ext4_extent *newext, int at) { struct m_ext2fs *fs; struct buf *bp; int depth = ext4_ext_inode_depth(ip); struct ext4_extent_header *neh; struct ext4_extent_index *fidx; struct ext4_extent *ex; int i = at, k, m, a; e4fs_daddr_t newblk, oldblk; uint32_t border; e4fs_daddr_t *ablks = NULL; int error = 0; fs = ip->i_e2fs; bp = NULL; /* * We will split at current extent for now. */ if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) { ext2_fserr(fs, ip->i_uid, "extent is out of range => extent corrupted"); return (EIO); } if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header)) border = path[depth].ep_ext[1].e_blk; else border = newext->e_blk; /* Allocate new blocks. 
*/ ablks = malloc(sizeof(e4fs_daddr_t) * depth, M_EXT2EXTENTS, M_WAITOK | M_ZERO); if (!ablks) return (ENOMEM); for (a = 0; a < depth - at; a++) { newblk = ext4_ext_alloc_meta(ip); if (newblk == 0) goto cleanup; ablks[a] = newblk; } newblk = ablks[--a]; bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0); if (!bp) { error = EIO; goto cleanup; } neh = ext4_ext_block_header(bp->b_data); neh->eh_ecount = 0; neh->eh_max = ext4_ext_space_block(ip); neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_depth = 0; ex = EXT_FIRST_EXTENT(neh); if (path[depth].ep_header->eh_ecount != path[depth].ep_header->eh_max) { ext2_fserr(fs, ip->i_uid, "extents count out of range => extent corrupted"); error = EIO; goto cleanup; } /* Start copy from next extent. */ m = 0; path[depth].ep_ext++; while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) { path[depth].ep_ext++; m++; } if (m) { memmove(ex, path[depth].ep_ext - m, sizeof(struct ext4_extent) * m); neh->eh_ecount = neh->eh_ecount + m; } bwrite(bp); bp = NULL; /* Fix old leaf. */ if (m) { path[depth].ep_header->eh_ecount = path[depth].ep_header->eh_ecount - m; ext4_ext_dirty(ip, path + depth); } /* Create intermediate indexes. */ k = depth - at - 1; KASSERT(k >= 0, ("ext4_ext_split: negative k")); /* Insert new index into current index block. */ i = depth - 1; while (k--) { oldblk = newblk; newblk = ablks[--a]; error = bread(ip->i_devvp, fsbtodb(fs, newblk), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); goto cleanup; } neh = (struct ext4_extent_header *)bp->b_data; neh->eh_ecount = 1; neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_max = ext4_ext_space_block_index(ip); neh->eh_depth = depth - i; fidx = EXT_FIRST_INDEX(neh); fidx->ei_blk = border; ext4_index_store_pblock(fidx, oldblk); m = 0; path[i].ep_index++; while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) { path[i].ep_index++; m++; } if (m) { memmove(++fidx, path[i].ep_index - m, sizeof(struct ext4_extent_index) * m); neh->eh_ecount = neh->eh_ecount + m; } bwrite(bp); bp = NULL; /* Fix old index. */ if (m) { path[i].ep_header->eh_ecount = path[i].ep_header->eh_ecount - m; ext4_ext_dirty(ip, path + i); } i--; } error = ext4_ext_insert_index(ip, path + at, border, newblk); cleanup: if (bp) brelse(bp); if (error) { for (i = 0; i < depth; i++) { if (!ablks[i]) continue; ext4_ext_blkfree(ip, ablks[i], 1, 0); } } free(ablks, M_EXT2EXTENTS); return (error); } static int ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path, struct ext4_extent *newext) { struct m_ext2fs *fs; struct ext4_extent_path *curpath; struct ext4_extent_header *neh; struct ext4_extent_index *fidx; struct buf *bp; e4fs_daddr_t newblk; int error = 0; fs = ip->i_e2fs; curpath = path; newblk = ext4_ext_alloc_meta(ip); if (newblk == 0) return (error); bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0); if (!bp) return (EIO); /* Move top-level index/leaf into new block. 
*/ memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data)); /* Set size of new block */ neh = ext4_ext_block_header(bp->b_data); neh->eh_magic = EXT4_EXT_MAGIC; if (ext4_ext_inode_depth(ip)) neh->eh_max = ext4_ext_space_block_index(ip); else neh->eh_max = ext4_ext_space_block(ip); error = bwrite(bp); if (error) goto out; bp = NULL; curpath->ep_header->eh_magic = EXT4_EXT_MAGIC; curpath->ep_header->eh_max = ext4_ext_space_root(ip); curpath->ep_header->eh_ecount = 1; curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header); curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk; ext4_index_store_pblock(curpath->ep_index, newblk); neh = ext4_ext_inode_header(ip); fidx = EXT_FIRST_INDEX(neh); neh->eh_depth = path->ep_depth + 1; ext4_ext_dirty(ip, curpath); out: brelse(bp); return (error); } static int ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path, struct ext4_extent *newext) { struct m_ext2fs *fs; struct ext4_extent_path *curpath; int depth, i, error; fs = ip->i_e2fs; repeat: i = depth = ext4_ext_inode_depth(ip); /* Look for free index entry int the tree */ curpath = path + depth; while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) { i--; curpath--; } /* * We use already allocated block for index block, * so subsequent data blocks should be contiguous. */ if (EXT_HAS_FREE_INDEX(curpath)) { error = ext4_ext_split(ip, path, newext, i); if (error) goto out; /* Refill path. */ ext4_ext_drop_refs(path); error = ext4_ext_find_extent(ip, newext->e_blk, &path); if (error) goto out; } else { /* Tree is full, do grow in depth. */ error = ext4_ext_grow_indepth(ip, path, newext); if (error) goto out; /* Refill path. */ ext4_ext_drop_refs(path); error = ext4_ext_find_extent(ip, newext->e_blk, &path); if (error) goto out; /* Check and split tree if required. */ depth = ext4_ext_inode_depth(ip); if (path[depth].ep_header->eh_ecount == path[depth].ep_header->eh_max) goto repeat; } out: return (error); } static int ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path) { struct ext4_extent_header *eh; struct ext4_extent *ex; int32_t border; int depth, k; depth = ext4_ext_inode_depth(ip); eh = path[depth].ep_header; ex = path[depth].ep_ext; if (ex == NULL || eh == NULL) return (EIO); if (!depth) return (0); /* We will correct tree if first leaf got modified only. */ if (ex != EXT_FIRST_EXTENT(eh)) return (0); k = depth - 1; border = path[depth].ep_ext->e_blk; path[k].ep_index->ei_blk = border; ext4_ext_dirty(ip, path + k); while (k--) { /* Change all left-side indexes. */ if (path[k+1].ep_index != EXT_FIRST_INDEX(path[k+1].ep_header)) break; path[k].ep_index->ei_blk = border; ext4_ext_dirty(ip, path + k); } return (0); } static int ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path, struct ext4_extent *newext) { struct m_ext2fs *fs; struct ext4_extent_header * eh; struct ext4_extent *ex, *nex, *nearex; struct ext4_extent_path *npath; int depth, len, error, next; fs = ip->i_e2fs; depth = ext4_ext_inode_depth(ip); ex = path[depth].ep_ext; npath = NULL; if (newext->e_len == 0 || path[depth].ep_header == NULL) return (EINVAL); /* Insert block into found extent. 
*/ if (ex && ext4_can_extents_be_merged(ex, newext)) { ex->e_len = ex->e_len + newext->e_len; eh = path[depth].ep_header; nearex = ex; goto merge; } repeat: depth = ext4_ext_inode_depth(ip); eh = path[depth].ep_header; if (eh->eh_ecount < eh->eh_max) goto has_space; /* Try next leaf */ nex = EXT_LAST_EXTENT(eh); next = ext4_ext_next_leaf_block(ip, path); if (newext->e_blk > nex->e_blk && next != EXT4_MAX_BLOCKS) { KASSERT(npath == NULL, ("ext4_ext_insert_extent: bad path")); error = ext4_ext_find_extent(ip, next, &npath); if (error) goto cleanup; if (npath->ep_depth != path->ep_depth) { error = EIO; goto cleanup; } eh = npath[depth].ep_header; if (eh->eh_ecount < eh->eh_max) { path = npath; goto repeat; } } /* * There is no free space in the found leaf, * try to add a new leaf to the tree. */ error = ext4_ext_create_new_leaf(ip, path, newext); if (error) goto cleanup; depth = ext4_ext_inode_depth(ip); eh = path[depth].ep_header; has_space: nearex = path[depth].ep_ext; if (!nearex) { /* Create new extent in the leaf. */ path[depth].ep_ext = EXT_FIRST_EXTENT(eh); } else if (newext->e_blk > nearex->e_blk) { if (nearex != EXT_LAST_EXTENT(eh)) { len = EXT_MAX_EXTENT(eh) - nearex; len = (len - 1) * sizeof(struct ext4_extent); len = len < 0 ? 0 : len; memmove(nearex + 2, nearex + 1, len); } path[depth].ep_ext = nearex + 1; } else { len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent); len = len < 0 ? 0 : len; memmove(nearex + 1, nearex, len); path[depth].ep_ext = nearex; } eh->eh_ecount = eh->eh_ecount + 1; nearex = path[depth].ep_ext; nearex->e_blk = newext->e_blk; nearex->e_start_lo = newext->e_start_lo; nearex->e_start_hi = newext->e_start_hi; nearex->e_len = newext->e_len; merge: /* Try to merge extents to the right. */ while (nearex < EXT_LAST_EXTENT(eh)) { if (!ext4_can_extents_be_merged(nearex, nearex + 1)) break; /* Merge with next extent. */ nearex->e_len = nearex->e_len + nearex[1].e_len; if (nearex + 1 < EXT_LAST_EXTENT(eh)) { len = (EXT_LAST_EXTENT(eh) - nearex - 1) * sizeof(struct ext4_extent); memmove(nearex + 1, nearex + 2, len); } eh->eh_ecount = eh->eh_ecount - 1; KASSERT(eh->eh_ecount != 0, ("ext4_ext_insert_extent: bad ecount")); } /* * Try to merge extents to the left, * start from inexes correction. */ error = ext4_ext_correct_indexes(ip, path); if (error) goto cleanup; ext4_ext_dirty(ip, path + depth); cleanup: if (npath) { ext4_ext_drop_refs(npath); free(npath, M_EXT2EXTENTS); } ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO; return (error); } static e4fs_daddr_t ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref, struct ucred *cred, unsigned long *count, int *perror) { struct m_ext2fs *fs; struct ext2mount *ump; e4fs_daddr_t newblk; fs = ip->i_e2fs; ump = ip->i_ump; /* * We will allocate only single block for now. */ if (*count > 1) return (0); EXT2_LOCK(ip->i_ump); *perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk); if (*perror) return (0); if (newblk) { ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_update(ip->i_vnode, 1); } return (newblk); } int ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk, unsigned long max_blocks, struct ucred *cred, struct buf **bpp, - int *pallocated, uint32_t *nb) + int *pallocated, daddr_t *nb) { struct m_ext2fs *fs; struct buf *bp = NULL; struct ext4_extent_path *path; struct ext4_extent newex, *ex; e4fs_daddr_t bpref, newblk = 0; unsigned long allocated = 0; int error = 0, depth; fs = ip->i_e2fs; *pallocated = 0; path = NULL; if(bpp) *bpp = NULL; /* Check cache. 
*/ if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) { if (bpref == EXT4_EXT_CACHE_IN) { /* Block is already allocated. */ newblk = iblk - newex.e_blk + ext4_ext_extent_pblock(&newex); allocated = newex.e_len - (iblk - newex.e_blk); goto out; } else { error = EIO; goto out2; } } error = ext4_ext_find_extent(ip, iblk, &path); if (error) { goto out2; } depth = ext4_ext_inode_depth(ip); if (path[depth].ep_ext == NULL && depth != 0) { error = EIO; goto out2; } if ((ex = path[depth].ep_ext)) { uint64_t lblk = ex->e_blk; uint16_t e_len = ex->e_len; e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex); if (e_len > EXT4_MAX_LEN) goto out2; /* If we found extent covers block, simply return it. */ if (iblk >= lblk && iblk < lblk + e_len) { newblk = iblk - lblk + e_start; allocated = e_len - (iblk - lblk); ext4_ext_put_in_cache(ip, lblk, e_len, e_start, EXT4_EXT_CACHE_IN); goto out; } } /* Allocate the new block. */ if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) { ip->i_next_alloc_goal = 0; } bpref = ext4_ext_blkpref(ip, path, iblk); allocated = max_blocks; newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error); if (!newblk) goto out2; /* Try to insert new extent into found leaf and return. */ newex.e_blk = iblk; ext4_ext_store_pblock(&newex, newblk); newex.e_len = allocated; error = ext4_ext_insert_extent(ip, path, &newex); if (error) goto out2; newblk = ext4_ext_extent_pblock(&newex); ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN); *pallocated = 1; out: if (allocated > max_blocks) allocated = max_blocks; if (bpp) { error = bread(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, cred, &bp); if (error) { brelse(bp); } else { *bpp = bp; } } out2: if (path) { ext4_ext_drop_refs(path); free(path, M_EXT2EXTENTS); } if (nb) *nb = newblk; return (error); } static inline uint16_t ext4_ext_get_actual_len(struct ext4_extent *ext) { return (ext->e_len <= EXT_INIT_MAX_LEN ? ext->e_len : (ext->e_len - EXT_INIT_MAX_LEN)); } static inline struct ext4_extent_header * ext4_ext_header(struct inode *ip) { return (struct ext4_extent_header *)ip->i_db; } static int ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex, unsigned long from, unsigned long to) { unsigned long num, start; if (from >= ex->e_blk && to == ex->e_blk + ext4_ext_get_actual_len(ex) - 1) { /* Tail cleanup. */ num = ex->e_blk + ext4_ext_get_actual_len(ex) - from; start = ext4_ext_extent_pblock(ex) + ext4_ext_get_actual_len(ex) - num; ext4_ext_blkfree(ip, start, num, 0); } return (0); } static int ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path) { e4fs_daddr_t leaf; /* Free index block. 
*/ path--; leaf = ext4_ext_index_pblock(path->ep_index); KASSERT(path->ep_header->eh_ecount != 0, ("ext4_ext_rm_index: bad ecount")); path->ep_header->eh_ecount--; ext4_ext_dirty(ip, path); ext4_ext_blkfree(ip, leaf, 1, 0); return (0); } static int ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path, uint64_t start) { struct m_ext2fs *fs; int depth; struct ext4_extent_header *eh; unsigned int a, b, block, num; unsigned long ex_blk; unsigned short ex_len; struct ext4_extent *ex; int error, correct_index; fs = ip->i_e2fs; depth = ext4_ext_inode_depth(ip); correct_index = 0; if (!path[depth].ep_header) { if (path[depth].ep_data == NULL) return (EINVAL); path[depth].ep_header = (struct ext4_extent_header* )path[depth].ep_data; } eh = path[depth].ep_header; if (!eh) { ext2_fserr(fs, ip->i_uid, "bad header => extent corrupted"); return (EIO); } ex = EXT_LAST_EXTENT(eh); ex_blk = ex->e_blk; ex_len = ext4_ext_get_actual_len(ex); while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) { path[depth].ep_ext = ex; a = ex_blk > start ? ex_blk : start; b = (uint64_t)ex_blk + ex_len - 1 < EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS; if (a != ex_blk && b != ex_blk + ex_len - 1) return (EINVAL); else if (a != ex_blk) { /* Remove tail of the extent. */ block = ex_blk; num = a - block; } else if (b != ex_blk + ex_len - 1) { /* Remove head of the extent, not implemented. */ return (EINVAL); } else { /* Remove whole extent. */ block = ex_blk; num = 0; } if (ex == EXT_FIRST_EXTENT(eh)) correct_index = 1; error = ext4_remove_blocks(ip, ex, a, b); if (error) goto out; if (num == 0) { ext4_ext_store_pblock(ex, 0); eh->eh_ecount--; } ex->e_blk = block; ex->e_len = num; ext4_ext_dirty(ip, path + depth); ex--; ex_blk = ex->e_blk; ex_len = ext4_ext_get_actual_len(ex); }; if (correct_index && eh->eh_ecount) error = ext4_ext_correct_indexes(ip, path); /* * If this leaf is free, we should * remove it from index block above. 
*/ if (error == 0 && eh->eh_ecount == 0 && path[depth].ep_data != NULL) error = ext4_ext_rm_index(ip, path + depth); out: return (error); } static struct buf * ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk, int depth, int flags) { struct m_ext2fs *fs; struct ext4_extent_header *eh; struct buf *bp; int error; fs = ip->i_e2fs; error = bread(ip->i_devvp, fsbtodb(fs, pblk), fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (NULL); } eh = ext4_ext_block_header(bp->b_data); if (eh->eh_depth != depth) { ext2_fserr(fs, ip->i_uid, "unexpected eh_depth"); goto err; } error = ext4_ext_check_header(ip, eh); if (error) goto err; return (bp); err: brelse(bp); return (NULL); } static int inline ext4_ext_more_to_rm(struct ext4_extent_path *path) { KASSERT(path->ep_index != NULL, ("ext4_ext_more_to_rm: bad index from path")); if (path->ep_index < EXT_FIRST_INDEX(path->ep_header)) return (0); if (path->ep_header->eh_ecount == path->index_count) return (0); return (1); } int ext4_ext_remove_space(struct inode *ip, off_t length, int flags, struct ucred *cred, struct thread *td) { struct buf *bp; struct ext4_extent_header *ehp; struct ext4_extent_path *path; int depth; int i, error; ehp = (struct ext4_extent_header *)ip->i_db; depth = ext4_ext_inode_depth(ip); error = ext4_ext_check_header(ip, ehp); if(error) return (error); path = malloc(sizeof(struct ext4_extent_path) * (depth + 1), M_EXT2EXTENTS, M_WAITOK | M_ZERO); if (!path) return (ENOMEM); i = 0; path[0].ep_header = ehp; path[0].ep_depth = depth; while (i >= 0 && error == 0) { if (i == depth) { /* This is leaf. */ error = ext4_ext_rm_leaf(ip, path, length); if (error) break; free(path[i].ep_data, M_EXT2EXTENTS); path[i].ep_data = NULL; i--; continue; } /* This is index. */ if (!path[i].ep_header) path[i].ep_header = (struct ext4_extent_header *)path[i].ep_data; if (!path[i].ep_index) { /* This level hasn't touched yet. */ path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header); path[i].index_count = path[i].ep_header->eh_ecount + 1; } else { /* We've already was here, see at next index. */ path[i].ep_index--; } if (ext4_ext_more_to_rm(path + i)) { memset(path + i + 1, 0, sizeof(*path)); bp = ext4_read_extent_tree_block(ip, ext4_ext_index_pblock(path[i].ep_index), path[0].ep_depth - (i + 1), 0); if (!bp) { error = EIO; break; } ext4_ext_fill_path_bdata(&path[i+1], bp, ext4_ext_index_pblock(path[i].ep_index)); brelse(bp); path[i].index_count = path[i].ep_header->eh_ecount; i++; } else { if (path[i].ep_header->eh_ecount == 0 && i > 0) { /* Index is empty, remove it. */ error = ext4_ext_rm_index(ip, path + i); } free(path[i].ep_data, M_EXT2EXTENTS); path[i].ep_data = NULL; i--; } } if (path->ep_header->eh_ecount == 0) { /* * Truncate the tree to zero. */ ext4_ext_header(ip)->eh_depth = 0; ext4_ext_header(ip)->eh_max = ext4_ext_space_root(ip); ext4_ext_dirty(ip, path); } ext4_ext_drop_refs(path); free(path, M_EXT2EXTENTS); return (error); } Index: head/sys/fs/ext2fs/ext2_extents.h =================================================================== --- head/sys/fs/ext2fs/ext2_extents.h (revision 327583) +++ head/sys/fs/ext2fs/ext2_extents.h (revision 327584) @@ -1,128 +1,129 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012, 2010 Zheng Liu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _FS_EXT2FS_EXT2_EXTENTS_H_ #define _FS_EXT2FS_EXT2_EXTENTS_H_ #include #define EXT4_EXT_MAGIC 0xf30a #define EXT4_MAX_BLOCKS 0xffffffff #define EXT_INIT_MAX_LEN (1UL << 15) #define EXT4_MAX_LEN (EXT_INIT_MAX_LEN - 1) #define EXT4_EXT_DEPTH_MAX 5 #define EXT4_EXT_CACHE_NO 0 #define EXT4_EXT_CACHE_GAP 1 #define EXT4_EXT_CACHE_IN 2 /* * Ext4 file system extent on disk. */ struct ext4_extent { uint32_t e_blk; /* first logical block */ uint16_t e_len; /* number of blocks */ uint16_t e_start_hi; /* high 16 bits of physical block */ uint32_t e_start_lo; /* low 32 bits of physical block */ }; /* * Extent index on disk. */ struct ext4_extent_index { uint32_t ei_blk; /* indexes logical blocks */ uint32_t ei_leaf_lo; /* points to physical block of the * next level */ uint16_t ei_leaf_hi; /* high 16 bits of physical block */ uint16_t ei_unused; }; /* * Extent tree header. */ struct ext4_extent_header { uint16_t eh_magic; /* magic number: 0xf30a */ uint16_t eh_ecount; /* number of valid entries */ uint16_t eh_max; /* capacity of store in entries */ uint16_t eh_depth; /* the depth of extent tree */ uint32_t eh_gen; /* generation of extent tree */ }; /* * Save cached extent. */ struct ext4_extent_cache { daddr_t ec_start; /* extent start */ uint32_t ec_blk; /* logical block */ uint32_t ec_len; uint32_t ec_type; }; /* * Save path to some extent. 
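Each on-disk record declared here (ext4_extent, ext4_extent_index, ext4_extent_header) packs into 12 bytes on the usual ABIs, which is what makes the capacity arithmetic in ext4_ext_space_root() and ext4_ext_space_block() come out even. A small sketch checking those sizes; the 15 * 4 byte figure assumes the inode's usual 12 direct plus 3 indirect 32-bit block slots, and the 4096-byte block size is only an example.

#include <stdint.h>
#include <stdio.h>

/* Copies of the layouts declared above, for a user-space size check. */
struct ext4_extent {
	uint32_t e_blk;
	uint16_t e_len;
	uint16_t e_start_hi;
	uint32_t e_start_lo;
};
struct ext4_extent_index {
	uint32_t ei_blk;
	uint32_t ei_leaf_lo;
	uint16_t ei_leaf_hi;
	uint16_t ei_unused;
};
struct ext4_extent_header {
	uint16_t eh_magic;
	uint16_t eh_ecount;
	uint16_t eh_max;
	uint16_t eh_depth;
	uint32_t eh_gen;
};

int
main(void)
{
	int root_bytes = 15 * 4;	/* i_db + i_ib slots in the inode */
	int bsize = 4096;		/* example block size */

	printf("record sizes: %zu %zu %zu\n", sizeof(struct ext4_extent),
	    sizeof(struct ext4_extent_index),
	    sizeof(struct ext4_extent_header));		/* 12 12 12 */
	printf("extents in the inode root: %d\n",
	    (root_bytes - (int)sizeof(struct ext4_extent_header)) /
	    (int)sizeof(struct ext4_extent));		/* 4 */
	printf("extents per %d-byte block: %d\n", bsize,
	    (bsize - (int)sizeof(struct ext4_extent_header)) /
	    (int)sizeof(struct ext4_extent));		/* 340 */
	return (0);
}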
*/ struct ext4_extent_path { int index_count; uint16_t ep_depth; uint64_t ep_blk; char *ep_data; struct ext4_extent *ep_ext; struct ext4_extent_index *ep_index; struct ext4_extent_header *ep_header; }; #define EXT_FIRST_EXTENT(hdr) ((struct ext4_extent *)(((char *)(hdr)) + \ sizeof(struct ext4_extent_header))) #define EXT_FIRST_INDEX(hdr) ((struct ext4_extent_index *)(((char *)(hdr)) + \ sizeof(struct ext4_extent_header))) #define EXT_LAST_EXTENT(hdr) (EXT_FIRST_EXTENT((hdr)) + (hdr)->eh_ecount - 1) #define EXT_LAST_INDEX(hdr) (EXT_FIRST_INDEX((hdr)) + (hdr)->eh_ecount - 1) #define EXT4_EXTENT_TAIL_OFFSET(hdr) (sizeof(struct ext4_extent_header) + \ (sizeof(struct ext4_extent) * (hdr)->eh_max)) #define EXT_HAS_FREE_INDEX(path) \ ((path)->ep_header->eh_ecount < (path)->ep_header->eh_max) #define EXT_MAX_EXTENT(hdr) (EXT_FIRST_EXTENT(hdr) + ((hdr)->eh_max) - 1) #define EXT_MAX_INDEX(hdr) (EXT_FIRST_INDEX((hdr)) + (hdr)->eh_max - 1) struct inode; struct m_ext2fs; void ext4_ext_tree_init(struct inode *ip); int ext4_ext_in_cache(struct inode *, daddr_t, struct ext4_extent *); void ext4_ext_put_cache(struct inode *, struct ext4_extent *, int); int ext4_ext_find_extent(struct inode *, daddr_t, struct ext4_extent_path **); void ext4_ext_path_free(struct ext4_extent_path *path); int ext4_ext_remove_space(struct inode *ip, off_t length, int flags, struct ucred *cred, struct thread *td); int ext4_ext_get_blocks(struct inode *ip, int64_t iblock, - unsigned long max_blocks, struct ucred *cred, struct buf **bpp, int *allocate, uint32_t *); + unsigned long max_blocks, struct ucred *cred, struct buf **bpp, + int *allocate, daddr_t *); #ifdef EXT2FS_DEBUG void ext4_ext_print_extent_tree_status(struct inode * ip); #endif #endif /* !_FS_EXT2FS_EXT2_EXTENTS_H_ */ Index: head/sys/fs/ext2fs/ext2_extern.h =================================================================== --- head/sys/fs/ext2fs/ext2_extern.h (revision 327583) +++ head/sys/fs/ext2fs/ext2_extern.h (revision 327584) @@ -1,120 +1,122 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1991, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_extern.h 8.3 (Berkeley) 4/16/94 * $FreeBSD$ */ #ifndef _FS_EXT2FS_EXT2_EXTERN_H_ #define _FS_EXT2FS_EXT2_EXTERN_H_ struct ext2fs_dinode; struct ext2fs_direct_2; struct ext2fs_searchslot; struct indir; struct inode; struct mount; struct vfsconf; struct vnode; int ext2_add_entry(struct vnode *, struct ext2fs_direct_2 *); int ext2_alloc(struct inode *, daddr_t, e4fs_daddr_t, int, struct ucred *, e4fs_daddr_t *); e4fs_daddr_t ext2_alloc_meta(struct inode *ip); int ext2_balloc(struct inode *, e2fs_lbn_t, int, struct ucred *, struct buf **, int); int ext2_blkatoff(struct vnode *, off_t, char **, struct buf **); void ext2_blkfree(struct inode *, e4fs_daddr_t, long); e4fs_daddr_t ext2_blkpref(struct inode *, e2fs_lbn_t, int, e2fs_daddr_t *, e2fs_daddr_t); int ext2_bmap(struct vop_bmap_args *); int ext2_bmaparray(struct vnode *, daddr_t, daddr_t *, int *, int *); int ext4_bmapext(struct vnode *, int32_t, int64_t *, int *, int *); void ext2_clusteracct(struct m_ext2fs *, char *, int, e4fs_daddr_t, int); void ext2_dirbad(struct inode *ip, doff_t offset, char *how); void ext2_fserr(struct m_ext2fs *, uid_t, char *); void ext2_ei2i(struct ext2fs_dinode *, struct inode *); int ext2_getlbns(struct vnode *, daddr_t, struct indir *, int *); int ext2_i2ei(struct inode *, struct ext2fs_dinode *); void ext2_itimes(struct vnode *vp); int ext2_reallocblks(struct vop_reallocblks_args *); int ext2_reclaim(struct vop_reclaim_args *); int ext2_truncate(struct vnode *, off_t, int, struct ucred *, struct thread *); int ext2_update(struct vnode *, int); int ext2_valloc(struct vnode *, int, struct ucred *, struct vnode **); int ext2_vfree(struct vnode *, ino_t, int); int ext2_vinit(struct mount *, struct vop_vector *, struct vnode **vpp); int ext2_lookup(struct vop_cachedlookup_args *); int ext2_readdir(struct vop_readdir_args *); #ifdef EXT2FS_DEBUG void ext2_print_inode(struct inode *); #endif int ext2_direnter(struct inode *, struct vnode *, struct componentname *); int ext2_dirremove(struct vnode *, struct componentname *); int ext2_dirrewrite(struct inode *, struct inode *, struct componentname *); int ext2_dirempty(struct inode *, ino_t, struct ucred *); int ext2_checkpath(struct inode *, struct inode *, struct ucred *); int ext2_cg_has_sb(struct m_ext2fs *fs, int cg); int ext2_inactive(struct vop_inactive_args *); int ext2_htree_add_entry(struct vnode *, struct ext2fs_direct_2 *, struct componentname *); int ext2_htree_create_index(struct vnode *, struct componentname *, struct ext2fs_direct_2 *); int ext2_htree_has_idx(struct inode *); int ext2_htree_hash(const char *, int, uint32_t *, int, uint32_t *, uint32_t *); int ext2_htree_lookup(struct inode *, const char *, int, struct buf **, int *, doff_t *, doff_t *, doff_t *, struct ext2fs_searchslot *); int ext2_search_dirblock(struct inode *, void *, int *, const char *, int, int *, doff_t *, doff_t *, doff_t *, struct ext2fs_searchslot *); +uint32_t e2fs_gd_get_ndirs(struct ext2_gd *gd); +uint64_t e2fs_gd_get_i_tables(struct ext2_gd *gd); 
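The EXT_FIRST_EXTENT()/EXT_LAST_EXTENT() macros defined in ext2_extents.h are plain pointer arithmetic: the entries sit immediately after the 12-byte header inside the same block buffer. A user-space sketch of that idiom; struct eh, struct ee and the FIRST_EE/LAST_EE macros are shortened stand-ins, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct eh { uint16_t magic, ecount, max, depth; uint32_t gen; };
struct ee { uint32_t blk; uint16_t len, start_hi; uint32_t start_lo; };

/* Same shape as EXT_FIRST_EXTENT()/EXT_LAST_EXTENT(). */
#define FIRST_EE(hdr)	((struct ee *)((char *)(hdr) + sizeof(struct eh)))
#define LAST_EE(hdr)	(FIRST_EE(hdr) + (hdr)->ecount - 1)

int
main(void)
{
	char block[4096];
	struct eh *hdr = (struct eh *)block;
	struct ee *ep;

	memset(block, 0, sizeof(block));
	hdr->ecount = 3;
	FIRST_EE(hdr)[0].blk = 0;
	FIRST_EE(hdr)[1].blk = 8;
	FIRST_EE(hdr)[2].blk = 32;

	for (ep = FIRST_EE(hdr); ep <= LAST_EE(hdr); ep++)
		printf("extent at logical block %u\n", ep->blk);
	return (0);
}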
int ext2_gd_csum_verify(struct m_ext2fs *fs, struct cdev *dev); void ext2_gd_csum_set(struct m_ext2fs *fs); /* Flags to low-level allocation routines. * The low 16-bits are reserved for IO_ flags from vnode.h. */ #define BA_CLRBUF 0x00010000 /* Clear invalid areas of buffer. */ #define BA_SEQMASK 0x7F000000 /* Bits holding seq heuristic. */ #define BA_SEQSHIFT 24 #define BA_SEQMAX 0x7F extern struct vop_vector ext2_vnodeops; extern struct vop_vector ext2_fifoops; #endif /* !_FS_EXT2FS_EXT2_EXTERN_H_ */ Index: head/sys/fs/ext2fs/ext2_hash.c =================================================================== --- head/sys/fs/ext2fs/ext2_hash.c (revision 327583) +++ head/sys/fs/ext2fs/ext2_hash.c (revision 327584) @@ -1,318 +1,319 @@ /*- * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND RSA-MD) * * Copyright (c) 2010, 2013 Zheng Liu * Copyright (c) 2012, Vyacheslav Matyushin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * The following notice applies to the code in ext2_half_md4(): * * Copyright (C) 1990-2, RSA Data Security, Inc. All rights reserved. * * License to copy and use this software is granted provided that it * is identified as the "RSA Data Security, Inc. MD4 Message-Digest * Algorithm" in all material mentioning or referencing this software * or this function. * * License is also granted to make and use derivative works provided * that such works are identified as "derived from the RSA Data * Security, Inc. MD4 Message-Digest Algorithm" in all material * mentioning or referencing the derived work. * * RSA Data Security, Inc. makes no representations concerning either * the merchantability of this software or the suitability of this * software for any particular purpose. It is provided "as is" * without express or implied warranty of any kind. * * These notices must be retained in any copies of any part of this * documentation and/or software. 
*/ #include #include #include #include #include #include +#include #include #include #include #include /* F, G, and H are MD4 functions */ #define F(x, y, z) (((x) & (y)) | ((~x) & (z))) #define G(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z))) #define H(x, y, z) ((x) ^ (y) ^ (z)) /* ROTATE_LEFT rotates x left n bits */ #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) /* * FF, GG, and HH are transformations for rounds 1, 2, and 3. * Rotation is separated from addition to prevent recomputation. */ #define FF(a, b, c, d, x, s) { \ (a) += F ((b), (c), (d)) + (x); \ (a) = ROTATE_LEFT ((a), (s)); \ } #define GG(a, b, c, d, x, s) { \ (a) += G ((b), (c), (d)) + (x) + (uint32_t)0x5A827999; \ (a) = ROTATE_LEFT ((a), (s)); \ } #define HH(a, b, c, d, x, s) { \ (a) += H ((b), (c), (d)) + (x) + (uint32_t)0x6ED9EBA1; \ (a) = ROTATE_LEFT ((a), (s)); \ } /* * MD4 basic transformation. It transforms state based on block. * * This is a half md4 algorithm since Linux uses this algorithm for dir * index. This function is derived from the RSA Data Security, Inc. MD4 * Message-Digest Algorithm and was modified as necessary. * * The return value of this function is uint32_t in Linux, but actually we don't * need to check this value, so in our version this function doesn't return any * value. */ static void ext2_half_md4(uint32_t hash[4], uint32_t data[8]) { uint32_t a = hash[0], b = hash[1], c = hash[2], d = hash[3]; /* Round 1 */ FF(a, b, c, d, data[0], 3); FF(d, a, b, c, data[1], 7); FF(c, d, a, b, data[2], 11); FF(b, c, d, a, data[3], 19); FF(a, b, c, d, data[4], 3); FF(d, a, b, c, data[5], 7); FF(c, d, a, b, data[6], 11); FF(b, c, d, a, data[7], 19); /* Round 2 */ GG(a, b, c, d, data[1], 3); GG(d, a, b, c, data[3], 5); GG(c, d, a, b, data[5], 9); GG(b, c, d, a, data[7], 13); GG(a, b, c, d, data[0], 3); GG(d, a, b, c, data[2], 5); GG(c, d, a, b, data[4], 9); GG(b, c, d, a, data[6], 13); /* Round 3 */ HH(a, b, c, d, data[3], 3); HH(d, a, b, c, data[7], 9); HH(c, d, a, b, data[2], 11); HH(b, c, d, a, data[6], 15); HH(a, b, c, d, data[1], 3); HH(d, a, b, c, data[5], 9); HH(c, d, a, b, data[0], 11); HH(b, c, d, a, data[4], 15); hash[0] += a; hash[1] += b; hash[2] += c; hash[3] += d; } /* * Tiny Encryption Algorithm. 
*/ static void ext2_tea(uint32_t hash[4], uint32_t data[8]) { uint32_t tea_delta = 0x9E3779B9; uint32_t sum; uint32_t x = hash[0], y = hash[1]; int n = 16; int i = 1; while (n-- > 0) { sum = i * tea_delta; x += ((y << 4) + data[0]) ^ (y + sum) ^ ((y >> 5) + data[1]); y += ((x << 4) + data[2]) ^ (x + sum) ^ ((x >> 5) + data[3]); i++; } hash[0] += x; hash[1] += y; } static uint32_t ext2_legacy_hash(const char *name, int len, int unsigned_char) { uint32_t h0, h1 = 0x12A3FE2D, h2 = 0x37ABE8F9; uint32_t multi = 0x6D22F5; const unsigned char *uname = (const unsigned char *)name; const signed char *sname = (const signed char *)name; int val, i; for (i = 0; i < len; i++) { if (unsigned_char) val = (u_int)*uname++; else val = (int)*sname++; h0 = h2 + (h1 ^ (val * multi)); if (h0 & 0x80000000) h0 -= 0x7FFFFFFF; h2 = h1; h1 = h0; } return (h1 << 1); } static void ext2_prep_hashbuf(const char *src, int slen, uint32_t *dst, int dlen, int unsigned_char) { uint32_t padding = slen | (slen << 8) | (slen << 16) | (slen << 24); uint32_t buf_val; const unsigned char *ubuf = (const unsigned char *)src; const signed char *sbuf = (const signed char *)src; int len, i; int buf_byte; if (slen > dlen) len = dlen; else len = slen; buf_val = padding; for (i = 0; i < len; i++) { if (unsigned_char) buf_byte = (u_int)ubuf[i]; else buf_byte = (int)sbuf[i]; if ((i % 4) == 0) buf_val = padding; buf_val <<= 8; buf_val += buf_byte; if ((i % 4) == 3) { *dst++ = buf_val; dlen -= sizeof(uint32_t); buf_val = padding; } } dlen -= sizeof(uint32_t); if (dlen >= 0) *dst++ = buf_val; dlen -= sizeof(uint32_t); while (dlen >= 0) { *dst++ = padding; dlen -= sizeof(uint32_t); } } int ext2_htree_hash(const char *name, int len, uint32_t *hash_seed, int hash_version, uint32_t *hash_major, uint32_t *hash_minor) { uint32_t hash[4]; uint32_t data[8]; uint32_t major = 0, minor = 0; int unsigned_char = 0; if (!name || !hash_major) return (-1); if (len < 1 || len > 255) goto error; hash[0] = 0x67452301; hash[1] = 0xEFCDAB89; hash[2] = 0x98BADCFE; hash[3] = 0x10325476; if (hash_seed) memcpy(hash, hash_seed, sizeof(hash)); switch (hash_version) { case EXT2_HTREE_TEA_UNSIGNED: unsigned_char = 1; /* FALLTHROUGH */ case EXT2_HTREE_TEA: while (len > 0) { ext2_prep_hashbuf(name, len, data, 16, unsigned_char); ext2_tea(hash, data); len -= 16; name += 16; } major = hash[0]; minor = hash[1]; break; case EXT2_HTREE_LEGACY_UNSIGNED: unsigned_char = 1; /* FALLTHROUGH */ case EXT2_HTREE_LEGACY: major = ext2_legacy_hash(name, len, unsigned_char); break; case EXT2_HTREE_HALF_MD4_UNSIGNED: unsigned_char = 1; /* FALLTHROUGH */ case EXT2_HTREE_HALF_MD4: while (len > 0) { ext2_prep_hashbuf(name, len, data, 32, unsigned_char); ext2_half_md4(hash, data); len -= 32; name += 32; } major = hash[1]; minor = hash[2]; break; default: goto error; } major &= ~1; if (major == (EXT2_HTREE_EOF << 1)) major = (EXT2_HTREE_EOF - 1) << 1; *hash_major = major; if (hash_minor) *hash_minor = minor; return (0); error: *hash_major = 0; if (hash_minor) *hash_minor = 0; return (-1); } Index: head/sys/fs/ext2fs/ext2_subr.c =================================================================== --- head/sys/fs/ext2fs/ext2_subr.c (revision 327583) +++ head/sys/fs/ext2fs/ext2_subr.c (revision 327584) @@ -1,194 +1,194 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. 
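For a feel of the dirhash interface that ext2_htree_hash() wraps, the sketch below is a user-space copy of the signed-char legacy hash shown just above, applied to a directory entry name; clearing the low bit of the major hash at the end mirrors what ext2_htree_hash() does before returning. It is only an illustration, not a replacement for the kernel entry point.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t
legacy_hash(const char *name, int len)
{
	uint32_t h0, h1 = 0x12A3FE2D, h2 = 0x37ABE8F9;
	uint32_t multi = 0x6D22F5;
	const signed char *sname = (const signed char *)name;
	int val, i;

	for (i = 0; i < len; i++) {
		val = (int)*sname++;
		h0 = h2 + (h1 ^ (val * multi));
		if (h0 & 0x80000000)
			h0 -= 0x7FFFFFFF;
		h2 = h1;
		h1 = h0;
	}
	return (h1 << 1);
}

int
main(void)
{
	const char *name = "lost+found";
	uint32_t major = legacy_hash(name, (int)strlen(name));

	major &= ~1;	/* ext2_htree_hash() clears the low bit of the major hash */
	printf("legacy hash of \"%s\" = 0x%08x\n", name, major);
	return (0);
}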
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_subr.c 8.2 (Berkeley) 9/21/93 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include -#include #include +#include #include #include #include #include /* * Return buffer with the contents of block "offset" from the beginning of * directory "ip". If "res" is non-zero, fill it in with a pointer to the * remaining space in the directory. */ int ext2_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp) { struct inode *ip; struct m_ext2fs *fs; struct buf *bp; e2fs_lbn_t lbn; int error, bsize; ip = VTOI(vp); fs = ip->i_e2fs; lbn = lblkno(fs, offset); bsize = blksize(fs, ip, lbn); if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } if (res) *res = (char *)bp->b_data + blkoff(fs, offset); *bpp = bp; return (0); } /* * Update the cluster map because of an allocation of free like ffs. * * Cnt == 1 means free; cnt == -1 means allocating. */ void ext2_clusteracct(struct m_ext2fs *fs, char *bbp, int cg, e4fs_daddr_t bno, int cnt) { int32_t *sump = fs->e2fs_clustersum[cg].cs_sum; int32_t *lp; e4fs_daddr_t start, end, loc, forw, back; int bit, i; /* Initialize the cluster summary array. */ if (fs->e2fs_clustersum[cg].cs_init == 0) { int run = 0; bit = 1; loc = 0; for (i = 0; i < fs->e2fs->e2fs_fpg; i++) { if ((bbp[loc] & bit) == 0) run++; else if (run != 0) { if (run > fs->e2fs_contigsumsize) run = fs->e2fs_contigsumsize; sump[run]++; run = 0; } if ((i & (NBBY - 1)) != (NBBY - 1)) bit <<= 1; else { loc++; bit = 1; } } if (run != 0) { if (run > fs->e2fs_contigsumsize) run = fs->e2fs_contigsumsize; sump[run]++; } fs->e2fs_clustersum[cg].cs_init = 1; } if (fs->e2fs_contigsumsize <= 0) return; /* Find the size of the cluster going forward. 
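The initialization pass in ext2_clusteracct() above walks the block bitmap bit by bit, treating clear bits as free blocks and crediting each run of free blocks to the summary slot for its (clamped) length. A stand-alone sketch of that scan; CONTIGSUMSIZE and the sample bitmap are example values only.

#include <stdio.h>

#define NBBY		8	/* bits per byte, as in the kernel */
#define CONTIGSUMSIZE	4	/* example clamp, like e2fs_contigsumsize */

static void
count_free_runs(const unsigned char *bbp, int nblocks, int *sump)
{
	int run = 0, bit = 1, loc = 0, i;

	for (i = 0; i < nblocks; i++) {
		if ((bbp[loc] & bit) == 0)
			run++;				/* free block extends run */
		else if (run != 0) {
			if (run > CONTIGSUMSIZE)
				run = CONTIGSUMSIZE;
			sump[run]++;			/* close the run */
			run = 0;
		}
		if ((i & (NBBY - 1)) != (NBBY - 1))
			bit <<= 1;
		else {
			loc++;				/* next bitmap byte */
			bit = 1;
		}
	}
	if (run != 0)
		sump[run > CONTIGSUMSIZE ? CONTIGSUMSIZE : run]++;
}

int
main(void)
{
	/* 16 blocks: 0xF0 = blocks 0-3 free; 0xCC = blocks 8,9 and 12,13 free. */
	unsigned char bitmap[2] = { 0xF0, 0xCC };
	int sump[CONTIGSUMSIZE + 1] = { 0 }, i;

	count_free_runs(bitmap, 16, sump);
	for (i = 1; i <= CONTIGSUMSIZE; i++)
		printf("runs of length %d: %d\n", i, sump[i]);
	return (0);	/* reports one run of 4 and two runs of 2 */
}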
*/ start = bno + 1; end = start + fs->e2fs_contigsumsize; if (end > fs->e2fs->e2fs_fpg) end = fs->e2fs->e2fs_fpg; loc = start / NBBY; bit = 1 << (start % NBBY); for (i = start; i < end; i++) { if ((bbp[loc] & bit) != 0) break; if ((i & (NBBY - 1)) != (NBBY - 1)) bit <<= 1; else { loc++; bit = 1; } } forw = i - start; /* Find the size of the cluster going backward. */ start = bno - 1; end = start - fs->e2fs_contigsumsize; if (end < 0) end = -1; loc = start / NBBY; bit = 1 << (start % NBBY); for (i = start; i > end; i--) { if ((bbp[loc] & bit) != 0) break; if ((i & (NBBY - 1)) != 0) bit >>= 1; else { loc--; bit = 1 << (NBBY - 1); } } back = start - i; /* * Account for old cluster and the possibly new forward and * back clusters. */ i = back + forw + 1; if (i > fs->e2fs_contigsumsize) i = fs->e2fs_contigsumsize; sump[i] += cnt; if (back > 0) sump[back] -= cnt; if (forw > 0) sump[forw] -= cnt; /* Update cluster summary information. */ lp = &sump[fs->e2fs_contigsumsize]; for (i = fs->e2fs_contigsumsize; i > 0; i--) if (*lp-- > 0) break; fs->e2fs_maxcluster[cg] = i; } Index: head/sys/fs/ext2fs/ext2_vfsops.c =================================================================== --- head/sys/fs/ext2fs/ext2_vfsops.c (revision 327583) +++ head/sys/fs/ext2fs/ext2_vfsops.c (revision 327584) @@ -1,1131 +1,1168 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1991, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)ffs_vfsops.c 8.8 (Berkeley) 4/18/94 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int ext2_flushfiles(struct mount *mp, int flags, struct thread *td); static int ext2_mountfs(struct vnode *, struct mount *); static int ext2_reload(struct mount *mp, struct thread *td); static int ext2_sbupdate(struct ext2mount *, int); static int ext2_cgupdate(struct ext2mount *, int); static vfs_unmount_t ext2_unmount; static vfs_root_t ext2_root; static vfs_statfs_t ext2_statfs; static vfs_sync_t ext2_sync; static vfs_vget_t ext2_vget; static vfs_fhtovp_t ext2_fhtovp; static vfs_mount_t ext2_mount; MALLOC_DEFINE(M_EXT2NODE, "ext2_node", "EXT2 vnode private part"); static MALLOC_DEFINE(M_EXT2MNT, "ext2_mount", "EXT2 mount structure"); static struct vfsops ext2fs_vfsops = { .vfs_fhtovp = ext2_fhtovp, .vfs_mount = ext2_mount, .vfs_root = ext2_root, /* root inode via vget */ .vfs_statfs = ext2_statfs, .vfs_sync = ext2_sync, .vfs_unmount = ext2_unmount, .vfs_vget = ext2_vget, }; VFS_SET(ext2fs_vfsops, ext2fs, 0); static int ext2_check_sb_compat(struct ext2fs *es, struct cdev *dev, int ronly); static int compute_sb_data(struct vnode * devvp, struct ext2fs * es, struct m_ext2fs * fs); static const char *ext2_opts[] = { "acls", "async", "noatime", "noclusterr", "noclusterw", "noexec", "export", "force", "from", "multilabel", "suiddir", "nosymfollow", "sync", "union", NULL }; /* * VFS Operations. * * mount system call */ static int ext2_mount(struct mount *mp) { struct vfsoptlist *opts; struct vnode *devvp; struct thread *td; struct ext2mount *ump = NULL; struct m_ext2fs *fs; struct nameidata nd, *ndp = &nd; accmode_t accmode; char *path, *fspec; int error, flags, len; td = curthread; opts = mp->mnt_optnew; if (vfs_filteropt(opts, ext2_opts)) return (EINVAL); vfs_getopt(opts, "fspath", (void **)&path, NULL); /* Double-check the length of path.. */ if (strlen(path) >= MAXMNTLEN) return (ENAMETOOLONG); fspec = NULL; error = vfs_getopt(opts, "from", (void **)&fspec, &len); if (!error && fspec[len - 1] != '\0') return (EINVAL); /* * If updating, check whether changing from read-only to * read/write; if there is no device name, that's all we do. */ if (mp->mnt_flag & MNT_UPDATE) { ump = VFSTOEXT2(mp); fs = ump->um_e2fs; error = 0; if (fs->e2fs_ronly == 0 && vfs_flagopt(opts, "ro", NULL, 0)) { error = VFS_SYNC(mp, MNT_WAIT); if (error) return (error); flags = WRITECLOSE; if (mp->mnt_flag & MNT_FORCE) flags |= FORCECLOSE; error = ext2_flushfiles(mp, flags, td); if (error == 0 && fs->e2fs_wasvalid && ext2_cgupdate(ump, MNT_WAIT) == 0) { fs->e2fs->e2fs_state |= E2FS_ISCLEAN; ext2_sbupdate(ump, MNT_WAIT); } fs->e2fs_ronly = 1; vfs_flagopt(opts, "ro", &mp->mnt_flag, MNT_RDONLY); g_topology_lock(); g_access(ump->um_cp, 0, -1, 0); g_topology_unlock(); } if (!error && (mp->mnt_flag & MNT_RELOAD)) error = ext2_reload(mp, td); if (error) return (error); devvp = ump->um_devvp; if (fs->e2fs_ronly && !vfs_flagopt(opts, "ro", NULL, 0)) { if (ext2_check_sb_compat(fs->e2fs, devvp->v_rdev, 0)) return (EPERM); /* * If upgrade to read-write by non-root, then verify * that user has necessary permissions on the device. 
*/ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_ACCESS(devvp, VREAD | VWRITE, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { VOP_UNLOCK(devvp, 0); return (error); } VOP_UNLOCK(devvp, 0); g_topology_lock(); error = g_access(ump->um_cp, 0, 1, 0); g_topology_unlock(); if (error) return (error); if ((fs->e2fs->e2fs_state & E2FS_ISCLEAN) == 0 || (fs->e2fs->e2fs_state & E2FS_ERRORS)) { if (mp->mnt_flag & MNT_FORCE) { printf( "WARNING: %s was not properly dismounted\n", fs->e2fs_fsmnt); } else { printf( "WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n", fs->e2fs_fsmnt); return (EPERM); } } fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN; (void)ext2_cgupdate(ump, MNT_WAIT); fs->e2fs_ronly = 0; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_RDONLY; MNT_IUNLOCK(mp); } if (vfs_flagopt(opts, "export", NULL, 0)) { /* Process export requests in vfs_mount.c. */ return (error); } } /* * Not an update, or updating the name: look up the name * and verify that it refers to a sensible disk device. */ if (fspec == NULL) return (EINVAL); NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); if ((error = namei(ndp)) != 0) return (error); NDFREE(ndp, NDF_ONLY_PNBUF); devvp = ndp->ni_vp; if (!vn_isdisk(devvp, &error)) { vput(devvp); return (error); } /* * If mount by non-root, then verify that user has necessary * permissions on the device. * * XXXRW: VOP_ACCESS() enough? */ accmode = VREAD; if ((mp->mnt_flag & MNT_RDONLY) == 0) accmode |= VWRITE; error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { vput(devvp); return (error); } if ((mp->mnt_flag & MNT_UPDATE) == 0) { error = ext2_mountfs(devvp, mp); } else { if (devvp != ump->um_devvp) { vput(devvp); return (EINVAL); /* needs translation */ } else vput(devvp); } if (error) { vrele(devvp); return (error); } ump = VFSTOEXT2(mp); fs = ump->um_e2fs; /* * Note that this strncpy() is ok because of a check at the start * of ext2_mount(). */ strncpy(fs->e2fs_fsmnt, path, MAXMNTLEN); fs->e2fs_fsmnt[MAXMNTLEN - 1] = '\0'; vfs_mountedfrom(mp, fspec); return (0); } static int ext2_check_sb_compat(struct ext2fs *es, struct cdev *dev, int ronly) { uint32_t i, mask; if (es->e2fs_magic != E2FS_MAGIC) { printf("ext2fs: %s: wrong magic number %#x (expected %#x)\n", devtoname(dev), es->e2fs_magic, E2FS_MAGIC); return (1); } if (es->e2fs_rev > E2FS_REV0) { mask = es->e2fs_features_incompat & ~(EXT2F_INCOMPAT_SUPP | EXT4F_RO_INCOMPAT_SUPP); if (mask) { printf("WARNING: mount of %s denied due to " "unsupported optional features:\n", devtoname(dev)); for (i = 0; i < sizeof(incompat)/sizeof(struct ext2_feature); i++) if (mask & incompat[i].mask) printf("%s ", incompat[i].name); printf("\n"); return (1); } mask = es->e2fs_features_rocompat & ~EXT2F_ROCOMPAT_SUPP; if (!ronly && mask) { printf("WARNING: R/W mount of %s denied due to " "unsupported optional features:\n", devtoname(dev)); for (i = 0; i < sizeof(ro_compat)/sizeof(struct ext2_feature); i++) if (mask & ro_compat[i].mask) printf("%s ", ro_compat[i].name); printf("\n"); return (1); } } return (0); } /* * This computes the fields of the m_ext2fs structure from the * data in the ext2fs structure read in. 
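The policy in ext2_check_sb_compat() above is a mask test: clear every incompatible feature bit the driver supports and refuse the mount if anything remains. A minimal sketch of that check; the F_* bit values are illustrative placeholders standing in for the EXT2F_INCOMPAT_* constants in ext2fs.h.

#include <stdint.h>
#include <stdio.h>

#define F_FILETYPE	0x0002	/* placeholder feature bits */
#define F_EXTENTS	0x0040
#define F_64BIT		0x0080
#define F_SUPPORTED	(F_FILETYPE | F_EXTENTS | F_64BIT)

static int
incompat_ok(uint32_t features_incompat)
{
	uint32_t unsupported = features_incompat & ~F_SUPPORTED;

	if (unsupported != 0) {
		printf("unsupported incompat features: %#x\n",
		    (unsigned)unsupported);
		return (0);	/* refuse the mount */
	}
	return (1);
}

int
main(void)
{
	printf("%d\n", incompat_ok(F_FILETYPE | F_EXTENTS));	/* 1 */
	printf("%d\n", incompat_ok(F_FILETYPE | 0x1000));	/* 0 */
	return (0);
}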
*/ static int compute_sb_data(struct vnode *devvp, struct ext2fs *es, struct m_ext2fs *fs) { - int db_count, error; - int i; + int g_count = 0, error; + int i, j; int logic_sb_block = 1; /* XXX for now */ struct buf *bp; - uint32_t e2fs_descpb; + uint32_t e2fs_descpb, e2fs_gdbcount_alloc; + fs->e2fs_bcount = es->e2fs_bcount; + fs->e2fs_rbcount = es->e2fs_rbcount; + fs->e2fs_fbcount = es->e2fs_fbcount; + if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { + fs->e2fs_bcount |= (uint64_t)(es->e4fs_bcount_hi) << 32; + fs->e2fs_rbcount |= (uint64_t)(es->e4fs_rbcount_hi) << 32; + fs->e2fs_fbcount |= (uint64_t)(es->e4fs_fbcount_hi) << 32; + } fs->e2fs_bshift = EXT2_MIN_BLOCK_LOG_SIZE + es->e2fs_log_bsize; fs->e2fs_bsize = 1U << fs->e2fs_bshift; fs->e2fs_fsbtodb = es->e2fs_log_bsize + 1; fs->e2fs_qbmask = fs->e2fs_bsize - 1; fs->e2fs_fsize = EXT2_MIN_FRAG_SIZE << es->e2fs_log_fsize; if (fs->e2fs_fsize) fs->e2fs_fpb = fs->e2fs_bsize / fs->e2fs_fsize; fs->e2fs_bpg = es->e2fs_bpg; fs->e2fs_fpg = es->e2fs_fpg; fs->e2fs_ipg = es->e2fs_ipg; if (es->e2fs_rev == E2FS_REV0) { fs->e2fs_isize = E2FS_REV0_INODE_SIZE; } else { fs->e2fs_isize = es->e2fs_inode_size; /* * Simple sanity check for superblock inode size value. */ if (EXT2_INODE_SIZE(fs) < E2FS_REV0_INODE_SIZE || EXT2_INODE_SIZE(fs) > fs->e2fs_bsize || (fs->e2fs_isize & (fs->e2fs_isize - 1)) != 0) { printf("ext2fs: invalid inode size %d\n", fs->e2fs_isize); return (EIO); } } /* Check for extra isize in big inodes. */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_EXTRA_ISIZE) && EXT2_INODE_SIZE(fs) < sizeof(struct ext2fs_dinode)) { printf("ext2fs: no space for extra inode timestamps\n"); return (EINVAL); } /* Check for group descriptor size */ if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT) && (es->e3fs_desc_size != sizeof(struct ext2_gd))) { printf("ext2fs: group descriptor size unsupported %d\n", es->e3fs_desc_size); return (EINVAL); } fs->e2fs_ipb = fs->e2fs_bsize / EXT2_INODE_SIZE(fs); fs->e2fs_itpg = fs->e2fs_ipg / fs->e2fs_ipb; /* s_resuid / s_resgid ? */ - fs->e2fs_gcount = howmany(es->e2fs_bcount - es->e2fs_first_dblock, + fs->e2fs_gcount = howmany(fs->e2fs_bcount - es->e2fs_first_dblock, EXT2_BLOCKS_PER_GROUP(fs)); - e2fs_descpb = fs->e2fs_bsize / sizeof(struct ext2_gd); - db_count = howmany(fs->e2fs_gcount, e2fs_descpb); - fs->e2fs_gdbcount = db_count; - fs->e2fs_gd = malloc(db_count * fs->e2fs_bsize, - M_EXT2MNT, M_WAITOK); + if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { + e2fs_descpb = fs->e2fs_bsize / sizeof(struct ext2_gd); + e2fs_gdbcount_alloc = howmany(fs->e2fs_gcount, e2fs_descpb); + } else { + e2fs_descpb = fs->e2fs_bsize / E2FS_REV0_GD_SIZE; + e2fs_gdbcount_alloc = howmany(fs->e2fs_gcount, + fs->e2fs_bsize / sizeof(struct ext2_gd)); + } + fs->e2fs_gdbcount = howmany(fs->e2fs_gcount, e2fs_descpb); + fs->e2fs_gd = malloc(e2fs_gdbcount_alloc * fs->e2fs_bsize, + M_EXT2MNT, M_WAITOK | M_ZERO); fs->e2fs_contigdirs = malloc(fs->e2fs_gcount * sizeof(*fs->e2fs_contigdirs), M_EXT2MNT, M_WAITOK | M_ZERO); /* * Adjust logic_sb_block. * Godmar thinks: if the blocksize is greater than 1024, then * the superblock is logically part of block zero. 
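 *
 * With the 64bit incompatible feature the block counters no longer fit the
 * original 32-bit superblock fields, so the low word stays in the classic
 * field and the high word lives in a companion *_hi field, as assembled
 * above.  In outline, with an invented helper name (this is only a sketch,
 * not code from the change):
 *
 *	#include <stdint.h>
 *
 *	static uint64_t
 *	combine_count(uint32_t lo, uint32_t hi, int has_64bit_feature)
 *	{
 *		uint64_t count = lo;
 *
 *		if (has_64bit_feature)
 *			count |= (uint64_t)hi << 32;
 *		return (count);
 *	}
 *
 * For example, lo = 0x00000400 with hi = 0x1 describes 0x100000400 blocks;
 * without the feature the high word is simply ignored.
 *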
*/ if (fs->e2fs_bsize > SBSIZE) logic_sb_block = 0; - for (i = 0; i < db_count; i++) { + for (i = 0; i < fs->e2fs_gdbcount; i++) { error = bread(devvp, fsbtodb(fs, logic_sb_block + i + 1), fs->e2fs_bsize, NOCRED, &bp); if (error) { free(fs->e2fs_contigdirs, M_EXT2MNT); free(fs->e2fs_gd, M_EXT2MNT); brelse(bp); return (error); } - e2fs_cgload((struct ext2_gd *)bp->b_data, - &fs->e2fs_gd[ - i * fs->e2fs_bsize / sizeof(struct ext2_gd)], - fs->e2fs_bsize); + if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { + memcpy(&fs->e2fs_gd[ + i * fs->e2fs_bsize / sizeof(struct ext2_gd)], + bp->b_data, fs->e2fs_bsize); + } else { + for (j = 0; j < e2fs_descpb && + g_count < fs->e2fs_gcount; j++, g_count++) + memcpy(&fs->e2fs_gd[g_count], + bp->b_data + j * E2FS_REV0_GD_SIZE, + E2FS_REV0_GD_SIZE); + } brelse(bp); bp = NULL; } /* Verify cg csum */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) { error = ext2_gd_csum_verify(fs, devvp->v_rdev); if (error) return (error); } /* Initialization for the ext2 Orlov allocator variant. */ fs->e2fs_total_dir = 0; for (i = 0; i < fs->e2fs_gcount; i++) fs->e2fs_total_dir += fs->e2fs_gd[i].ext2bgd_ndirs; if (es->e2fs_rev == E2FS_REV0 || !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_LARGEFILE)) fs->e2fs_maxfilesize = 0x7fffffff; else { fs->e2fs_maxfilesize = 0xffffffffffff; if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_HUGE_FILE)) fs->e2fs_maxfilesize = 0x7fffffffffffffff; } if (es->e4fs_flags & E2FS_UNSIGNED_HASH) { fs->e2fs_uhash = 3; } else if ((es->e4fs_flags & E2FS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ es->e4fs_flags |= E2FS_UNSIGNED_HASH; fs->e2fs_uhash = 3; #else es->e4fs_flags |= E2FS_SIGNED_HASH; #endif } return (0); } /* * Reload all incore data for a filesystem (used after running fsck on * the root filesystem and finding things to fix). The filesystem must * be mounted read-only. * * Things to do to update the mount: * 1) invalidate all cached meta-data. * 2) re-read superblock from disk. * 3) invalidate all cluster summary information. * 4) invalidate all inactive vnodes. * 5) invalidate all cached file data. * 6) re-read inode data for all active vnodes. * XXX we are missing some steps, in particular # 3, this has to be reviewed. */ static int ext2_reload(struct mount *mp, struct thread *td) { struct vnode *vp, *mvp, *devvp; struct inode *ip; struct buf *bp; struct ext2fs *es; struct m_ext2fs *fs; struct csum *sump; int error, i; int32_t *lp; if ((mp->mnt_flag & MNT_RDONLY) == 0) return (EINVAL); /* * Step 1: invalidate all cached meta-data. */ devvp = VFSTOEXT2(mp)->um_devvp; vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); if (vinvalbuf(devvp, 0, 0, 0) != 0) panic("ext2_reload: dirty1"); VOP_UNLOCK(devvp, 0); /* * Step 2: re-read superblock from disk. * constants have been adjusted for ext2 */ if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0) return (error); es = (struct ext2fs *)bp->b_data; if (ext2_check_sb_compat(es, devvp->v_rdev, 0) != 0) { brelse(bp); return (EIO); /* XXX needs translation */ } fs = VFSTOEXT2(mp)->um_e2fs; bcopy(bp->b_data, fs->e2fs, sizeof(struct ext2fs)); if ((error = compute_sb_data(devvp, es, fs)) != 0) { brelse(bp); return (error); } #ifdef UNKLAR if (fs->fs_sbsize < SBSIZE) bp->b_flags |= B_INVAL; #endif brelse(bp); /* * Step 3: invalidate all cluster summary information. 
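 *
 * The loop above reads one descriptor block at a time.  When the 64bit
 * feature is present the on-disk layout matches the in-memory structure and
 * whole blocks can be copied; otherwise each packed 32-byte rev-0 descriptor
 * is copied into the front of a wider, zero-filled in-memory slot, which is
 * why the descriptor array is now allocated with M_ZERO.  A standalone
 * sketch of the second case; the type and constant names are invented:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	#define OLD_GD_SIZE	32		// packed rev-0 descriptor
 *
 *	struct gd_wide {			// stand-in for the wide form
 *		uint8_t	bytes[64];
 *	};
 *
 *	static void
 *	unpack_old_descriptors(struct gd_wide *gd, size_t first, size_t count,
 *	    const uint8_t *block)
 *	{
 *		size_t j;
 *
 *		// the tail of every wide slot stays zero from allocation
 *		for (j = 0; j < count; j++)
 *			memcpy(&gd[first + j], block + j * OLD_GD_SIZE,
 *			    OLD_GD_SIZE);
 *	}
 *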
*/ if (fs->e2fs_contigsumsize > 0) { lp = fs->e2fs_maxcluster; sump = fs->e2fs_clustersum; for (i = 0; i < fs->e2fs_gcount; i++, sump++) { *lp++ = fs->e2fs_contigsumsize; sump->cs_init = 0; bzero(sump->cs_sum, fs->e2fs_contigsumsize + 1); } } loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { /* * Step 4: invalidate all cached file data. */ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } if (vinvalbuf(vp, 0, 0, 0)) panic("ext2_reload: dirty2"); /* * Step 5: re-read inode data for all active vnodes. */ ip = VTOI(vp); error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { VOP_UNLOCK(vp, 0); vrele(vp); MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); return (error); } ext2_ei2i((struct ext2fs_dinode *)((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)), ip); brelse(bp); VOP_UNLOCK(vp, 0); vrele(vp); } return (0); } /* * Common code for mount and mountroot. */ static int ext2_mountfs(struct vnode *devvp, struct mount *mp) { struct ext2mount *ump; struct buf *bp; struct m_ext2fs *fs; struct ext2fs *es; struct cdev *dev = devvp->v_rdev; struct g_consumer *cp; struct bufobj *bo; struct csum *sump; int error; int ronly; int i; u_long size; int32_t *lp; int32_t e2fs_maxcontig; ronly = vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0); /* XXX: use VOP_ACESS to check FS perms */ g_topology_lock(); error = g_vfs_open(devvp, &cp, "ext2fs", ronly ? 0 : 1); g_topology_unlock(); VOP_UNLOCK(devvp, 0); if (error) return (error); /* XXX: should we check for some sectorsize or 512 instead? */ if (((SBSIZE % cp->provider->sectorsize) != 0) || (SBSIZE < cp->provider->sectorsize)) { g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); return (EINVAL); } bo = &devvp->v_bufobj; bo->bo_private = cp; bo->bo_ops = g_vfs_bufops; if (devvp->v_rdev->si_iosize_max != 0) mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max; if (mp->mnt_iosize_max > MAXPHYS) mp->mnt_iosize_max = MAXPHYS; bp = NULL; ump = NULL; if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0) goto out; es = (struct ext2fs *)bp->b_data; if (ext2_check_sb_compat(es, dev, ronly) != 0) { error = EINVAL; /* XXX needs translation */ goto out; } if ((es->e2fs_state & E2FS_ISCLEAN) == 0 || (es->e2fs_state & E2FS_ERRORS)) { if (ronly || (mp->mnt_flag & MNT_FORCE)) { printf( "WARNING: Filesystem was not properly dismounted\n"); } else { printf( "WARNING: R/W mount denied. Filesystem is not clean - run fsck\n"); error = EPERM; goto out; } } ump = malloc(sizeof(*ump), M_EXT2MNT, M_WAITOK | M_ZERO); /* * I don't know whether this is the right strategy. Note that * we dynamically allocate both an m_ext2fs and an ext2fs * while Linux keeps the super block in a locked buffer. */ ump->um_e2fs = malloc(sizeof(struct m_ext2fs), M_EXT2MNT, M_WAITOK | M_ZERO); ump->um_e2fs->e2fs = malloc(sizeof(struct ext2fs), M_EXT2MNT, M_WAITOK); mtx_init(EXT2_MTX(ump), "EXT2FS", "EXT2FS Lock", MTX_DEF); bcopy(es, ump->um_e2fs->e2fs, (u_int)sizeof(struct ext2fs)); if ((error = compute_sb_data(devvp, ump->um_e2fs->e2fs, ump->um_e2fs))) goto out; /* * Calculate the maximum contiguous blocks and size of cluster summary * array. In FFS this is done by newfs; however, the superblock * in ext2fs doesn't have these variables, so we can calculate * them here. 
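 *
 * The sizing performed just below bounds the cluster summaries: the largest
 * useful run is MAXPHYS divided by the block size (never less than one),
 * clamped to the EXT2_MAXCONTIG ceiling, and each cylinder group then gets
 * contigsumsize + 1 counters plus one "max cluster" entry.  A rough
 * standalone model of that arithmetic, with invented names and a simplified
 * per-group byte count:
 *
 *	#include <stdint.h>
 *
 *	static uint32_t
 *	contig_summary_size(uint32_t maxphys, uint32_t bsize, uint32_t ceiling)
 *	{
 *		uint32_t maxcontig = maxphys / bsize;
 *
 *		if (maxcontig < 1)
 *			maxcontig = 1;
 *		return (maxcontig < ceiling ? maxcontig : ceiling);
 *	}
 *
 *	static uint64_t
 *	summary_bytes_per_group(uint32_t sumsize)
 *	{
 *		// sumsize + 1 run counters plus one max-cluster entry
 *		return (((uint64_t)sumsize + 2) * sizeof(uint32_t));
 *	}
 *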
*/ e2fs_maxcontig = MAX(1, MAXPHYS / ump->um_e2fs->e2fs_bsize); ump->um_e2fs->e2fs_contigsumsize = MIN(e2fs_maxcontig, EXT2_MAXCONTIG); if (ump->um_e2fs->e2fs_contigsumsize > 0) { size = ump->um_e2fs->e2fs_gcount * sizeof(int32_t); ump->um_e2fs->e2fs_maxcluster = malloc(size, M_EXT2MNT, M_WAITOK); size = ump->um_e2fs->e2fs_gcount * sizeof(struct csum); ump->um_e2fs->e2fs_clustersum = malloc(size, M_EXT2MNT, M_WAITOK); lp = ump->um_e2fs->e2fs_maxcluster; sump = ump->um_e2fs->e2fs_clustersum; for (i = 0; i < ump->um_e2fs->e2fs_gcount; i++, sump++) { *lp++ = ump->um_e2fs->e2fs_contigsumsize; sump->cs_init = 0; sump->cs_sum = malloc((ump->um_e2fs->e2fs_contigsumsize + 1) * sizeof(int32_t), M_EXT2MNT, M_WAITOK | M_ZERO); } } brelse(bp); bp = NULL; fs = ump->um_e2fs; fs->e2fs_ronly = ronly; /* ronly is set according to mnt_flags */ /* * If the fs is not mounted read-only, make sure the super block is * always written back on a sync(). */ fs->e2fs_wasvalid = fs->e2fs->e2fs_state & E2FS_ISCLEAN ? 1 : 0; if (ronly == 0) { fs->e2fs_fmod = 1; /* mark it modified */ fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN; /* set fs invalid */ } mp->mnt_data = ump; mp->mnt_stat.f_fsid.val[0] = dev2udev(dev); mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; mp->mnt_maxsymlinklen = EXT2_MAXSYMLINKLEN; MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; MNT_IUNLOCK(mp); ump->um_mountp = mp; ump->um_dev = dev; ump->um_devvp = devvp; ump->um_bo = &devvp->v_bufobj; ump->um_cp = cp; /* * Setting those two parameters allowed us to use * ufs_bmap w/o changse! */ ump->um_nindir = EXT2_ADDR_PER_BLOCK(fs); ump->um_bptrtodb = fs->e2fs->e2fs_log_bsize + 1; ump->um_seqinc = EXT2_FRAGS_PER_BLOCK(fs); if (ronly == 0) ext2_sbupdate(ump, MNT_WAIT); /* * Initialize filesystem stat information in mount struct. */ MNT_ILOCK(mp); mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | MNTK_USES_BCACHE; MNT_IUNLOCK(mp); return (0); out: if (bp) brelse(bp); if (cp != NULL) { g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); } if (ump) { mtx_destroy(EXT2_MTX(ump)); free(ump->um_e2fs->e2fs_gd, M_EXT2MNT); free(ump->um_e2fs->e2fs_contigdirs, M_EXT2MNT); free(ump->um_e2fs->e2fs, M_EXT2MNT); free(ump->um_e2fs, M_EXT2MNT); free(ump, M_EXT2MNT); mp->mnt_data = NULL; } return (error); } /* * Unmount system call. */ static int ext2_unmount(struct mount *mp, int mntflags) { struct ext2mount *ump; struct m_ext2fs *fs; struct csum *sump; int error, flags, i, ronly; flags = 0; if (mntflags & MNT_FORCE) { if (mp->mnt_flag & MNT_ROOTFS) return (EINVAL); flags |= FORCECLOSE; } if ((error = ext2_flushfiles(mp, flags, curthread)) != 0) return (error); ump = VFSTOEXT2(mp); fs = ump->um_e2fs; ronly = fs->e2fs_ronly; if (ronly == 0 && ext2_cgupdate(ump, MNT_WAIT) == 0) { if (fs->e2fs_wasvalid) fs->e2fs->e2fs_state |= E2FS_ISCLEAN; ext2_sbupdate(ump, MNT_WAIT); } g_topology_lock(); g_vfs_close(ump->um_cp); g_topology_unlock(); vrele(ump->um_devvp); sump = fs->e2fs_clustersum; for (i = 0; i < fs->e2fs_gcount; i++, sump++) free(sump->cs_sum, M_EXT2MNT); free(fs->e2fs_clustersum, M_EXT2MNT); free(fs->e2fs_maxcluster, M_EXT2MNT); free(fs->e2fs_gd, M_EXT2MNT); free(fs->e2fs_contigdirs, M_EXT2MNT); free(fs->e2fs, M_EXT2MNT); free(fs, M_EXT2MNT); free(ump, M_EXT2MNT); mp->mnt_data = NULL; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_LOCAL; MNT_IUNLOCK(mp); return (error); } /* * Flush out all the files in a filesystem. 
*/ static int ext2_flushfiles(struct mount *mp, int flags, struct thread *td) { int error; error = vflush(mp, 0, flags, td); return (error); } /* * Get filesystem statistics. */ int ext2_statfs(struct mount *mp, struct statfs *sbp) { struct ext2mount *ump; struct m_ext2fs *fs; uint32_t overhead, overhead_per_group, ngdb; int i, ngroups; ump = VFSTOEXT2(mp); fs = ump->um_e2fs; if (fs->e2fs->e2fs_magic != E2FS_MAGIC) panic("ext2_statfs"); /* * Compute the overhead (FS structures) */ overhead_per_group = 1 /* block bitmap */ + 1 /* inode bitmap */ + fs->e2fs_itpg; overhead = fs->e2fs->e2fs_first_dblock + fs->e2fs_gcount * overhead_per_group; if (fs->e2fs->e2fs_rev > E2FS_REV0 && fs->e2fs->e2fs_features_rocompat & EXT2F_ROCOMPAT_SPARSESUPER) { for (i = 0, ngroups = 0; i < fs->e2fs_gcount; i++) { if (ext2_cg_has_sb(fs, i)) ngroups++; } } else { ngroups = fs->e2fs_gcount; } ngdb = fs->e2fs_gdbcount; if (fs->e2fs->e2fs_rev > E2FS_REV0 && fs->e2fs->e2fs_features_compat & EXT2F_COMPAT_RESIZE) ngdb += fs->e2fs->e2fs_reserved_ngdb; overhead += ngroups * (1 /* superblock */ + ngdb); sbp->f_bsize = EXT2_FRAG_SIZE(fs); sbp->f_iosize = EXT2_BLOCK_SIZE(fs); - sbp->f_blocks = fs->e2fs->e2fs_bcount - overhead; - sbp->f_bfree = fs->e2fs->e2fs_fbcount; - sbp->f_bavail = sbp->f_bfree - fs->e2fs->e2fs_rbcount; + sbp->f_blocks = fs->e2fs_bcount - overhead; + sbp->f_bfree = fs->e2fs_fbcount; + sbp->f_bavail = sbp->f_bfree - fs->e2fs_rbcount; sbp->f_files = fs->e2fs->e2fs_icount; sbp->f_ffree = fs->e2fs->e2fs_ficount; return (0); } /* * Go through the disk queues to initiate sandbagged IO; * go through the inodes to write those that have been modified; * initiate the writing of the super block if it has been modified. * * Note: we are always called with the filesystem marked `MPBUSY'. */ static int ext2_sync(struct mount *mp, int waitfor) { struct vnode *mvp, *vp; struct thread *td; struct inode *ip; struct ext2mount *ump = VFSTOEXT2(mp); struct m_ext2fs *fs; int error, allerror = 0; td = curthread; fs = ump->um_e2fs; if (fs->e2fs_fmod != 0 && fs->e2fs_ronly != 0) { /* XXX */ printf("fs = %s\n", fs->e2fs_fsmnt); panic("ext2_sync: rofs mod"); } /* * Write back each (modified) inode. */ loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } ip = VTOI(vp); if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && (vp->v_bufobj.bo_dirty.bv_cnt == 0 || waitfor == MNT_LAZY)) { VI_UNLOCK(vp); continue; } error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td); if (error) { if (error == ENOENT) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } continue; } if ((error = VOP_FSYNC(vp, waitfor, td)) != 0) allerror = error; VOP_UNLOCK(vp, 0); vrele(vp); } /* * Force stale filesystem control information to be flushed. */ if (waitfor != MNT_LAZY) { vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); if ((error = VOP_FSYNC(ump->um_devvp, waitfor, td)) != 0) allerror = error; VOP_UNLOCK(ump->um_devvp, 0); } /* * Write back modified superblock. */ if (fs->e2fs_fmod != 0) { fs->e2fs_fmod = 0; fs->e2fs->e2fs_wtime = time_second; if ((error = ext2_cgupdate(ump, waitfor)) != 0) allerror = error; } return (allerror); } /* * Look up an EXT2FS dinode number to find its incore vnode, otherwise read it * in from disk. If it is in core, wait for the lock bit to clear, then * return the inode locked. Detection and handling of mount points must be * done by the calling routine. 
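 *
 * The overhead figure computed in ext2_statfs() above charges every group
 * for its block bitmap, inode bitmap and inode-table blocks, and charges the
 * groups that carry a superblock backup for the superblock plus the group
 * descriptor blocks as well.  A rough standalone model, with invented
 * parameter names (reserved GDT blocks are folded into gdb_blocks):
 *
 *	#include <stdint.h>
 *
 *	static uint64_t
 *	fs_overhead_blocks(uint64_t first_dblock, uint64_t groups,
 *	    uint64_t sb_groups, uint64_t itable_blocks, uint64_t gdb_blocks)
 *	{
 *		// block bitmap + inode bitmap + inode table, in every group
 *		uint64_t per_group = 1 + 1 + itable_blocks;
 *
 *		return (first_dblock + groups * per_group +
 *		    sb_groups * (1 + gdb_blocks));
 *	}
 *
 * The f_blocks value handed back to statfs(2) is then the total block count
 * minus this figure, and f_bavail further subtracts the reserved blocks.
 *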
*/ static int ext2_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp) { struct m_ext2fs *fs; struct inode *ip; struct ext2mount *ump; struct buf *bp; struct vnode *vp; struct thread *td; int i, error; int used_blocks; td = curthread; error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); ump = VFSTOEXT2(mp); ip = malloc(sizeof(struct inode), M_EXT2NODE, M_WAITOK | M_ZERO); /* Allocate a new vnode/inode. */ if ((error = getnewvnode("ext2fs", mp, &ext2_vnodeops, &vp)) != 0) { *vpp = NULL; free(ip, M_EXT2NODE); return (error); } vp->v_data = ip; ip->i_vnode = vp; ip->i_e2fs = fs = ump->um_e2fs; ip->i_ump = ump; ip->i_number = ino; lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); error = insmntque(vp, mp); if (error != 0) { free(ip, M_EXT2NODE); *vpp = NULL; return (error); } error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); /* Read in the disk contents for the inode, copy into the inode. */ if ((error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { /* * The inode does not contain anything useful, so it would * be misleading to leave it on its hash chain. With mode * still zero, it will be unlinked and returned to the free * list by vput(). */ brelse(bp); vput(vp); *vpp = NULL; return (error); } /* convert ext2 inode to dinode */ ext2_ei2i((struct ext2fs_dinode *)((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ino)), ip); ip->i_block_group = ino_to_cg(fs, ino); ip->i_next_alloc_block = 0; ip->i_next_alloc_goal = 0; /* * Now we want to make sure that block pointers for unused * blocks are zeroed out - ext2_balloc depends on this * although for regular files and directories only * * If IN_E4EXTENTS is enabled, unused blocks are not zeroed * out because we could corrupt the extent tree. */ if (!(ip->i_flag & IN_E4EXTENTS) && (S_ISDIR(ip->i_mode) || S_ISREG(ip->i_mode))) { used_blocks = howmany(ip->i_size, fs->e2fs_bsize); for (i = used_blocks; i < EXT2_NDIR_BLOCKS; i++) ip->i_db[i] = 0; } #ifdef EXT2FS_DEBUG ext2_print_inode(ip); ext4_ext_print_extent_tree_status(ip); #endif bqrelse(bp); /* * Initialize the vnode from the inode, check for aliases. * Note that the underlying vnode may have changed. */ if ((error = ext2_vinit(mp, &ext2_fifoops, &vp)) != 0) { vput(vp); *vpp = NULL; return (error); } /* * Finish inode initialization. */ *vpp = vp; return (0); } /* * File handle to vnode * * Have to be really careful about stale file handles: * - check that the inode number is valid * - call ext2_vget() to get the locked inode * - check for an unallocated inode (i_mode == 0) * - check that the given client host has export rights and return * those rights via. exflagsp and credanonp */ static int ext2_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp) { struct inode *ip; struct ufid *ufhp; struct vnode *nvp; struct m_ext2fs *fs; int error; ufhp = (struct ufid *)fhp; fs = VFSTOEXT2(mp)->um_e2fs; if (ufhp->ufid_ino < EXT2_ROOTINO || ufhp->ufid_ino > fs->e2fs_gcount * fs->e2fs->e2fs_ipg) return (ESTALE); error = VFS_VGET(mp, ufhp->ufid_ino, LK_EXCLUSIVE, &nvp); if (error) { *vpp = NULLVP; return (error); } ip = VTOI(nvp); if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen || ip->i_nlink <= 0) { vput(nvp); *vpp = NULLVP; return (ESTALE); } *vpp = nvp; vnode_create_vobject(*vpp, 0, curthread); return (0); } /* * Write a superblock and associated information back to disk. 
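 *
 * The superblock write-back just below is the inverse of the mount-time
 * assembly in compute_sb_data(): the low 32 bits of each in-memory counter
 * always go back into the classic field, and the high bits are stored in
 * the *_hi field only when the 64bit feature is enabled.  Sketched with an
 * invented helper (illustration only):
 *
 *	#include <stdint.h>
 *
 *	static void
 *	split_count(uint64_t count, uint32_t *lo, uint32_t *hi,
 *	    int has_64bit_feature)
 *	{
 *		*lo = (uint32_t)(count & 0xffffffffU);
 *		if (has_64bit_feature)
 *			*hi = (uint32_t)(count >> 32);
 *	}
 *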
*/ static int ext2_sbupdate(struct ext2mount *mp, int waitfor) { struct m_ext2fs *fs = mp->um_e2fs; struct ext2fs *es = fs->e2fs; struct buf *bp; int error = 0; + es->e2fs_bcount = fs->e2fs_bcount & 0xffffffff; + es->e2fs_rbcount = fs->e2fs_rbcount & 0xffffffff; + es->e2fs_fbcount = fs->e2fs_fbcount & 0xffffffff; + if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { + es->e4fs_bcount_hi = fs->e2fs_bcount >> 32; + es->e4fs_rbcount_hi = fs->e2fs_rbcount >> 32; + es->e4fs_fbcount_hi = fs->e2fs_fbcount >> 32; + } + bp = getblk(mp->um_devvp, SBLOCK, SBSIZE, 0, 0, 0); bcopy((caddr_t)es, bp->b_data, (u_int)sizeof(struct ext2fs)); if (waitfor == MNT_WAIT) error = bwrite(bp); else bawrite(bp); /* * The buffers for group descriptors, inode bitmaps and block bitmaps * are not busy at this point and are (hopefully) written by the * usual sync mechanism. No need to write them here. */ return (error); } int ext2_cgupdate(struct ext2mount *mp, int waitfor) { struct m_ext2fs *fs = mp->um_e2fs; struct buf *bp; - int i, error = 0, allerror = 0; + int i, j, g_count = 0, error = 0, allerror = 0; allerror = ext2_sbupdate(mp, waitfor); /* Update gd csums */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) ext2_gd_csum_set(fs); for (i = 0; i < fs->e2fs_gdbcount; i++) { bp = getblk(mp->um_devvp, fsbtodb(fs, fs->e2fs->e2fs_first_dblock + 1 /* superblock */ + i), fs->e2fs_bsize, 0, 0, 0); - e2fs_cgsave(&fs->e2fs_gd[ - i * fs->e2fs_bsize / sizeof(struct ext2_gd)], - (struct ext2_gd *)bp->b_data, fs->e2fs_bsize); + if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT)) { + memcpy(bp->b_data, &fs->e2fs_gd[ + i * fs->e2fs_bsize / sizeof(struct ext2_gd)], + fs->e2fs_bsize); + } else { + for (j = 0; j < fs->e2fs_bsize / E2FS_REV0_GD_SIZE && + g_count < fs->e2fs_gcount; j++, g_count++) + memcpy(bp->b_data + j * E2FS_REV0_GD_SIZE, + &fs->e2fs_gd[g_count], E2FS_REV0_GD_SIZE); + } if (waitfor == MNT_WAIT) error = bwrite(bp); else bawrite(bp); } if (!allerror && error) allerror = error; return (allerror); } /* * Return the root of a filesystem. */ static int ext2_root(struct mount *mp, int flags, struct vnode **vpp) { struct vnode *nvp; int error; error = VFS_VGET(mp, EXT2_ROOTINO, LK_EXCLUSIVE, &nvp); if (error) return (error); *vpp = nvp; return (0); } Index: head/sys/fs/ext2fs/ext2_vnops.c =================================================================== --- head/sys/fs/ext2fs/ext2_vnops.c (revision 327583) +++ head/sys/fs/ext2fs/ext2_vnops.c (revision 327584) @@ -1,2308 +1,2308 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ufs_vnops.c 8.7 (Berkeley) 2/3/94 * @(#)ufs_vnops.c 8.27 (Berkeley) 5/27/95 * $FreeBSD$ */ #include "opt_suiddir.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_directio.h" #include #include #include #include -#include #include +#include #include #include #include #include static int ext2_makeinode(int mode, struct vnode *, struct vnode **, struct componentname *); static void ext2_itimes_locked(struct vnode *); static vop_access_t ext2_access; static int ext2_chmod(struct vnode *, int, struct ucred *, struct thread *); static int ext2_chown(struct vnode *, uid_t, gid_t, struct ucred *, struct thread *); static vop_close_t ext2_close; static vop_create_t ext2_create; static vop_fsync_t ext2_fsync; static vop_getattr_t ext2_getattr; static vop_ioctl_t ext2_ioctl; static vop_link_t ext2_link; static vop_mkdir_t ext2_mkdir; static vop_mknod_t ext2_mknod; static vop_open_t ext2_open; static vop_pathconf_t ext2_pathconf; static vop_print_t ext2_print; static vop_read_t ext2_read; static vop_readlink_t ext2_readlink; static vop_remove_t ext2_remove; static vop_rename_t ext2_rename; static vop_rmdir_t ext2_rmdir; static vop_setattr_t ext2_setattr; static vop_strategy_t ext2_strategy; static vop_symlink_t ext2_symlink; static vop_write_t ext2_write; static vop_deleteextattr_t ext2_deleteextattr; static vop_getextattr_t ext2_getextattr; static vop_listextattr_t ext2_listextattr; static vop_setextattr_t ext2_setextattr; static vop_vptofh_t ext2_vptofh; static vop_close_t ext2fifo_close; static vop_kqfilter_t ext2fifo_kqfilter; /* Global vfs data structures for ext2. 
*/ struct vop_vector ext2_vnodeops = { .vop_default = &default_vnodeops, .vop_access = ext2_access, .vop_bmap = ext2_bmap, .vop_cachedlookup = ext2_lookup, .vop_close = ext2_close, .vop_create = ext2_create, .vop_fsync = ext2_fsync, .vop_getpages = vnode_pager_local_getpages, .vop_getpages_async = vnode_pager_local_getpages_async, .vop_getattr = ext2_getattr, .vop_inactive = ext2_inactive, .vop_ioctl = ext2_ioctl, .vop_link = ext2_link, .vop_lookup = vfs_cache_lookup, .vop_mkdir = ext2_mkdir, .vop_mknod = ext2_mknod, .vop_open = ext2_open, .vop_pathconf = ext2_pathconf, .vop_poll = vop_stdpoll, .vop_print = ext2_print, .vop_read = ext2_read, .vop_readdir = ext2_readdir, .vop_readlink = ext2_readlink, .vop_reallocblks = ext2_reallocblks, .vop_reclaim = ext2_reclaim, .vop_remove = ext2_remove, .vop_rename = ext2_rename, .vop_rmdir = ext2_rmdir, .vop_setattr = ext2_setattr, .vop_strategy = ext2_strategy, .vop_symlink = ext2_symlink, .vop_write = ext2_write, .vop_deleteextattr = ext2_deleteextattr, .vop_getextattr = ext2_getextattr, .vop_listextattr = ext2_listextattr, .vop_setextattr = ext2_setextattr, #ifdef UFS_ACL .vop_getacl = ext2_getacl, .vop_setacl = ext2_setacl, .vop_aclcheck = ext2_aclcheck, #endif /* UFS_ACL */ .vop_vptofh = ext2_vptofh, }; struct vop_vector ext2_fifoops = { .vop_default = &fifo_specops, .vop_access = ext2_access, .vop_close = ext2fifo_close, .vop_fsync = ext2_fsync, .vop_getattr = ext2_getattr, .vop_inactive = ext2_inactive, .vop_kqfilter = ext2fifo_kqfilter, .vop_pathconf = ext2_pathconf, .vop_print = ext2_print, .vop_read = VOP_PANIC, .vop_reclaim = ext2_reclaim, .vop_setattr = ext2_setattr, .vop_write = VOP_PANIC, .vop_vptofh = ext2_vptofh, }; /* * A virgin directory (no blushing please). * Note that the type and namlen fields are reversed relative to ext2. * Also, we don't use `struct odirtemplate', since it would just cause * endianness problems. */ static struct dirtemplate mastertemplate = { 0, 12, 1, EXT2_FT_DIR, ".", 0, DIRBLKSIZ - 12, 2, EXT2_FT_DIR, ".." }; static struct dirtemplate omastertemplate = { 0, 12, 1, EXT2_FT_UNKNOWN, ".", 0, DIRBLKSIZ - 12, 2, EXT2_FT_UNKNOWN, ".." }; static void ext2_itimes_locked(struct vnode *vp) { struct inode *ip; struct timespec ts; ASSERT_VI_LOCKED(vp, __func__); ip = VTOI(vp); if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) == 0) return; if ((vp->v_type == VBLK || vp->v_type == VCHR)) ip->i_flag |= IN_LAZYMOD; else ip->i_flag |= IN_MODIFIED; if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { vfs_timestamp(&ts); if (ip->i_flag & IN_ACCESS) { ip->i_atime = ts.tv_sec; ip->i_atimensec = ts.tv_nsec; } if (ip->i_flag & IN_UPDATE) { ip->i_mtime = ts.tv_sec; ip->i_mtimensec = ts.tv_nsec; ip->i_modrev++; } if (ip->i_flag & IN_CHANGE) { ip->i_ctime = ts.tv_sec; ip->i_ctimensec = ts.tv_nsec; } } ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE); } void ext2_itimes(struct vnode *vp) { VI_LOCK(vp); ext2_itimes_locked(vp); VI_UNLOCK(vp); } /* * Create a regular file */ static int ext2_create(struct vop_create_args *ap) { int error; error = ext2_makeinode(MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode), ap->a_dvp, ap->a_vpp, ap->a_cnp); if (error != 0) return (error); if ((ap->a_cnp->cn_flags & MAKEENTRY) != 0) cache_enter(ap->a_dvp, *ap->a_vpp, ap->a_cnp); return (0); } static int ext2_open(struct vop_open_args *ap) { if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR) return (EOPNOTSUPP); /* * Files marked append-only must be opened for appending. 
*/ if ((VTOI(ap->a_vp)->i_flags & APPEND) && (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE) return (EPERM); vnode_create_vobject(ap->a_vp, VTOI(ap->a_vp)->i_size, ap->a_td); return (0); } /* * Close called. * * Update the times on the inode. */ static int ext2_close(struct vop_close_args *ap) { struct vnode *vp = ap->a_vp; VI_LOCK(vp); if (vp->v_usecount > 1) ext2_itimes_locked(vp); VI_UNLOCK(vp); return (0); } static int ext2_access(struct vop_access_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); accmode_t accmode = ap->a_accmode; int error; if (vp->v_type == VBLK || vp->v_type == VCHR) return (EOPNOTSUPP); /* * Disallow write attempts on read-only file systems; * unless the file is a socket, fifo, or a block or * character device resident on the file system. */ if (accmode & VWRITE) { switch (vp->v_type) { case VDIR: case VLNK: case VREG: if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: break; } } /* If immutable bit set, nobody gets to write it. */ if ((accmode & VWRITE) && (ip->i_flags & (SF_IMMUTABLE | SF_SNAPSHOT))) return (EPERM); error = vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid, ap->a_accmode, ap->a_cred, NULL); return (error); } static int ext2_getattr(struct vop_getattr_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); struct vattr *vap = ap->a_vap; ext2_itimes(vp); /* * Copy from inode table */ vap->va_fsid = dev2udev(ip->i_devvp->v_rdev); vap->va_fileid = ip->i_number; vap->va_mode = ip->i_mode & ~IFMT; vap->va_nlink = ip->i_nlink; vap->va_uid = ip->i_uid; vap->va_gid = ip->i_gid; vap->va_rdev = ip->i_rdev; vap->va_size = ip->i_size; vap->va_atime.tv_sec = ip->i_atime; vap->va_atime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_atimensec : 0; vap->va_mtime.tv_sec = ip->i_mtime; vap->va_mtime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_mtimensec : 0; vap->va_ctime.tv_sec = ip->i_ctime; vap->va_ctime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_ctimensec : 0; if E2DI_HAS_XTIME(ip) { vap->va_birthtime.tv_sec = ip->i_birthtime; vap->va_birthtime.tv_nsec = ip->i_birthnsec; } vap->va_flags = ip->i_flags; vap->va_gen = ip->i_gen; vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize; vap->va_bytes = dbtob((u_quad_t)ip->i_blocks); vap->va_type = IFTOVT(ip->i_mode); vap->va_filerev = ip->i_modrev; return (0); } /* * Set attribute vnode op. called from several syscalls */ static int ext2_setattr(struct vop_setattr_args *ap) { struct vattr *vap = ap->a_vap; struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); struct ucred *cred = ap->a_cred; struct thread *td = curthread; int error; /* * Check for unsettable attributes. */ if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) || (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) || (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) || ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) { return (EINVAL); } if (vap->va_flags != VNOVAL) { /* Disallow flags not supported by ext2fs. */ if (vap->va_flags & ~(SF_APPEND | SF_IMMUTABLE | UF_NODUMP)) return (EOPNOTSUPP); if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); /* * Callers may only modify the file flags on objects they * have VADMIN rights for. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td))) return (error); /* * Unprivileged processes and privileged processes in * jail() are not permitted to unset system flags, or * modify flags if any system flags are set. * Privileged non-jail processes may not modify system flags * if securelevel > 0 and any existing system flags are set. 
*/ if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0)) { if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND)) { error = securelevel_gt(cred, 0); if (error) return (error); } } else { if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND) || ((vap->va_flags ^ ip->i_flags) & SF_SETTABLE)) return (EPERM); } ip->i_flags = vap->va_flags; ip->i_flag |= IN_CHANGE; if (ip->i_flags & (IMMUTABLE | APPEND)) return (0); } if (ip->i_flags & (IMMUTABLE | APPEND)) return (EPERM); /* * Go through the fields and update iff not VNOVAL. */ if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); if ((error = ext2_chown(vp, vap->va_uid, vap->va_gid, cred, td)) != 0) return (error); } if (vap->va_size != VNOVAL) { /* * Disallow write attempts on read-only file systems; * unless the file is a socket, fifo, or a block or * character device resident on the file system. */ switch (vp->v_type) { case VDIR: return (EISDIR); case VLNK: case VREG: if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: break; } if ((error = ext2_truncate(vp, vap->va_size, 0, cred, td)) != 0) return (error); } if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); /* * From utimes(2): * If times is NULL, ... The caller must be the owner of * the file, have permission to write the file, or be the * super-user. * If times is non-NULL, ... The caller must be the owner of * the file or be the super-user. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td)) && ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || (error = VOP_ACCESS(vp, VWRITE, cred, td)))) return (error); ip->i_flag |= IN_CHANGE | IN_MODIFIED; if (vap->va_atime.tv_sec != VNOVAL) { ip->i_flag &= ~IN_ACCESS; ip->i_atime = vap->va_atime.tv_sec; ip->i_atimensec = vap->va_atime.tv_nsec; } if (vap->va_mtime.tv_sec != VNOVAL) { ip->i_flag &= ~IN_UPDATE; ip->i_mtime = vap->va_mtime.tv_sec; ip->i_mtimensec = vap->va_mtime.tv_nsec; } ip->i_birthtime = vap->va_birthtime.tv_sec; ip->i_birthnsec = vap->va_birthtime.tv_nsec; error = ext2_update(vp, 0); if (error) return (error); } error = 0; if (vap->va_mode != (mode_t)VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); error = ext2_chmod(vp, (int)vap->va_mode, cred, td); } return (error); } /* * Change the mode on a file. * Inode must be locked before calling. */ static int ext2_chmod(struct vnode *vp, int mode, struct ucred *cred, struct thread *td) { struct inode *ip = VTOI(vp); int error; /* * To modify the permissions on a file, must possess VADMIN * for that file. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td))) return (error); /* * Privileged processes may set the sticky bit on non-directories, * as well as set the setgid bit on a file with a group that the * process is not a member of. */ if (vp->v_type != VDIR && (mode & S_ISTXT)) { error = priv_check_cred(cred, PRIV_VFS_STICKYFILE, 0); if (error) return (EFTYPE); } if (!groupmember(ip->i_gid, cred) && (mode & ISGID)) { error = priv_check_cred(cred, PRIV_VFS_SETGID, 0); if (error) return (error); } ip->i_mode &= ~ALLPERMS; ip->i_mode |= (mode & ALLPERMS); ip->i_flag |= IN_CHANGE; return (0); } /* * Perform chown operation on inode ip; * inode must be locked prior to call. 
*/ static int ext2_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred, struct thread *td) { struct inode *ip = VTOI(vp); uid_t ouid; gid_t ogid; int error = 0; if (uid == (uid_t)VNOVAL) uid = ip->i_uid; if (gid == (gid_t)VNOVAL) gid = ip->i_gid; /* * To modify the ownership of a file, must possess VADMIN * for that file. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td))) return (error); /* * To change the owner of a file, or change the group of a file * to a group of which we are not a member, the caller must * have privilege. */ if (uid != ip->i_uid || (gid != ip->i_gid && !groupmember(gid, cred))) { error = priv_check_cred(cred, PRIV_VFS_CHOWN, 0); if (error) return (error); } ogid = ip->i_gid; ouid = ip->i_uid; ip->i_gid = gid; ip->i_uid = uid; ip->i_flag |= IN_CHANGE; if ((ip->i_mode & (ISUID | ISGID)) && (ouid != uid || ogid != gid)) { if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID, 0) != 0) ip->i_mode &= ~(ISUID | ISGID); } return (0); } /* * Synch an open file. */ /* ARGSUSED */ static int ext2_fsync(struct vop_fsync_args *ap) { /* * Flush all dirty buffers associated with a vnode. */ vop_stdfsync(ap); return (ext2_update(ap->a_vp, ap->a_waitfor == MNT_WAIT)); } /* * Mknod vnode call */ /* ARGSUSED */ static int ext2_mknod(struct vop_mknod_args *ap) { struct vattr *vap = ap->a_vap; struct vnode **vpp = ap->a_vpp; struct inode *ip; ino_t ino; int error; error = ext2_makeinode(MAKEIMODE(vap->va_type, vap->va_mode), ap->a_dvp, vpp, ap->a_cnp); if (error) return (error); ip = VTOI(*vpp); ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; if (vap->va_rdev != VNOVAL) { /* * Want to be able to use this to make badblock * inodes, so don't truncate the dev number. */ if (!(ip->i_flag & IN_E4EXTENTS)) ip->i_rdev = vap->va_rdev; } /* * Remove inode, then reload it through VFS_VGET so it is * checked to see if it is an alias of an existing entry in * the inode cache. XXX I don't believe this is necessary now. */ (*vpp)->v_type = VNON; ino = ip->i_number; /* Save this before vgone() invalidates ip. 
*/ vgone(*vpp); vput(*vpp); error = VFS_VGET(ap->a_dvp->v_mount, ino, LK_EXCLUSIVE, vpp); if (error) { *vpp = NULL; return (error); } return (0); } static int ext2_remove(struct vop_remove_args *ap) { struct inode *ip; struct vnode *vp = ap->a_vp; struct vnode *dvp = ap->a_dvp; int error; ip = VTOI(vp); if ((ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) || (VTOI(dvp)->i_flags & APPEND)) { error = EPERM; goto out; } error = ext2_dirremove(dvp, ap->a_cnp); if (error == 0) { ip->i_nlink--; ip->i_flag |= IN_CHANGE; } out: return (error); } static unsigned short ext2_max_nlink(struct inode *ip) { struct m_ext2fs *fs; fs = ip->i_e2fs; if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_DIR_NLINK)) return (EXT4_LINK_MAX); else return (EXT2_LINK_MAX); } /* * link vnode call */ static int ext2_link(struct vop_link_args *ap) { struct vnode *vp = ap->a_vp; struct vnode *tdvp = ap->a_tdvp; struct componentname *cnp = ap->a_cnp; struct inode *ip; int error; #ifdef INVARIANTS if ((cnp->cn_flags & HASBUF) == 0) panic("ext2_link: no name"); #endif ip = VTOI(vp); if ((nlink_t)ip->i_nlink >= ext2_max_nlink(ip)) { error = EMLINK; goto out; } if (ip->i_flags & (IMMUTABLE | APPEND)) { error = EPERM; goto out; } ip->i_nlink++; ip->i_flag |= IN_CHANGE; error = ext2_update(vp, !DOINGASYNC(vp)); if (!error) error = ext2_direnter(ip, tdvp, cnp); if (error) { ip->i_nlink--; ip->i_flag |= IN_CHANGE; } out: return (error); } static int ext2_inc_nlink(struct inode *ip) { ip->i_nlink++; if (ext2_htree_has_idx(ip) && ip->i_nlink > 1) { if (ip->i_nlink >= ext2_max_nlink(ip) || ip->i_nlink == 2) ip->i_nlink = 1; } else if (ip->i_nlink > ext2_max_nlink(ip)) { ip->i_nlink--; return (EMLINK); } return (0); } static void ext2_dec_nlink(struct inode *ip) { if (!S_ISDIR(ip->i_mode) || ip->i_nlink > 2) ip->i_nlink--; } /* * Rename system call. * rename("foo", "bar"); * is essentially * unlink("bar"); * link("foo", "bar"); * unlink("foo"); * but ``atomically''. Can't do full commit without saving state in the * inode on disk which isn't feasible at this time. Best we can do is * always guarantee the target exists. * * Basic algorithm is: * * 1) Bump link count on source while we're linking it to the * target. This also ensure the inode won't be deleted out * from underneath us while we work (it may be truncated by * a concurrent `trunc' or `open' for creation). * 2) Link source to destination. If destination already exists, * delete it first. * 3) Unlink source reference to inode if still around. If a * directory was moved and the parent of the destination * is different from the source, patch the ".." entry in the * directory. */ static int ext2_rename(struct vop_rename_args *ap) { struct vnode *tvp = ap->a_tvp; struct vnode *tdvp = ap->a_tdvp; struct vnode *fvp = ap->a_fvp; struct vnode *fdvp = ap->a_fdvp; struct componentname *tcnp = ap->a_tcnp; struct componentname *fcnp = ap->a_fcnp; struct inode *ip, *xp, *dp; struct dirtemplate dirbuf; int doingdirectory = 0, oldparent = 0, newparent = 0; int error = 0; u_char namlen; #ifdef INVARIANTS if ((tcnp->cn_flags & HASBUF) == 0 || (fcnp->cn_flags & HASBUF) == 0) panic("ext2_rename: no name"); #endif /* * Check for cross-device rename. 
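 *
 * Link counts above are bounded by EXT2_LINK_MAX unless the DIR_NLINK
 * read-only compatible feature raises the limit, and an htree-indexed
 * directory that would overflow records a link count of 1, the conventional
 * "not tracked" marker.  A simplified standalone sketch of that increment
 * rule; the names and the flat unsigned counter are illustrative only:
 *
 *	#include <errno.h>
 *
 *	static int
 *	bump_link(unsigned *nlink, unsigned maxlink, int has_htree_index)
 *	{
 *		(*nlink)++;
 *		if (has_htree_index && *nlink > 1) {
 *			// 1 doubles as the "link count unknown" marker
 *			if (*nlink >= maxlink || *nlink == 2)
 *				*nlink = 1;
 *		} else if (*nlink > maxlink) {
 *			(*nlink)--;
 *			return (EMLINK);
 *		}
 *		return (0);
 *	}
 *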
*/ if ((fvp->v_mount != tdvp->v_mount) || (tvp && (fvp->v_mount != tvp->v_mount))) { error = EXDEV; abortit: if (tdvp == tvp) vrele(tdvp); else vput(tdvp); if (tvp) vput(tvp); vrele(fdvp); vrele(fvp); return (error); } if (tvp && ((VTOI(tvp)->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) || (VTOI(tdvp)->i_flags & APPEND))) { error = EPERM; goto abortit; } /* * Renaming a file to itself has no effect. The upper layers should * not call us in that case. Temporarily just warn if they do. */ if (fvp == tvp) { printf("ext2_rename: fvp == tvp (can't happen)\n"); error = 0; goto abortit; } if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0) goto abortit; dp = VTOI(fdvp); ip = VTOI(fvp); if (ip->i_nlink >= ext2_max_nlink(ip) && !ext2_htree_has_idx(ip)) { VOP_UNLOCK(fvp, 0); error = EMLINK; goto abortit; } if ((ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) || (dp->i_flags & APPEND)) { VOP_UNLOCK(fvp, 0); error = EPERM; goto abortit; } if ((ip->i_mode & IFMT) == IFDIR) { /* * Avoid ".", "..", and aliases of "." for obvious reasons. */ if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') || dp == ip || (fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT || (ip->i_flag & IN_RENAME)) { VOP_UNLOCK(fvp, 0); error = EINVAL; goto abortit; } ip->i_flag |= IN_RENAME; oldparent = dp->i_number; doingdirectory++; } vrele(fdvp); /* * When the target exists, both the directory * and target vnodes are returned locked. */ dp = VTOI(tdvp); xp = NULL; if (tvp) xp = VTOI(tvp); /* * 1) Bump link count while we're moving stuff * around. If we crash somewhere before * completing our work, the link count * may be wrong, but correctable. */ ext2_inc_nlink(ip); ip->i_flag |= IN_CHANGE; if ((error = ext2_update(fvp, !DOINGASYNC(fvp))) != 0) { VOP_UNLOCK(fvp, 0); goto bad; } /* * If ".." must be changed (ie the directory gets a new * parent) then the source directory must not be in the * directory hierarchy above the target, as this would * orphan everything below the source directory. Also * the user must have write permission in the source so * as to be able to change "..". We must repeat the call * to namei, as the parent directory is unlocked by the * call to checkpath(). */ error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_thread); VOP_UNLOCK(fvp, 0); if (oldparent != dp->i_number) newparent = dp->i_number; if (doingdirectory && newparent) { if (error) /* write access check above */ goto bad; if (xp != NULL) vput(tvp); error = ext2_checkpath(ip, dp, tcnp->cn_cred); if (error) goto out; VREF(tdvp); error = relookup(tdvp, &tvp, tcnp); if (error) goto out; vrele(tdvp); dp = VTOI(tdvp); xp = NULL; if (tvp) xp = VTOI(tvp); } /* * 2) If target doesn't exist, link the target * to the source and unlink the source. * Otherwise, rewrite the target directory * entry to reference the source inode and * expunge the original entry's existence. */ if (xp == NULL) { if (dp->i_devvp != ip->i_devvp) panic("ext2_rename: EXDEV"); /* * Account for ".." in new directory. * When source and destination have the same * parent we don't fool with the link count. 
*/ if (doingdirectory && newparent) { error = ext2_inc_nlink(dp); if (error) goto bad; dp->i_flag |= IN_CHANGE; error = ext2_update(tdvp, !DOINGASYNC(tdvp)); if (error) goto bad; } error = ext2_direnter(ip, tdvp, tcnp); if (error) { if (doingdirectory && newparent) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; (void)ext2_update(tdvp, 1); } goto bad; } vput(tdvp); } else { if (xp->i_devvp != dp->i_devvp || xp->i_devvp != ip->i_devvp) panic("ext2_rename: EXDEV"); /* * Short circuit rename(foo, foo). */ if (xp->i_number == ip->i_number) panic("ext2_rename: same file"); /* * If the parent directory is "sticky", then the user must * own the parent directory, or the destination of the rename, * otherwise the destination may not be changed (except by * root). This implements append-only directories. */ if ((dp->i_mode & S_ISTXT) && tcnp->cn_cred->cr_uid != 0 && tcnp->cn_cred->cr_uid != dp->i_uid && xp->i_uid != tcnp->cn_cred->cr_uid) { error = EPERM; goto bad; } /* * Target must be empty if a directory and have no links * to it. Also, ensure source and target are compatible * (both directories, or both not directories). */ if ((xp->i_mode & IFMT) == IFDIR) { if (!ext2_dirempty(xp, dp->i_number, tcnp->cn_cred)) { error = ENOTEMPTY; goto bad; } if (!doingdirectory) { error = ENOTDIR; goto bad; } cache_purge(tdvp); } else if (doingdirectory) { error = EISDIR; goto bad; } error = ext2_dirrewrite(dp, ip, tcnp); if (error) goto bad; /* * If the target directory is in the same * directory as the source directory, * decrement the link count on the parent * of the target directory. */ if (doingdirectory && !newparent) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; } vput(tdvp); /* * Adjust the link count of the target to * reflect the dirrewrite above. If this is * a directory it is empty and there are * no links to it, so we can squash the inode and * any space associated with it. We disallowed * renaming over top of a directory with links to * it above, as the remaining link would point to * a directory without "." or ".." entries. */ ext2_dec_nlink(xp); if (doingdirectory) { if (--xp->i_nlink != 0) panic("ext2_rename: linked directory"); error = ext2_truncate(tvp, (off_t)0, IO_SYNC, tcnp->cn_cred, tcnp->cn_thread); } xp->i_flag |= IN_CHANGE; vput(tvp); xp = NULL; } /* * 3) Unlink the source. */ fcnp->cn_flags &= ~MODMASK; fcnp->cn_flags |= LOCKPARENT | LOCKLEAF; VREF(fdvp); error = relookup(fdvp, &fvp, fcnp); if (error == 0) vrele(fdvp); if (fvp != NULL) { xp = VTOI(fvp); dp = VTOI(fdvp); } else { /* * From name has disappeared. IN_RENAME is not sufficient * to protect against directory races due to timing windows, * so we can't panic here. */ vrele(ap->a_fvp); return (0); } /* * Ensure that the directory entry still exists and has not * changed while the new name has been entered. If the source is * a file then the entry may have been unlinked or renamed. In * either case there is no further work to be done. If the source * is a directory then it cannot have been rmdir'ed; its link * count of three would cause a rmdir to fail with ENOTEMPTY. * The IN_RENAME flag ensures that it cannot be moved by another * rename. */ if (xp != ip) { /* * From name resolves to a different inode. IN_RENAME is * not sufficient protection against timing window races * so we can't panic here. */ } else { /* * If the source is a directory with a * new parent, the link count of the old * parent directory must be decremented * and ".." set to point to the new parent. 
*/ if (doingdirectory && newparent) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; error = vn_rdwr(UIO_READ, fvp, (caddr_t)&dirbuf, sizeof(struct dirtemplate), (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_NOMACCHECK, tcnp->cn_cred, NOCRED, NULL, NULL); if (error == 0) { /* Like ufs little-endian: */ namlen = dirbuf.dotdot_type; if (namlen != 2 || dirbuf.dotdot_name[0] != '.' || dirbuf.dotdot_name[1] != '.') { ext2_dirbad(xp, (doff_t)12, "rename: mangled dir"); } else { dirbuf.dotdot_ino = newparent; (void)vn_rdwr(UIO_WRITE, fvp, (caddr_t)&dirbuf, sizeof(struct dirtemplate), (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_SYNC | IO_NOMACCHECK, tcnp->cn_cred, NOCRED, NULL, NULL); cache_purge(fdvp); } } } error = ext2_dirremove(fdvp, fcnp); if (!error) { ext2_dec_nlink(xp); xp->i_flag |= IN_CHANGE; } xp->i_flag &= ~IN_RENAME; } if (dp) vput(fdvp); if (xp) vput(fvp); vrele(ap->a_fvp); return (error); bad: if (xp) vput(ITOV(xp)); vput(ITOV(dp)); out: if (doingdirectory) ip->i_flag &= ~IN_RENAME; if (vn_lock(fvp, LK_EXCLUSIVE) == 0) { ext2_dec_nlink(ip); ip->i_flag |= IN_CHANGE; ip->i_flag &= ~IN_RENAME; vput(fvp); } else vrele(fvp); return (error); } #ifdef UFS_ACL static int ext2_do_posix1e_acl_inheritance_dir(struct vnode *dvp, struct vnode *tvp, mode_t dmode, struct ucred *cred, struct thread *td) { int error; struct inode *ip = VTOI(tvp); struct acl *dacl, *acl; acl = acl_alloc(M_WAITOK); dacl = acl_alloc(M_WAITOK); /* * Retrieve default ACL from parent, if any. */ error = VOP_GETACL(dvp, ACL_TYPE_DEFAULT, acl, cred, td); switch (error) { case 0: /* * Retrieved a default ACL, so merge mode and ACL if * necessary. If the ACL is empty, fall through to * the "not defined or available" case. */ if (acl->acl_cnt != 0) { dmode = acl_posix1e_newfilemode(dmode, acl); ip->i_mode = dmode; *dacl = *acl; ext2_sync_acl_from_inode(ip, acl); break; } /* FALLTHROUGH */ case EOPNOTSUPP: /* * Just use the mode as-is. */ ip->i_mode = dmode; error = 0; goto out; default: goto out; } error = VOP_SETACL(tvp, ACL_TYPE_ACCESS, acl, cred, td); if (error == 0) error = VOP_SETACL(tvp, ACL_TYPE_DEFAULT, dacl, cred, td); switch (error) { case 0: break; case EOPNOTSUPP: /* * XXX: This should not happen, as EOPNOTSUPP above * was supposed to free acl. */ #ifdef DEBUG printf("ext2_mkdir: VOP_GETACL() but no VOP_SETACL()\n"); #endif /* DEBUG */ break; default: goto out; } out: acl_free(acl); acl_free(dacl); return (error); } static int ext2_do_posix1e_acl_inheritance_file(struct vnode *dvp, struct vnode *tvp, mode_t mode, struct ucred *cred, struct thread *td) { int error; struct inode *ip = VTOI(tvp); struct acl *acl; acl = acl_alloc(M_WAITOK); /* * Retrieve default ACL for parent, if any. */ error = VOP_GETACL(dvp, ACL_TYPE_DEFAULT, acl, cred, td); switch (error) { case 0: /* * Retrieved a default ACL, so merge mode and ACL if * necessary. */ if (acl->acl_cnt != 0) { /* * Two possible ways for default ACL to not * be present. First, the EA can be * undefined, or second, the default ACL can * be blank. If it's blank, fall through to * the it's not defined case. */ mode = acl_posix1e_newfilemode(mode, acl); ip->i_mode = mode; ext2_sync_acl_from_inode(ip, acl); break; } /* FALLTHROUGH */ case EOPNOTSUPP: /* * Just use the mode as-is. */ ip->i_mode = mode; error = 0; goto out; default: goto out; } error = VOP_SETACL(tvp, ACL_TYPE_ACCESS, acl, cred, td); switch (error) { case 0: break; case EOPNOTSUPP: /* * XXX: This should not happen, as EOPNOTSUPP above was * supposed to free acl. 
*/ printf("ufs_do_posix1e_acl_inheritance_file: VOP_GETACL() " "but no VOP_SETACL()\n"); /* panic("ufs_do_posix1e_acl_inheritance_file: VOP_GETACL() " "but no VOP_SETACL()"); */ break; default: goto out; } out: acl_free(acl); return (error); } #endif /* UFS_ACL */ /* * Mkdir system call */ static int ext2_mkdir(struct vop_mkdir_args *ap) { struct vnode *dvp = ap->a_dvp; struct vattr *vap = ap->a_vap; struct componentname *cnp = ap->a_cnp; struct inode *ip, *dp; struct vnode *tvp; struct dirtemplate dirtemplate, *dtp; int error, dmode; #ifdef INVARIANTS if ((cnp->cn_flags & HASBUF) == 0) panic("ext2_mkdir: no name"); #endif dp = VTOI(dvp); if ((nlink_t)dp->i_nlink >= ext2_max_nlink(dp) && !ext2_htree_has_idx(dp)) { error = EMLINK; goto out; } dmode = vap->va_mode & 0777; dmode |= IFDIR; /* * Must simulate part of ext2_makeinode here to acquire the inode, * but not have it entered in the parent directory. The entry is * made later after writing "." and ".." entries. */ error = ext2_valloc(dvp, dmode, cnp->cn_cred, &tvp); if (error) goto out; ip = VTOI(tvp); ip->i_gid = dp->i_gid; #ifdef SUIDDIR { /* * if we are hacking owners here, (only do this where told to) * and we are not giving it TOO root, (would subvert quotas) * then go ahead and give it to the other user. * The new directory also inherits the SUID bit. * If user's UID and dir UID are the same, * 'give it away' so that the SUID is still forced on. */ if ((dvp->v_mount->mnt_flag & MNT_SUIDDIR) && (dp->i_mode & ISUID) && dp->i_uid) { dmode |= ISUID; ip->i_uid = dp->i_uid; } else { ip->i_uid = cnp->cn_cred->cr_uid; } } #else ip->i_uid = cnp->cn_cred->cr_uid; #endif ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; ip->i_mode = dmode; tvp->v_type = VDIR; /* Rest init'd in getnewvnode(). */ ip->i_nlink = 2; if (cnp->cn_flags & ISWHITEOUT) ip->i_flags |= UF_OPAQUE; error = ext2_update(tvp, 1); /* * Bump link count in parent directory * to reflect work done below. Should * be done before reference is created * so reparation is possible if we crash. */ ext2_inc_nlink(dp); dp->i_flag |= IN_CHANGE; error = ext2_update(dvp, !DOINGASYNC(dvp)); if (error) goto bad; /* Initialize directory with "." and ".." from static template. */ if (EXT2_HAS_INCOMPAT_FEATURE(ip->i_e2fs, EXT2F_INCOMPAT_FTYPE)) dtp = &mastertemplate; else dtp = &omastertemplate; dirtemplate = *dtp; dirtemplate.dot_ino = ip->i_number; dirtemplate.dotdot_ino = dp->i_number; /* * note that in ext2 DIRBLKSIZ == blocksize, not DEV_BSIZE so let's * just redefine it - for this function only */ #undef DIRBLKSIZ #define DIRBLKSIZ VTOI(dvp)->i_e2fs->e2fs_bsize dirtemplate.dotdot_reclen = DIRBLKSIZ - 12; error = vn_rdwr(UIO_WRITE, tvp, (caddr_t)&dirtemplate, sizeof(dirtemplate), (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_SYNC | IO_NOMACCHECK, cnp->cn_cred, NOCRED, NULL, NULL); if (error) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; goto bad; } if (DIRBLKSIZ > VFSTOEXT2(dvp->v_mount)->um_mountp->mnt_stat.f_bsize) /* XXX should grow with balloc() */ panic("ext2_mkdir: blksize"); else { ip->i_size = DIRBLKSIZ; ip->i_flag |= IN_CHANGE; } #ifdef UFS_ACL if (dvp->v_mount->mnt_flag & MNT_ACLS) { error = ext2_do_posix1e_acl_inheritance_dir(dvp, tvp, dmode, cnp->cn_cred, cnp->cn_thread); if (error) goto bad; } #endif /* UFS_ACL */ /* Directory set up, now install its entry in the parent directory. 
*/ error = ext2_direnter(ip, dvp, cnp); if (error) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; } bad: /* * No need to do an explicit VOP_TRUNCATE here, vrele will do this * for us because we set the link count to 0. */ if (error) { ip->i_nlink = 0; ip->i_flag |= IN_CHANGE; vput(tvp); } else *ap->a_vpp = tvp; out: return (error); #undef DIRBLKSIZ #define DIRBLKSIZ DEV_BSIZE } /* * Rmdir system call. */ static int ext2_rmdir(struct vop_rmdir_args *ap) { struct vnode *vp = ap->a_vp; struct vnode *dvp = ap->a_dvp; struct componentname *cnp = ap->a_cnp; struct inode *ip, *dp; int error; ip = VTOI(vp); dp = VTOI(dvp); /* * Verify the directory is empty (and valid). * (Rmdir ".." won't be valid since * ".." will contain a reference to * the current directory and thus be * non-empty.) */ if (!ext2_dirempty(ip, dp->i_number, cnp->cn_cred)) { error = ENOTEMPTY; goto out; } if ((dp->i_flags & APPEND) || (ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND))) { error = EPERM; goto out; } /* * Delete reference to directory before purging * inode. If we crash in between, the directory * will be reattached to lost+found, */ error = ext2_dirremove(dvp, cnp); if (error) goto out; ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; cache_purge(dvp); VOP_UNLOCK(dvp, 0); /* * Truncate inode. The only stuff left * in the directory is "." and "..". */ ip->i_nlink = 0; error = ext2_truncate(vp, (off_t)0, IO_SYNC, cnp->cn_cred, cnp->cn_thread); cache_purge(ITOV(ip)); if (vn_lock(dvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { VOP_UNLOCK(vp, 0); vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); } out: return (error); } /* * symlink -- make a symbolic link */ static int ext2_symlink(struct vop_symlink_args *ap) { struct vnode *vp, **vpp = ap->a_vpp; struct inode *ip; int len, error; error = ext2_makeinode(IFLNK | ap->a_vap->va_mode, ap->a_dvp, vpp, ap->a_cnp); if (error) return (error); vp = *vpp; len = strlen(ap->a_target); if (len < vp->v_mount->mnt_maxsymlinklen) { ip = VTOI(vp); bcopy(ap->a_target, (char *)ip->i_shortlink, len); ip->i_size = len; ip->i_flag |= IN_CHANGE | IN_UPDATE; } else error = vn_rdwr(UIO_WRITE, vp, ap->a_target, len, (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_NOMACCHECK, ap->a_cnp->cn_cred, NOCRED, NULL, NULL); if (error) vput(vp); return (error); } /* * Return target name of a symbolic link */ static int ext2_readlink(struct vop_readlink_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); int isize; isize = ip->i_size; if (isize < vp->v_mount->mnt_maxsymlinklen) { uiomove((char *)ip->i_shortlink, isize, ap->a_uio); return (0); } return (VOP_READ(vp, ap->a_uio, 0, ap->a_cred)); } /* * Calculate the logical to physical mapping if not done already, * then call the device strategy routine. * * In order to be able to swap to a file, the ext2_bmaparray() operation may not * deadlock on memory. See ext2_bmap() for details. 
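 *
 * Short link targets in ext2_symlink() above are stored directly in the
 * inode's block-pointer area instead of a data block, so ext2_readlink() can
 * copy them out without any disk I/O; only targets at or above the mount's
 * maxsymlinklen take the regular write path.  A rough standalone sketch of
 * that decision; the 60-byte area size and all names are assumptions made
 * for the example:
 *
 *	#include <string.h>
 *
 *	#define SHORTLINK_AREA	60	// assumed in-inode pointer area size
 *
 *	// Returns 1 when the target was stored inline, 0 when it must be
 *	// written to a regular data block instead.
 *	static int
 *	store_symlink_inline(char area[SHORTLINK_AREA], const char *target,
 *	    size_t len, size_t maxsymlinklen)
 *	{
 *		if (len >= maxsymlinklen || len > SHORTLINK_AREA)
 *			return (0);
 *		memcpy(area, target, len);
 *		return (1);
 *	}
 *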
*/ static int ext2_strategy(struct vop_strategy_args *ap) { struct buf *bp = ap->a_bp; struct vnode *vp = ap->a_vp; struct bufobj *bo; daddr_t blkno; int error; if (vp->v_type == VBLK || vp->v_type == VCHR) panic("ext2_strategy: spec"); if (bp->b_blkno == bp->b_lblkno) { if (VTOI(ap->a_vp)->i_flag & IN_E4EXTENTS) error = ext4_bmapext(vp, bp->b_lblkno, &blkno, NULL, NULL); else error = ext2_bmaparray(vp, bp->b_lblkno, &blkno, NULL, NULL); bp->b_blkno = blkno; if (error) { bp->b_error = error; bp->b_ioflags |= BIO_ERROR; bufdone(bp); return (0); } if ((long)bp->b_blkno == -1) vfs_bio_clrbuf(bp); } if ((long)bp->b_blkno == -1) { bufdone(bp); return (0); } bp->b_iooffset = dbtob(bp->b_blkno); bo = VFSTOEXT2(vp->v_mount)->um_bo; BO_STRATEGY(bo, bp); return (0); } /* * Print out the contents of an inode. */ static int ext2_print(struct vop_print_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); vn_printf(ip->i_devvp, "\tino %ju", (uintmax_t)ip->i_number); if (vp->v_type == VFIFO) fifo_printinfo(vp); printf("\n"); return (0); } /* * Close wrapper for fifos. * * Update the times on the inode then do device close. */ static int ext2fifo_close(struct vop_close_args *ap) { struct vnode *vp = ap->a_vp; VI_LOCK(vp); if (vp->v_usecount > 1) ext2_itimes_locked(vp); VI_UNLOCK(vp); return (fifo_specops.vop_close(ap)); } /* * Kqfilter wrapper for fifos. * * Fall through to ext2 kqfilter routines if needed */ static int ext2fifo_kqfilter(struct vop_kqfilter_args *ap) { int error; error = fifo_specops.vop_kqfilter(ap); if (error) error = vfs_kqfilter(ap); return (error); } /* * Return POSIX pathconf information applicable to ext2 filesystems. */ static int ext2_pathconf(struct vop_pathconf_args *ap) { int error = 0; switch (ap->a_name) { case _PC_LINK_MAX: if (ext2_htree_has_idx(VTOI(ap->a_vp))) *ap->a_retval = INT_MAX; else *ap->a_retval = ext2_max_nlink(VTOI(ap->a_vp)); break; case _PC_NAME_MAX: *ap->a_retval = NAME_MAX; break; case _PC_PIPE_BUF: if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO) *ap->a_retval = PIPE_BUF; else error = EINVAL; break; case _PC_CHOWN_RESTRICTED: *ap->a_retval = 1; break; case _PC_NO_TRUNC: *ap->a_retval = 1; break; #ifdef UFS_ACL case _PC_ACL_EXTENDED: if (ap->a_vp->v_mount->mnt_flag & MNT_ACLS) *ap->a_retval = 1; else *ap->a_retval = 0; break; case _PC_ACL_PATH_MAX: if (ap->a_vp->v_mount->mnt_flag & MNT_ACLS) *ap->a_retval = ACL_MAX_ENTRIES; else *ap->a_retval = 3; break; #endif /* UFS_ACL */ case _PC_MIN_HOLE_SIZE: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize; break; case _PC_PRIO_IO: *ap->a_retval = 0; break; case _PC_SYNC_IO: *ap->a_retval = 0; break; case _PC_ALLOC_SIZE_MIN: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_bsize; break; case _PC_FILESIZEBITS: *ap->a_retval = 64; break; case _PC_REC_INCR_XFER_SIZE: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize; break; case _PC_REC_MAX_XFER_SIZE: *ap->a_retval = -1; /* means ``unlimited'' */ break; case _PC_REC_MIN_XFER_SIZE: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize; break; case _PC_REC_XFER_ALIGN: *ap->a_retval = PAGE_SIZE; break; case _PC_SYMLINK_MAX: *ap->a_retval = MAXPATHLEN; break; default: error = vop_stdpathconf(ap); break; } return (error); } /* * Vnode operation to remove a named attribute. 
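The pathconf handler above is what services pathconf(2)/fpathconf(2) queries on an ext2 mount, e.g. _PC_LINK_MAX returning INT_MAX once a directory carries an htree index, and _PC_FILESIZEBITS reporting 64. A small userland check; "/mnt/ext2" is a hypothetical mount point.

/*
 * Query a few of the _PC_* values handled by ext2_pathconf() above.
 * Substitute a real ext2fs mount point for "/mnt/ext2".
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
show(const char *path, const char *label, int name)
{
	long val;

	errno = 0;
	val = pathconf(path, name);
	if (val == -1 && errno != 0)
		printf("%-18s error: %s\n", label, strerror(errno));
	else
		printf("%-18s %ld\n", label, val);
}

int
main(void)
{
	const char *p = "/mnt/ext2";	/* hypothetical ext2fs mount */

	show(p, "_PC_LINK_MAX", _PC_LINK_MAX);
	show(p, "_PC_NAME_MAX", _PC_NAME_MAX);
	show(p, "_PC_FILESIZEBITS", _PC_FILESIZEBITS);
	show(p, "_PC_SYMLINK_MAX", _PC_SYMLINK_MAX);
	return (0);
}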
*/ static int ext2_deleteextattr(struct vop_deleteextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VWRITE); if (error) return (error); error = ENOATTR; if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_delete(ip, ap->a_attrnamespace, ap->a_name); if (error != ENOATTR) return (error); } if (ip->i_facl) error = ext2_extattr_block_delete(ip, ap->a_attrnamespace, ap->a_name); return (error); } /* * Vnode operation to retrieve a named extended attribute. */ static int ext2_getextattr(struct vop_getextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VREAD); if (error) return (error); if (ap->a_size != NULL) *ap->a_size = 0; error = ENOATTR; if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_get(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio, ap->a_size); if (error != ENOATTR) return (error); } if (ip->i_facl) error = ext2_extattr_block_get(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio, ap->a_size); return (error); } /* * Vnode operation to retrieve extended attributes on a vnode. */ static int ext2_listextattr(struct vop_listextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VREAD); if (error) return (error); if (ap->a_size != NULL) *ap->a_size = 0; if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_list(ip, ap->a_attrnamespace, ap->a_uio, ap->a_size); if (error) return (error); } if (ip->i_facl) error = ext2_extattr_block_list(ip, ap->a_attrnamespace, ap->a_uio, ap->a_size); return (error); } /* * Vnode operation to set a named attribute. 
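The delete/get/list handlers above consult the in-inode extended-attribute area first (only present when the inode is larger than the 128-byte rev 0 size) and then fall back to the external attribute block tracked by i_facl, returning ENOATTR when neither holds the name. From userland the same attributes are reached through FreeBSD's extattr(2) family; a hedged example follows, with a made-up file path.

/*
 * Userland view of the extattr VOPs above, via FreeBSD's extattr(2)
 * interface.  "/mnt/ext2/file" is a hypothetical path on an ext2fs mount.
 */
#include <sys/types.h>
#include <sys/extattr.h>

#include <err.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *path = "/mnt/ext2/file";	/* hypothetical */
	const char *name = "comment";
	const char *value = "hello";
	char buf[64];
	ssize_t n;

	if (extattr_set_file(path, EXTATTR_NAMESPACE_USER, name,
	    value, strlen(value)) < 0)
		err(1, "extattr_set_file");

	n = extattr_get_file(path, EXTATTR_NAMESPACE_USER, name,
	    buf, sizeof(buf));
	if (n < 0)
		err(1, "extattr_get_file");
	printf("user.%s = %.*s\n", name, (int)n, buf);

	if (extattr_delete_file(path, EXTATTR_NAMESPACE_USER, name) < 0)
		err(1, "extattr_delete_file");
	return (0);
}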
*/ static int ext2_setextattr(struct vop_setextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VWRITE); if (error) return (error); error = ext2_extattr_valid_attrname(ap->a_attrnamespace, ap->a_name); if (error) return (error); if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_set(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio); if (error != ENOSPC) return (error); } error = ext2_extattr_block_set(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio); return (error); } /* * Vnode pointer to File handle */ /* ARGSUSED */ static int ext2_vptofh(struct vop_vptofh_args *ap) { struct inode *ip; struct ufid *ufhp; ip = VTOI(ap->a_vp); ufhp = (struct ufid *)ap->a_fhp; ufhp->ufid_len = sizeof(struct ufid); ufhp->ufid_ino = ip->i_number; ufhp->ufid_gen = ip->i_gen; return (0); } /* * Initialize the vnode associated with a new inode, handle aliased * vnodes. */ int ext2_vinit(struct mount *mntp, struct vop_vector *fifoops, struct vnode **vpp) { struct inode *ip; struct vnode *vp; vp = *vpp; ip = VTOI(vp); vp->v_type = IFTOVT(ip->i_mode); if (vp->v_type == VFIFO) vp->v_op = fifoops; if (ip->i_number == EXT2_ROOTINO) vp->v_vflag |= VV_ROOT; ip->i_modrev = init_va_filerev(); *vpp = vp; return (0); } /* * Allocate a new inode. */ static int ext2_makeinode(int mode, struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) { struct inode *ip, *pdir; struct vnode *tvp; int error; pdir = VTOI(dvp); #ifdef INVARIANTS if ((cnp->cn_flags & HASBUF) == 0) panic("ext2_makeinode: no name"); #endif *vpp = NULL; if ((mode & IFMT) == 0) mode |= IFREG; error = ext2_valloc(dvp, mode, cnp->cn_cred, &tvp); if (error) { return (error); } ip = VTOI(tvp); ip->i_gid = pdir->i_gid; #ifdef SUIDDIR { /* * if we are * not the owner of the directory, * and we are hacking owners here, (only do this where told to) * and we are not giving it TOO root, (would subvert quotas) * then go ahead and give it to the other user. * Note that this drops off the execute bits for security. */ if ((dvp->v_mount->mnt_flag & MNT_SUIDDIR) && (pdir->i_mode & ISUID) && (pdir->i_uid != cnp->cn_cred->cr_uid) && pdir->i_uid) { ip->i_uid = pdir->i_uid; mode &= ~07111; } else { ip->i_uid = cnp->cn_cred->cr_uid; } } #else ip->i_uid = cnp->cn_cred->cr_uid; #endif ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; ip->i_mode = mode; tvp->v_type = IFTOVT(mode); /* Rest init'd in getnewvnode(). */ ip->i_nlink = 1; if ((ip->i_mode & ISGID) && !groupmember(ip->i_gid, cnp->cn_cred)) { if (priv_check_cred(cnp->cn_cred, PRIV_VFS_RETAINSUGID, 0)) ip->i_mode &= ~ISGID; } if (cnp->cn_flags & ISWHITEOUT) ip->i_flags |= UF_OPAQUE; /* * Make sure inode goes to disk before directory entry. */ error = ext2_update(tvp, !DOINGASYNC(tvp)); if (error) goto bad; #ifdef UFS_ACL if (dvp->v_mount->mnt_flag & MNT_ACLS) { error = ext2_do_posix1e_acl_inheritance_file(dvp, tvp, mode, cnp->cn_cred, cnp->cn_thread); if (error) goto bad; } #endif /* UFS_ACL */ error = ext2_direnter(ip, dvp, cnp); if (error) goto bad; *vpp = tvp; return (0); bad: /* * Write error occurred trying to update the inode * or the directory so must deallocate the inode. */ ip->i_nlink = 0; ip->i_flag |= IN_CHANGE; vput(tvp); return (error); } /* * Vnode op for reading. 
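ext2_vptofh() above packs the inode number and generation into a struct ufid so that NFS file handles do not silently follow a recycled inode: a handle carrying a stale generation is rejected on the later fhtovp lookup. A minimal stand-alone illustration of why the generation matters; the struct below only mirrors the fields used above and is not the kernel's struct ufid declaration.

/*
 * Illustration of the ufid-style file handle built by ext2_vptofh().
 */
#include <stdint.h>
#include <stdio.h>

struct fake_ufid {
	uint16_t len;	/* ufid_len */
	uint32_t ino;	/* ufid_ino */
	uint32_t gen;	/* ufid_gen */
};

/* A handle is only honored if the inode's current generation matches. */
static int
handle_is_stale(const struct fake_ufid *fh, uint32_t current_gen)
{
	return (fh->gen != current_gen);
}

int
main(void)
{
	struct fake_ufid fh = { sizeof(fh), 1234, 7 };

	printf("inode 1234, gen 7: %s\n",
	    handle_is_stale(&fh, 7) ? "stale" : "valid");
	/* Inode 1234 was freed and reallocated; its generation moved on. */
	printf("inode 1234, gen 8: %s\n",
	    handle_is_stale(&fh, 8) ? "stale" : "valid");
	return (0);
}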
*/ static int ext2_read(struct vop_read_args *ap) { struct vnode *vp; struct inode *ip; struct uio *uio; struct m_ext2fs *fs; struct buf *bp; daddr_t lbn, nextlbn; off_t bytesinfile; long size, xfersize, blkoffset; int error, orig_resid, seqcount; int ioflag; vp = ap->a_vp; uio = ap->a_uio; ioflag = ap->a_ioflag; seqcount = ap->a_ioflag >> IO_SEQSHIFT; ip = VTOI(vp); #ifdef INVARIANTS if (uio->uio_rw != UIO_READ) panic("%s: mode", "ext2_read"); if (vp->v_type == VLNK) { if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen) panic("%s: short symlink", "ext2_read"); } else if (vp->v_type != VREG && vp->v_type != VDIR) panic("%s: type %d", "ext2_read", vp->v_type); #endif orig_resid = uio->uio_resid; KASSERT(orig_resid >= 0, ("ext2_read: uio->uio_resid < 0")); if (orig_resid == 0) return (0); KASSERT(uio->uio_offset >= 0, ("ext2_read: uio->uio_offset < 0")); fs = ip->i_e2fs; if (uio->uio_offset < ip->i_size && uio->uio_offset >= fs->e2fs_maxfilesize) return (EOVERFLOW); for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) { if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0) break; lbn = lblkno(fs, uio->uio_offset); nextlbn = lbn + 1; size = blksize(fs, ip, lbn); blkoffset = blkoff(fs, uio->uio_offset); xfersize = fs->e2fs_fsize - blkoffset; if (uio->uio_resid < xfersize) xfersize = uio->uio_resid; if (bytesinfile < xfersize) xfersize = bytesinfile; if (lblktosize(fs, nextlbn) >= ip->i_size) error = bread(vp, lbn, size, NOCRED, &bp); else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { error = cluster_read(vp, ip->i_size, lbn, size, NOCRED, blkoffset + uio->uio_resid, seqcount, 0, &bp); } else if (seqcount > 1) { u_int nextsize = blksize(fs, ip, nextlbn); error = breadn(vp, lbn, size, &nextlbn, &nextsize, 1, NOCRED, &bp); } else error = bread(vp, lbn, size, NOCRED, &bp); if (error) { brelse(bp); bp = NULL; break; } /* * We should only get non-zero b_resid when an I/O error * has occurred, which should cause us to break above. * However, if the short read did not cause an error, * then we want to ensure that we do not uiomove bad * or uninitialized data. */ size -= bp->b_resid; if (size < xfersize) { if (size == 0) break; xfersize = size; } error = uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio); if (error) break; vfs_bio_brelse(bp, ioflag); } /* * This can only happen in the case of an error because the loop * above resets bp to NULL on each iteration and on normal * completion has not set a new value into it. so it must have come * from a 'break' statement */ if (bp != NULL) vfs_bio_brelse(bp, ioflag); if ((error == 0 || uio->uio_resid != orig_resid) && (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) ip->i_flag |= IN_ACCESS; return (error); } static int ext2_ioctl(struct vop_ioctl_args *ap) { switch (ap->a_command) { case FIOSEEKDATA: case FIOSEEKHOLE: return (vn_bmap_seekhole(ap->a_vp, ap->a_command, (off_t *)ap->a_data, ap->a_cred)); default: return (ENOTTY); } } /* * Vnode op for writing. 
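ext2_ioctl() above forwards FIOSEEKDATA/FIOSEEKHOLE to vn_bmap_seekhole(), which is what backs lseek(2) with SEEK_DATA/SEEK_HOLE on this filesystem. A small userland probe, with a placeholder path:

/*
 * Find the first hole in a file, exercising the FIOSEEKHOLE path
 * implemented by ext2_ioctl() above (FreeBSD lseek SEEK_HOLE).
 * "/mnt/ext2/sparsefile" is a placeholder path.
 */
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/mnt/ext2/sparsefile";	/* placeholder */
	off_t hole;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		err(1, "open %s", path);

	hole = lseek(fd, 0, SEEK_HOLE);
	if (hole == -1)
		err(1, "lseek(SEEK_HOLE)");
	printf("first hole at byte offset %jd\n", (intmax_t)hole);

	close(fd);
	return (0);
}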
*/ static int ext2_write(struct vop_write_args *ap) { struct vnode *vp; struct uio *uio; struct inode *ip; struct m_ext2fs *fs; struct buf *bp; daddr_t lbn; off_t osize; int blkoffset, error, flags, ioflag, resid, size, seqcount, xfersize; ioflag = ap->a_ioflag; uio = ap->a_uio; vp = ap->a_vp; seqcount = ioflag >> IO_SEQSHIFT; ip = VTOI(vp); #ifdef INVARIANTS if (uio->uio_rw != UIO_WRITE) panic("%s: mode", "ext2_write"); #endif switch (vp->v_type) { case VREG: if (ioflag & IO_APPEND) uio->uio_offset = ip->i_size; if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) return (EPERM); /* FALLTHROUGH */ case VLNK: break; case VDIR: /* XXX differs from ffs -- this is called from ext2_mkdir(). */ if ((ioflag & IO_SYNC) == 0) panic("ext2_write: nonsync dir write"); break; default: panic("ext2_write: type %p %d (%jd,%jd)", (void *)vp, vp->v_type, (intmax_t)uio->uio_offset, (intmax_t)uio->uio_resid); } KASSERT(uio->uio_resid >= 0, ("ext2_write: uio->uio_resid < 0")); KASSERT(uio->uio_offset >= 0, ("ext2_write: uio->uio_offset < 0")); fs = ip->i_e2fs; if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->e2fs_maxfilesize) return (EFBIG); /* * Maybe this should be above the vnode op call, but so long as * file servers have no limits, I don't think it matters. */ if (vn_rlimit_fsize(vp, uio, uio->uio_td)) return (EFBIG); resid = uio->uio_resid; osize = ip->i_size; if (seqcount > BA_SEQMAX) flags = BA_SEQMAX << BA_SEQSHIFT; else flags = seqcount << BA_SEQSHIFT; if ((ioflag & IO_SYNC) && !DOINGASYNC(vp)) flags |= IO_SYNC; for (error = 0; uio->uio_resid > 0;) { lbn = lblkno(fs, uio->uio_offset); blkoffset = blkoff(fs, uio->uio_offset); xfersize = fs->e2fs_fsize - blkoffset; if (uio->uio_resid < xfersize) xfersize = uio->uio_resid; if (uio->uio_offset + xfersize > ip->i_size) vnode_pager_setsize(vp, uio->uio_offset + xfersize); /* * We must perform a read-before-write if the transfer size * does not cover the entire buffer. */ if (fs->e2fs_bsize > xfersize) flags |= BA_CLRBUF; else flags &= ~BA_CLRBUF; error = ext2_balloc(ip, lbn, blkoffset + xfersize, ap->a_cred, &bp, flags); if (error != 0) break; if ((ioflag & (IO_SYNC | IO_INVAL)) == (IO_SYNC | IO_INVAL)) bp->b_flags |= B_NOCACHE; if (uio->uio_offset + xfersize > ip->i_size) ip->i_size = uio->uio_offset + xfersize; size = blksize(fs, ip, lbn) - bp->b_resid; if (size < xfersize) xfersize = size; error = uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio); /* * If the buffer is not already filled and we encounter an * error while trying to fill it, we have to clear out any * garbage data from the pages instantiated for the buffer. * If we do not, a failed uiomove() during a write can leave * the prior contents of the pages exposed to a userland mmap. * * Note that we need only clear buffers with a transfer size * equal to the block size because buffers with a shorter * transfer size were cleared above by the call to ext2_balloc() * with the BA_CLRBUF flag set. * * If the source region for uiomove identically mmaps the * buffer, uiomove() performed the NOP copy, and the buffer * content remains valid because the page fault handler * validated the pages. */ if (error != 0 && (bp->b_flags & B_CACHE) == 0 && fs->e2fs_bsize == xfersize) vfs_bio_clrbuf(bp); vfs_bio_set_flags(bp, ioflag); /* * If IO_SYNC each buffer is written synchronously. Otherwise * if we have a severe page deficiency write the buffer * asynchronously. 
Otherwise try to cluster, and if that * doesn't do it then either do an async write (if O_DIRECT), * or a delayed write (if not). */ if (ioflag & IO_SYNC) { (void)bwrite(bp); } else if (vm_page_count_severe() || buf_dirty_count_severe() || (ioflag & IO_ASYNC)) { bp->b_flags |= B_CLUSTEROK; bawrite(bp); } else if (xfersize + blkoffset == fs->e2fs_fsize) { if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) { bp->b_flags |= B_CLUSTEROK; cluster_write(vp, bp, ip->i_size, seqcount, 0); } else { bawrite(bp); } } else if (ioflag & IO_DIRECT) { bp->b_flags |= B_CLUSTEROK; bawrite(bp); } else { bp->b_flags |= B_CLUSTEROK; bdwrite(bp); } if (error || xfersize == 0) break; } /* * If we successfully wrote any data, and we are not the superuser * we clear the setuid and setgid bits as a precaution against * tampering. */ if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ap->a_cred) { if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) ip->i_mode &= ~(ISUID | ISGID); } if (error) { if (ioflag & IO_UNIT) { (void)ext2_truncate(vp, osize, ioflag & IO_SYNC, ap->a_cred, uio->uio_td); uio->uio_offset -= resid - uio->uio_resid; uio->uio_resid = resid; } } if (uio->uio_resid != resid) { ip->i_flag |= IN_CHANGE | IN_UPDATE; if (ioflag & IO_SYNC) error = ext2_update(vp, 1); } return (error); } Index: head/sys/fs/ext2fs/ext2fs.h =================================================================== --- head/sys/fs/ext2fs/ext2fs.h (revision 327583) +++ head/sys/fs/ext2fs/ext2fs.h (revision 327584) @@ -1,409 +1,419 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science * * $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Aditya Sarawgi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * */ #ifndef _FS_EXT2FS_EXT2FS_H_ #define _FS_EXT2FS_EXT2FS_H_ #include /* * Super block for an ext2fs file system. 
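The struct ext2fs that follows mirrors the on-disk superblock, which sits 1024 bytes into the volume, is identified by the 0xEF53 magic, and encodes the block size as 1024 << e2fs_log_bsize. A hedged userland reader that decodes only the handful of leading fields shown below (offsets of the remaining fields are left out; on-disk values are little-endian):

/*
 * Read and sanity-check an ext2 superblock from an image file.
 * Only the first few on-disk fields are declared here; see the full
 * struct ext2fs below for the rest.  le16toh/le32toh come from
 * <sys/endian.h> on FreeBSD.
 */
#include <sys/endian.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define SB_OFFSET	1024		/* superblock starts 1 KiB in */
#define E2FS_MAGIC	0xEF53

struct ondisk_sb_head {			/* leading fields only */
	uint32_t icount;		/* inode count */
	uint32_t bcount;		/* block count (low 32 bits) */
	uint32_t rbcount;		/* reserved block count */
	uint32_t fbcount;		/* free block count */
	uint32_t ficount;		/* free inode count */
	uint32_t first_dblock;		/* first data block */
	uint32_t log_bsize;		/* bsize = 1024 << log_bsize */
	uint32_t log_fsize;
	uint32_t bpg, fpg, ipg;
	uint32_t mtime, wtime;
	uint16_t mnt_count, max_mnt_count;
	uint16_t magic;			/* 0xEF53 */
};

int
main(int argc, char **argv)
{
	struct ondisk_sb_head sb;
	int fd;

	if (argc != 2)
		errx(1, "usage: %s <ext2-image>", argv[0]);
	if ((fd = open(argv[1], O_RDONLY)) < 0)
		err(1, "open");
	if (pread(fd, &sb, sizeof(sb), SB_OFFSET) != sizeof(sb))
		err(1, "pread");
	if (le16toh(sb.magic) != E2FS_MAGIC)
		errx(1, "bad magic 0x%x", (unsigned)le16toh(sb.magic));

	printf("block size : %u\n", 1024U << le32toh(sb.log_bsize));
	printf("blocks     : %u (%u free, %u reserved)\n",
	    le32toh(sb.bcount), le32toh(sb.fbcount), le32toh(sb.rbcount));
	printf("inodes     : %u (%u free)\n",
	    le32toh(sb.icount), le32toh(sb.ficount));
	close(fd);
	return (0);
}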
*/ struct ext2fs { uint32_t e2fs_icount; /* Inode count */ uint32_t e2fs_bcount; /* blocks count */ uint32_t e2fs_rbcount; /* reserved blocks count */ uint32_t e2fs_fbcount; /* free blocks count */ uint32_t e2fs_ficount; /* free inodes count */ uint32_t e2fs_first_dblock; /* first data block */ uint32_t e2fs_log_bsize; /* block size = 1024*(2^e2fs_log_bsize) */ uint32_t e2fs_log_fsize; /* fragment size */ uint32_t e2fs_bpg; /* blocks per group */ uint32_t e2fs_fpg; /* frags per group */ uint32_t e2fs_ipg; /* inodes per group */ uint32_t e2fs_mtime; /* mount time */ uint32_t e2fs_wtime; /* write time */ uint16_t e2fs_mnt_count; /* mount count */ uint16_t e2fs_max_mnt_count; /* max mount count */ uint16_t e2fs_magic; /* magic number */ uint16_t e2fs_state; /* file system state */ uint16_t e2fs_beh; /* behavior on errors */ uint16_t e2fs_minrev; /* minor revision level */ uint32_t e2fs_lastfsck; /* time of last fsck */ uint32_t e2fs_fsckintv; /* max time between fscks */ uint32_t e2fs_creator; /* creator OS */ uint32_t e2fs_rev; /* revision level */ uint16_t e2fs_ruid; /* default uid for reserved blocks */ uint16_t e2fs_rgid; /* default gid for reserved blocks */ /* EXT2_DYNAMIC_REV superblocks */ uint32_t e2fs_first_ino; /* first non-reserved inode */ uint16_t e2fs_inode_size; /* size of inode structure */ uint16_t e2fs_block_group_nr; /* block grp number of this sblk*/ uint32_t e2fs_features_compat; /* compatible feature set */ uint32_t e2fs_features_incompat; /* incompatible feature set */ uint32_t e2fs_features_rocompat; /* RO-compatible feature set */ uint8_t e2fs_uuid[16]; /* 128-bit uuid for volume */ char e2fs_vname[16]; /* volume name */ char e2fs_fsmnt[64]; /* name mounted on */ uint32_t e2fs_algo; /* For compression */ uint8_t e2fs_prealloc; /* # of blocks for old prealloc */ uint8_t e2fs_dir_prealloc; /* # of blocks for old prealloc dirs */ uint16_t e2fs_reserved_ngdb; /* # of reserved gd blocks for resize */ char e3fs_journal_uuid[16]; /* uuid of journal superblock */ uint32_t e3fs_journal_inum; /* inode number of journal file */ uint32_t e3fs_journal_dev; /* device number of journal file */ uint32_t e3fs_last_orphan; /* start of list of inodes to delete */ uint32_t e3fs_hash_seed[4]; /* HTREE hash seed */ char e3fs_def_hash_version;/* Default hash version to use */ char e3fs_jnl_backup_type; uint16_t e3fs_desc_size; /* size of group descriptor */ uint32_t e3fs_default_mount_opts; uint32_t e3fs_first_meta_bg; /* First metablock block group */ uint32_t e3fs_mkfs_time; /* when the fs was created */ uint32_t e3fs_jnl_blks[17]; /* backup of the journal inode */ uint32_t e4fs_bcount_hi; /* high bits of blocks count */ uint32_t e4fs_rbcount_hi; /* high bits of reserved blocks count */ uint32_t e4fs_fbcount_hi; /* high bits of free blocks count */ uint16_t e4fs_min_extra_isize; /* all inodes have some bytes */ uint16_t e4fs_want_extra_isize;/* inodes must reserve some bytes */ uint32_t e4fs_flags; /* miscellaneous flags */ uint16_t e4fs_raid_stride; /* RAID stride */ uint16_t e4fs_mmpintv; /* seconds to wait in MMP checking */ uint64_t e4fs_mmpblk; /* block for multi-mount protection */ uint32_t e4fs_raid_stripe_wid; /* blocks on data disks (N * stride) */ uint8_t e4fs_log_gpf; /* FLEX_BG group size */ uint8_t e4fs_chksum_type; /* metadata checksum algorithm used */ uint8_t e4fs_encrypt; /* versioning level for encryption */ uint8_t e4fs_reserved_pad; uint64_t e4fs_kbytes_written; /* number of lifetime kilobytes */ uint32_t e4fs_snapinum; /* inode number of active snapshot */ uint32_t 
e4fs_snapid; /* sequential ID of active snapshot */ uint64_t e4fs_snaprbcount; /* reserved blocks for active snapshot */ uint32_t e4fs_snaplist; /* inode number for on-disk snapshot */ uint32_t e4fs_errcount; /* number of file system errors */ uint32_t e4fs_first_errtime; /* first time an error happened */ uint32_t e4fs_first_errino; /* inode involved in first error */ uint64_t e4fs_first_errblk; /* block involved of first error */ uint8_t e4fs_first_errfunc[32];/* function where error happened */ uint32_t e4fs_first_errline; /* line number where error happened */ uint32_t e4fs_last_errtime; /* most recent time of an error */ uint32_t e4fs_last_errino; /* inode involved in last error */ uint32_t e4fs_last_errline; /* line number where error happened */ uint64_t e4fs_last_errblk; /* block involved of last error */ uint8_t e4fs_last_errfunc[32]; /* function where error happened */ uint8_t e4fs_mount_opts[64]; uint32_t e4fs_usrquota_inum; /* inode for tracking user quota */ uint32_t e4fs_grpquota_inum; /* inode for tracking group quota */ uint32_t e4fs_overhead_clusters;/* overhead blocks/clusters */ uint32_t e4fs_backup_bgs[2]; /* groups with sparse_super2 SBs */ uint8_t e4fs_encrypt_algos[4];/* encryption algorithms in use */ uint8_t e4fs_encrypt_pw_salt[16];/* salt used for string2key */ uint32_t e4fs_lpf_ino; /* location of the lost+found inode */ uint32_t e4fs_proj_quota_inum; /* inode for tracking project quota */ uint32_t e4fs_chksum_seed; /* checksum seed */ uint32_t e4fs_reserved[98]; /* padding to the end of the block */ uint32_t e4fs_sbchksum; /* superblock checksum */ }; /* * The path name on which the file system is mounted is maintained * in fs_fsmnt. MAXMNTLEN defines the amount of space allocated in * the super block for this name. */ #define MAXMNTLEN 512 /* * In-Memory Superblock */ struct m_ext2fs { struct ext2fs * e2fs; char e2fs_fsmnt[MAXMNTLEN];/* name mounted on */ char e2fs_ronly; /* mounted read-only flag */ char e2fs_fmod; /* super block modified flag */ + uint64_t e2fs_bcount; /* blocks count */ + uint64_t e2fs_rbcount; /* reserved blocks count */ + uint64_t e2fs_fbcount; /* free blocks count */ uint32_t e2fs_bsize; /* Block size */ uint32_t e2fs_bshift; /* calc of logical block no */ uint32_t e2fs_bpg; /* Number of blocks per group */ int64_t e2fs_qbmask; /* = s_blocksize -1 */ uint32_t e2fs_fsbtodb; /* Shift to get disk block */ uint32_t e2fs_ipg; /* Number of inodes per group */ uint32_t e2fs_ipb; /* Number of inodes per block */ uint32_t e2fs_itpg; /* Number of inode table per group */ uint32_t e2fs_fsize; /* Size of fragments per block */ uint32_t e2fs_fpb; /* Number of fragments per block */ uint32_t e2fs_fpg; /* Number of fragments per group */ uint32_t e2fs_gdbcount; /* Number of group descriptors */ uint32_t e2fs_gcount; /* Number of groups */ uint32_t e2fs_isize; /* Size of inode */ uint32_t e2fs_total_dir; /* Total number of directories */ uint8_t *e2fs_contigdirs; /* (u) # of contig. 
allocated dirs */ char e2fs_wasvalid; /* valid at mount time */ off_t e2fs_maxfilesize; struct ext2_gd *e2fs_gd; /* Group Descriptors */ int32_t e2fs_contigsumsize; /* size of cluster summary array */ int32_t *e2fs_maxcluster; /* max cluster in each cyl group */ struct csum *e2fs_clustersum; /* cluster summary in each cyl group */ int32_t e2fs_uhash; /* 3 if hash should be signed, 0 if not */ }; /* cluster summary information */ struct csum { int8_t cs_init; /* cluster summary has been initialized */ int32_t *cs_sum; /* cluster summary array */ }; /* * The second extended file system magic number */ #define E2FS_MAGIC 0xEF53 /* * Revision levels */ #define E2FS_REV0 0 /* The good old (original) format */ #define E2FS_REV1 1 /* V2 format w/ dynamic inode sizes */ #define E2FS_REV0_INODE_SIZE 128 /* * compatible/incompatible features */ #define EXT2F_COMPAT_PREALLOC 0x0001 #define EXT2F_COMPAT_IMAGIC_INODES 0x0002 #define EXT2F_COMPAT_HASJOURNAL 0x0004 #define EXT2F_COMPAT_EXT_ATTR 0x0008 #define EXT2F_COMPAT_RESIZE 0x0010 #define EXT2F_COMPAT_DIRHASHINDEX 0x0020 #define EXT2F_COMPAT_LAZY_BG 0x0040 #define EXT2F_COMPAT_EXCLUDE_BITMAP 0x0100 #define EXT2F_COMPAT_SPARSESUPER2 0x0200 #define EXT2F_ROCOMPAT_SPARSESUPER 0x0001 #define EXT2F_ROCOMPAT_LARGEFILE 0x0002 #define EXT2F_ROCOMPAT_BTREE_DIR 0x0004 #define EXT2F_ROCOMPAT_HUGE_FILE 0x0008 #define EXT2F_ROCOMPAT_GDT_CSUM 0x0010 #define EXT2F_ROCOMPAT_DIR_NLINK 0x0020 #define EXT2F_ROCOMPAT_EXTRA_ISIZE 0x0040 #define EXT2F_ROCOMPAT_HAS_SNAPSHOT 0x0080 #define EXT2F_ROCOMPAT_QUOTA 0x0100 #define EXT2F_ROCOMPAT_BIGALLOC 0x0200 #define EXT2F_ROCOMPAT_METADATA_CKSUM 0x0400 #define EXT2F_ROCOMPAT_REPLICA 0x0800 #define EXT2F_ROCOMPAT_READONLY 0x1000 #define EXT2F_ROCOMPAT_PROJECT 0x2000 #define EXT2F_INCOMPAT_COMP 0x0001 #define EXT2F_INCOMPAT_FTYPE 0x0002 #define EXT2F_INCOMPAT_RECOVER 0x0004 #define EXT2F_INCOMPAT_JOURNAL_DEV 0x0008 #define EXT2F_INCOMPAT_META_BG 0x0010 #define EXT2F_INCOMPAT_EXTENTS 0x0040 #define EXT2F_INCOMPAT_64BIT 0x0080 #define EXT2F_INCOMPAT_MMP 0x0100 #define EXT2F_INCOMPAT_FLEX_BG 0x0200 #define EXT2F_INCOMPAT_EA_INODE 0x0400 #define EXT2F_INCOMPAT_DIRDATA 0x1000 #define EXT2F_INCOMPAT_CSUM_SEED 0x2000 #define EXT2F_INCOMPAT_LARGEDIR 0x4000 #define EXT2F_INCOMPAT_INLINE_DATA 0x8000 #define EXT2F_INCOMPAT_ENCRYPT 0x10000 struct ext2_feature { int mask; const char *name; }; static const struct ext2_feature compat[] = { { EXT2F_COMPAT_PREALLOC, "dir_prealloc" }, { EXT2F_COMPAT_IMAGIC_INODES, "imagic_inodes" }, { EXT2F_COMPAT_HASJOURNAL, "has_journal" }, { EXT2F_COMPAT_EXT_ATTR, "ext_attr" }, { EXT2F_COMPAT_RESIZE, "resize_inode" }, { EXT2F_COMPAT_DIRHASHINDEX, "dir_index" }, { EXT2F_COMPAT_EXCLUDE_BITMAP, "snapshot_bitmap" }, { EXT2F_COMPAT_SPARSESUPER2, "sparse_super2" } }; static const struct ext2_feature ro_compat[] = { { EXT2F_ROCOMPAT_SPARSESUPER, "sparse_super" }, { EXT2F_ROCOMPAT_LARGEFILE, "large_file" }, { EXT2F_ROCOMPAT_BTREE_DIR, "btree_dir" }, { EXT2F_ROCOMPAT_HUGE_FILE, "huge_file" }, { EXT2F_ROCOMPAT_GDT_CSUM, "uninit_groups" }, { EXT2F_ROCOMPAT_DIR_NLINK, "dir_nlink" }, { EXT2F_ROCOMPAT_EXTRA_ISIZE, "extra_isize" }, { EXT2F_ROCOMPAT_HAS_SNAPSHOT, "snapshot" }, { EXT2F_ROCOMPAT_QUOTA, "quota" }, { EXT2F_ROCOMPAT_BIGALLOC, "bigalloc" }, { EXT2F_ROCOMPAT_METADATA_CKSUM, "metadata_csum" }, { EXT2F_ROCOMPAT_REPLICA, "replica" }, { EXT2F_ROCOMPAT_READONLY, "ro" }, { EXT2F_ROCOMPAT_PROJECT, "project" } }; static const struct ext2_feature incompat[] = { { EXT2F_INCOMPAT_COMP, "compression" }, { EXT2F_INCOMPAT_FTYPE, 
"filetype" }, { EXT2F_INCOMPAT_RECOVER, "needs_recovery" }, { EXT2F_INCOMPAT_JOURNAL_DEV, "journal_dev" }, { EXT2F_INCOMPAT_META_BG, "meta_bg" }, { EXT2F_INCOMPAT_EXTENTS, "extents" }, { EXT2F_INCOMPAT_64BIT, "64bit" }, { EXT2F_INCOMPAT_MMP, "mmp" }, { EXT2F_INCOMPAT_FLEX_BG, "flex_bg" }, { EXT2F_INCOMPAT_EA_INODE, "ea_inode" }, { EXT2F_INCOMPAT_DIRDATA, "dirdata" }, { EXT2F_INCOMPAT_CSUM_SEED, "metadata_csum_seed" }, { EXT2F_INCOMPAT_LARGEDIR, "large_dir" }, { EXT2F_INCOMPAT_INLINE_DATA, "inline_data" }, { EXT2F_INCOMPAT_ENCRYPT, "encrypt" } }; /* * Features supported in this implementation * * We support the following REV1 features: * - EXT2F_ROCOMPAT_SPARSESUPER * - EXT2F_ROCOMPAT_LARGEFILE * - EXT2F_ROCOMPAT_EXTRA_ISIZE * - EXT2F_INCOMPAT_FTYPE * * We partially (read-only) support the following EXT4 features: * - EXT2F_ROCOMPAT_HUGE_FILE * - EXT2F_INCOMPAT_EXTENTS * * We do not support these EXT4 features but they are irrelevant * for read-only support: * - EXT2F_INCOMPAT_RECOVER * - EXT2F_INCOMPAT_FLEX_BG * - EXT2F_INCOMPAT_META_BG */ #define EXT2F_COMPAT_SUPP EXT2F_COMPAT_DIRHASHINDEX #define EXT2F_ROCOMPAT_SUPP (EXT2F_ROCOMPAT_SPARSESUPER | \ EXT2F_ROCOMPAT_LARGEFILE | \ EXT2F_ROCOMPAT_GDT_CSUM | \ EXT2F_ROCOMPAT_DIR_NLINK | \ EXT2F_ROCOMPAT_HUGE_FILE | \ EXT2F_ROCOMPAT_EXTRA_ISIZE) -#define EXT2F_INCOMPAT_SUPP EXT2F_INCOMPAT_FTYPE +#define EXT2F_INCOMPAT_SUPP (EXT2F_INCOMPAT_FTYPE | \ + EXT2F_INCOMPAT_64BIT) #define EXT4F_RO_INCOMPAT_SUPP (EXT2F_INCOMPAT_EXTENTS | \ EXT2F_INCOMPAT_RECOVER | \ EXT2F_INCOMPAT_FLEX_BG | \ EXT2F_INCOMPAT_META_BG ) /* Assume that user mode programs are passing in an ext2fs superblock, not * a kernel struct super_block. This will allow us to call the feature-test * macros from user land. */ #define EXT2_SB(sb) (sb) /* * Feature set definitions */ #define EXT2_HAS_COMPAT_FEATURE(sb,mask) \ ( EXT2_SB(sb)->e2fs->e2fs_features_compat & htole32(mask) ) #define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask) \ ( EXT2_SB(sb)->e2fs->e2fs_features_rocompat & htole32(mask) ) #define EXT2_HAS_INCOMPAT_FEATURE(sb,mask) \ ( EXT2_SB(sb)->e2fs->e2fs_features_incompat & htole32(mask) ) /* * File clean flags */ #define E2FS_ISCLEAN 0x0001 /* Unmounted cleanly */ #define E2FS_ERRORS 0x0002 /* Errors detected */ /* * Filesystem miscellaneous flags */ #define E2FS_SIGNED_HASH 0x0001 #define E2FS_UNSIGNED_HASH 0x0002 #define EXT2_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ #define EXT2_BG_BLOCK_UNINIT 0x0002 /* Block bitmap not in use */ #define EXT2_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */ /* ext2 file system block group descriptor */ struct ext2_gd { uint32_t ext2bgd_b_bitmap; /* blocks bitmap block */ uint32_t ext2bgd_i_bitmap; /* inodes bitmap block */ uint32_t ext2bgd_i_tables; /* inodes table block */ uint16_t ext2bgd_nbfree; /* number of free blocks */ uint16_t ext2bgd_nifree; /* number of free inodes */ uint16_t ext2bgd_ndirs; /* number of directories */ uint16_t ext4bgd_flags; /* block group flags */ uint32_t ext4bgd_x_bitmap; /* snapshot exclusion bitmap loc. 
*/ uint16_t ext4bgd_b_bmap_csum; /* block bitmap checksum */ uint16_t ext4bgd_i_bmap_csum; /* inode bitmap checksum */ uint16_t ext4bgd_i_unused; /* unused inode count */ uint16_t ext4bgd_csum; /* group descriptor checksum */ + uint32_t ext4bgd_b_bitmap_hi; /* high bits of blocks bitmap block */ + uint32_t ext4bgd_i_bitmap_hi; /* high bits of inodes bitmap block */ + uint32_t ext4bgd_i_tables_hi; /* high bits of inodes table block */ + uint16_t ext4bgd_nbfree_hi; /* high bits of number of free blocks */ + uint16_t ext4bgd_nifree_hi; /* high bits of number of free inodes */ + uint16_t ext4bgd_ndirs_hi; /* high bits of number of directories */ + uint16_t ext4bgd_i_unused_hi; /* high bits of unused inode count */ + uint32_t ext4bgd_x_bitmap_hi; /* high bits of snapshot exclusion */ + uint16_t ext4bgd_b_bmap_csum_hi;/* high bits of block bitmap checksum */ + uint16_t ext4bgd_i_bmap_csum_hi;/* high bits of inode bitmap checksum */ + uint32_t ext4bgd_reserved; }; -/* EXT2FS metadata is stored in little-endian byte order. These macros - * help reading it. - */ - -#define e2fs_cgload(old, new, size) memcpy((new), (old), (size)); -#define e2fs_cgsave(old, new, size) memcpy((new), (old), (size)); +#define E2FS_REV0_GD_SIZE (sizeof(struct ext2_gd) / 2) /* * Macro-instructions used to manage several block sizes */ #define EXT2_MIN_BLOCK_LOG_SIZE 10 #define EXT2_BLOCK_SIZE(s) ((s)->e2fs_bsize) #define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof(uint32_t)) #define EXT2_INODE_SIZE(s) (EXT2_SB(s)->e2fs_isize) /* * Macro-instructions used to manage fragments */ #define EXT2_MIN_FRAG_SIZE 1024 #define EXT2_MAX_FRAG_SIZE 4096 #define EXT2_MIN_FRAG_LOG_SIZE 10 #define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->e2fs_fsize) #define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->e2fs_fpb) /* * Macro-instructions used to manage group descriptors */ #define EXT2_BLOCKS_PER_GROUP(s) (EXT2_SB(s)->e2fs_bpg) #endif /* !_FS_EXT2FS_EXT2FS_H_ */ Index: head/sys/fs/ext2fs/fs.h =================================================================== --- head/sys/fs/ext2fs/fs.h (revision 327583) +++ head/sys/fs/ext2fs/fs.h (revision 327584) @@ -1,164 +1,164 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)fs.h 8.7 (Berkeley) 4/19/94 * $FreeBSD$ */ #ifndef _FS_EXT2FS_FS_H_ #define _FS_EXT2FS_FS_H_ /* * Each disk drive contains some number of file systems. * A file system consists of a number of cylinder groups. * Each cylinder group has inodes and data. * * A file system is described by its super-block, which in turn * describes the cylinder groups. The super-block is critical * data and is replicated in each cylinder group to protect against * catastrophic loss. This is done at `newfs' time and the critical * super-block data does not change, so the copies need not be * referenced further unless disaster strikes. * * The first boot and super blocks are given in absolute disk addresses. * The byte-offset forms are preferred, as they don't imply a sector size. */ #define SBSIZE 1024 #define SBLOCK 2 /* * The path name on which the file system is mounted is maintained * in fs_fsmnt. MAXMNTLEN defines the amount of space allocated in * the super block for this name. */ #define MAXMNTLEN 512 /* * A summary of contiguous blocks of various sizes is maintained * in each cylinder group. Normally this is set by the initial * value of fs_maxcontig. * * XXX:FS_MAXCONTIG is set to 16 to conserve space. Here we set * EXT2_MAXCONTIG to 32 for better performance. */ #define EXT2_MAXCONTIG 32 /* * Grigoriy Orlov has done some extensive work to fine * tune the layout preferences for directories within a filesystem. * His algorithm can be tuned by adjusting the following parameters * which tell the system the average file size and the average number * of files per directory. These defaults are well selected for typical * filesystems, but may need to be tuned for odd cases like filesystems * being used for squid caches or news spools. * AVFPDIR is the expected number of files per directory. AVGDIRSIZE is * obtained by multiplying AVFPDIR and AVFILESIZ which is assumed to be * 16384. */ #define AFPDIR 64 #define AVGDIRSIZE 1048576 /* * Macros for access to superblock array structures */ /* * Turn file system block numbers into disk block addresses. * This maps file system blocks to device size blocks. */ #define fsbtodb(fs, b) ((daddr_t)(b) << (fs)->e2fs_fsbtodb) #define dbtofsb(fs, b) ((b) >> (fs)->e2fs_fsbtodb) /* get group containing inode */ #define ino_to_cg(fs, x) (((x) - 1) / (fs->e2fs_ipg)) /* get block containing inode from its number x */ #define ino_to_fsba(fs, x) \ - ((fs)->e2fs_gd[ino_to_cg((fs), (x))].ext2bgd_i_tables + \ + (e2fs_gd_get_i_tables(&(fs)->e2fs_gd[ino_to_cg((fs), (x))]) + \ (((x) - 1) % (fs)->e2fs->e2fs_ipg) / (fs)->e2fs_ipb) /* get offset for inode in block */ #define ino_to_fsbo(fs, x) ((x-1) % (fs->e2fs_ipb)) /* * Give cylinder group number for a file system block. * Give cylinder group block number for a file system block. 
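ino_to_fsba() above now goes through e2fs_gd_get_i_tables() rather than reading ext2bgd_i_tables directly, so the 32-bit on-disk field can be widened with its ext4bgd_i_tables_hi counterpart when the 64bit feature (EXT2F_INCOMPAT_64BIT, added to EXT2F_INCOMPAT_SUPP in this change) and the larger group descriptors are in use. The accessor itself is defined elsewhere in the commit, so the body below is an assumption; it is a sketch of the lo/hi pattern together with the inode-to-group arithmetic.

/*
 * Sketch of the lo/hi group-descriptor accessor pattern behind
 * e2fs_gd_get_i_tables(), plus the ino_to_cg()/ino_to_fsbo() arithmetic.
 * Treating the high word as valid only when the 64bit feature is active
 * is an assumption, not taken from this diff.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
gd_get_i_tables(uint32_t lo, uint32_t hi, int has_64bit)
{
	return ((uint64_t)(has_64bit ? hi : 0) << 32 | lo);
}

int
main(void)
{
	uint32_t ipg = 8192;	/* inodes per group */
	uint32_t ipb = 32;	/* inodes per block (4096 / 128) */
	uint32_t ino = 100000;	/* some inode number */

	/* ino_to_cg(): inode numbers are 1-based. */
	uint32_t cg = (ino - 1) / ipg;
	/* ino_to_fsbo(): slot of the inode within its table block. */
	uint32_t off = (ino - 1) % ipb;
	/* ino_to_fsba(): table start + block index within the table. */
	uint64_t itable = gd_get_i_tables(0x00000400, 0x1, 1);
	uint64_t fsba = itable + ((ino - 1) % ipg) / ipb;

	printf("inode %u: group %u, block %#jx, slot %u\n",
	    ino, cg, (uintmax_t)fsba, off);
	return (0);
}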
*/ #define dtog(fs, d) (((d) - fs->e2fs->e2fs_first_dblock) / \ EXT2_BLOCKS_PER_GROUP(fs)) #define dtogd(fs, d) (((d) - fs->e2fs->e2fs_first_dblock) % \ EXT2_BLOCKS_PER_GROUP(fs)) /* * The following macros optimize certain frequently calculated * quantities by using shifts and masks in place of divisions * modulos and multiplications. */ #define blkoff(fs, loc) /* calculates (loc % fs->fs_bsize) */ \ ((loc) & (fs)->e2fs_qbmask) #define lblktosize(fs, blk) /* calculates (blk * fs->fs_bsize) */ \ ((blk) << (fs->e2fs_bshift)) #define lblkno(fs, loc) /* calculates (loc / fs->fs_bsize) */ \ ((loc) >> (fs->e2fs_bshift)) /* no fragments -> logical block number equal # of frags */ #define numfrags(fs, loc) /* calculates (loc / fs->fs_fsize) */ \ ((loc) >> (fs->e2fs_bshift)) #define fragroundup(fs, size) /* calculates roundup(size, fs->fs_fsize) */ \ roundup(size, fs->e2fs_fsize) /* was (((size) + (fs)->fs_qfmask) & (fs)->fs_fmask) */ /* * Determining the size of a file block in the file system. * easy w/o fragments */ #define blksize(fs, ip, lbn) ((fs)->e2fs_fsize) /* * INOPB is the number of inodes in a secondary storage block. */ #define INOPB(fs) (fs->e2fs_ipb) /* * NINDIR is the number of indirects in a file system block. */ #define NINDIR(fs) (EXT2_ADDR_PER_BLOCK(fs)) #endif /* !_FS_EXT2FS_FS_H_ */
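blkoff(), lblkno(), lblktosize() and dtog()/dtogd() above replace divisions, modulos and multiplications with shifts and masks, using e2fs_bshift (log2 of the block size) and e2fs_qbmask (block size minus one). A small numeric check of those equivalences, assuming a 4096-byte block size:

/*
 * Numeric check of the shift/mask equivalences used by blkoff(),
 * lblkno(), lblktosize(), dtog() and dtogd() above, for an assumed
 * 4096-byte-block filesystem.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t bsize = 4096;
	uint32_t bshift = 12;		/* log2(bsize)      */
	uint64_t qbmask = bsize - 1;	/* e2fs_qbmask      */
	uint32_t first_dblock = 0;	/* 0 for 4 KiB blocks */
	uint32_t bpg = 32768;		/* blocks per group */
	uint64_t loc = 123456789;	/* a byte offset    */
	uint64_t blk = 9876;		/* a fs block number */

	assert((loc & qbmask) == loc % bsize);		/* blkoff()     */
	assert((loc >> bshift) == loc / bsize);		/* lblkno()     */
	assert((blk << bshift) == blk * bsize);		/* lblktosize() */

	printf("block %ju lives in group %ju, at index %ju within it\n",
	    (uintmax_t)blk,
	    (uintmax_t)((blk - first_dblock) / bpg),	/* dtog()  */
	    (uintmax_t)((blk - first_dblock) % bpg));	/* dtogd() */
	return (0);
}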