Index: head/sys/fs/ext2fs/ext2_alloc.c =================================================================== --- head/sys/fs/ext2fs/ext2_alloc.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_alloc.c (revision 324706) @@ -1,1340 +1,1344 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static daddr_t ext2_alloccg(struct inode *, int, daddr_t, int); static daddr_t ext2_clusteralloc(struct inode *, int, daddr_t, int); static u_long ext2_dirpref(struct inode *); static u_long ext2_hashalloc(struct inode *, int, long, int, daddr_t (*)(struct inode *, int, daddr_t, int)); static daddr_t ext2_nodealloccg(struct inode *, int, daddr_t, int); static daddr_t ext2_mapsearch(struct m_ext2fs *, char *, daddr_t); /* * Allocate a block in the filesystem. * * A preference may be optionally specified. If a preference is given * the following hierarchy is used to allocate a block: * 1) allocate the requested block. * 2) allocate a rotationally optimal block in the same cylinder. * 3) allocate a block in the same cylinder group. * 4) quadradically rehash into other cylinder groups, until an * available block is located. * If no block preference is given the following hierarchy is used * to allocate a block: * 1) allocate a block in the cylinder group that contains the * inode for the file. * 2) quadradically rehash into other cylinder groups, until an * available block is located. 
*/ int ext2_alloc(struct inode *ip, daddr_t lbn, e4fs_daddr_t bpref, int size, struct ucred *cred, e4fs_daddr_t *bnp) { struct m_ext2fs *fs; struct ext2mount *ump; int32_t bno; int cg; *bnp = 0; fs = ip->i_e2fs; ump = ip->i_ump; mtx_assert(EXT2_MTX(ump), MA_OWNED); #ifdef INVARIANTS if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) { vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n", (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt); panic("ext2_alloc: bad size"); } if (cred == NOCRED) panic("ext2_alloc: missing credential"); #endif /* INVARIANTS */ if (size == fs->e2fs_bsize && fs->e2fs->e2fs_fbcount == 0) goto nospace; if (cred->cr_uid != 0 && fs->e2fs->e2fs_fbcount < fs->e2fs->e2fs_rbcount) goto nospace; if (bpref >= fs->e2fs->e2fs_bcount) bpref = 0; if (bpref == 0) cg = ino_to_cg(fs, ip->i_number); else cg = dtog(fs, bpref); bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize, ext2_alloccg); if (bno > 0) { /* set next_alloc fields as done in block_getblk */ ip->i_next_alloc_block = lbn; ip->i_next_alloc_goal = bno; ip->i_blocks += btodb(fs->e2fs_bsize); ip->i_flag |= IN_CHANGE | IN_UPDATE; *bnp = bno; return (0); } nospace: EXT2_UNLOCK(ump); ext2_fserr(fs, cred->cr_uid, "filesystem full"); uprintf("\n%s: write failed, filesystem is full\n", fs->e2fs_fsmnt); return (ENOSPC); } /* * Allocate EA's block for inode. */ daddr_t -ext2_allocfacl(struct inode *ip) +ext2_alloc_meta(struct inode *ip) { struct m_ext2fs *fs; - daddr_t facl; + daddr_t blk; fs = ip->i_e2fs; EXT2_LOCK(ip->i_ump); - facl = ext2_alloccg(ip, ino_to_cg(fs, ip->i_number), 0, fs->e2fs_bsize); - if (0 == facl) + blk = ext2_hashalloc(ip, ino_to_cg(fs, ip->i_number), 0, fs->e2fs_bsize, + ext2_alloccg); + if (0 == blk) EXT2_UNLOCK(ip->i_ump); - return (facl); + return (blk); } /* * Reallocate a sequence of blocks into a contiguous sequence of blocks. * * The vnode and an array of buffer pointers for a range of sequential * logical blocks to be made contiguous is given. The allocator attempts * to find a range of sequential blocks starting as close as possible to * an fs_rotdelay offset from the end of the allocation for the logical * block immediately preceding the current range. If successful, the * physical block numbers in the buffer pointers and in the inode are * changed to reflect the new allocation. If unsuccessful, the allocation * is left unchanged. The success in doing the reallocation is returned. * Note that the error return is not reflected back to the user. Rather * the previous block allocation will be used. 
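A minimal userspace sketch (not part of this revision) of how ext2_alloc() above turns the optional block preference into a starting cylinder group: with no preference it starts in the group holding the inode, otherwise in the group holding the preferred block. ino_to_cg() and dtog() are re-stated with their usual ext2 definitions, and the group geometry is invented for the example. The same starting-group choice feeds ext2_hashalloc(), which the renamed ext2_alloc_meta() above now also uses, so extended-attribute blocks can fall back to other groups when the inode's own group is full.

#include <stdio.h>
#include <stdint.h>

/* Assumed sample geometry; real values come from the superblock. */
#define	IPG		8192u	/* e2fs_ipg: inodes per group */
#define	FPG		32768u	/* EXT2_BLOCKS_PER_GROUP(fs) */
#define	FIRST_DBLOCK	0u	/* e2fs_first_dblock */

static unsigned ino_to_cg(uint32_t ino)  { return ((ino - 1) / IPG); }
static unsigned dtog(uint32_t blk)       { return ((blk - FIRST_DBLOCK) / FPG); }

int
main(void)
{
	uint32_t ino = 20000;	/* inode performing the allocation (invented) */
	uint32_t bpref = 0;	/* no preferred block supplied */
	unsigned cg;

	/* No preference: start in the group that holds the inode. */
	cg = bpref ? dtog(bpref) : ino_to_cg(ino);
	printf("no bpref      -> start at cg %u\n", cg);

	/* With a preference: start in the group that holds that block. */
	bpref = 70000;
	cg = bpref ? dtog(bpref) : ino_to_cg(ino);
	printf("bpref=%-6u -> start at cg %u\n", bpref, cg);
	return (0);
}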
*/ static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW, 0, "EXT2FS filesystem"); static int doasyncfree = 1; SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "Use asychronous writes to update block pointers when freeing blocks"); static int doreallocblks = 1; SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, ""); int ext2_reallocblks(struct vop_reallocblks_args *ap) { struct m_ext2fs *fs; struct inode *ip; struct vnode *vp; struct buf *sbp, *ebp; uint32_t *bap, *sbap, *ebap; struct ext2mount *ump; struct cluster_save *buflist; struct indir start_ap[EXT2_NIADDR + 1], end_ap[EXT2_NIADDR + 1], *idp; e2fs_lbn_t start_lbn, end_lbn; int soff; e2fs_daddr_t newblk, blkno; int i, len, start_lvl, end_lvl, pref, ssize; if (doreallocblks == 0) return (ENOSPC); vp = ap->a_vp; ip = VTOI(vp); fs = ip->i_e2fs; ump = ip->i_ump; - if (fs->e2fs_contigsumsize <= 0) + if (fs->e2fs_contigsumsize <= 0 || ip->i_flag & IN_E4EXTENTS) return (ENOSPC); buflist = ap->a_buflist; len = buflist->bs_nchildren; start_lbn = buflist->bs_children[0]->b_lblkno; end_lbn = start_lbn + len - 1; #ifdef INVARIANTS for (i = 1; i < len; i++) if (buflist->bs_children[i]->b_lblkno != start_lbn + i) panic("ext2_reallocblks: non-cluster"); #endif /* * If the cluster crosses the boundary for the first indirect * block, leave space for the indirect block. Indirect blocks * are initially laid out in a position after the last direct * block. Block reallocation would usually destroy locality by * moving the indirect block out of the way to make room for * data blocks if we didn't compensate here. We should also do * this for other indirect block boundaries, but it is only * important for the first one. */ if (start_lbn < EXT2_NDADDR && end_lbn >= EXT2_NDADDR) return (ENOSPC); /* * If the latest allocation is in a new cylinder group, assume that * the filesystem has decided to move and do not force it back to * the previous cylinder group. */ if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) != dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno))) return (ENOSPC); if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) || ext2_getlbns(vp, end_lbn, end_ap, &end_lvl)) return (ENOSPC); /* * Get the starting offset and block map for the first block. */ if (start_lvl == 0) { sbap = &ip->i_db[0]; soff = start_lbn; } else { idp = &start_ap[start_lvl - 1]; if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) { brelse(sbp); return (ENOSPC); } sbap = (u_int *)sbp->b_data; soff = idp->in_off; } /* * If the block range spans two block maps, get the second map. */ ebap = NULL; if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) { ssize = len; } else { #ifdef INVARIANTS if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn) panic("ext2_reallocblks: start == end"); #endif ssize = len - (idp->in_off + 1); if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp)) goto fail; ebap = (u_int *)ebp->b_data; } /* * Find the preferred location for the cluster. */ EXT2_LOCK(ump); pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0); /* * Search the block map looking for an allocation of the desired size. */ if ((newblk = (e2fs_daddr_t)ext2_hashalloc(ip, dtog(fs, pref), pref, len, ext2_clusteralloc)) == 0) { EXT2_UNLOCK(ump); goto fail; } /* * We have found a new contiguous block. * * First we have to replace the old block pointers with the new * block pointers in the inode and indirect blocks associated * with the file. 
*/ #ifdef DEBUG printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number, (intmax_t)start_lbn, (intmax_t)end_lbn); #endif /* DEBUG */ blkno = newblk; for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) { if (i == ssize) { bap = ebap; soff = -i; } #ifdef INVARIANTS if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap)) panic("ext2_reallocblks: alloc mismatch"); #endif #ifdef DEBUG printf(" %d,", *bap); #endif /* DEBUG */ *bap++ = blkno; } /* * Next we must write out the modified inode and indirect blocks. * For strict correctness, the writes should be synchronous since * the old block values may have been written to disk. In practise * they are almost never written, but if we are concerned about * strict correctness, the `doasyncfree' flag should be set to zero. * * The test on `doasyncfree' should be changed to test a flag * that shows whether the associated buffers and inodes have * been written. The flag should be set when the cluster is * started and cleared whenever the buffer or inode is flushed. * We can then check below to see if it is set, and do the * synchronous write only when it has been cleared. */ if (sbap != &ip->i_db[0]) { if (doasyncfree) bdwrite(sbp); else bwrite(sbp); } else { ip->i_flag |= IN_CHANGE | IN_UPDATE; if (!doasyncfree) ext2_update(vp, 1); } if (ssize < len) { if (doasyncfree) bdwrite(ebp); else bwrite(ebp); } /* * Last, free the old blocks and assign the new blocks to the buffers. */ #ifdef DEBUG printf("\n\tnew:"); #endif /* DEBUG */ for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) { ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->e2fs_bsize); buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno); #ifdef DEBUG printf(" %d,", blkno); #endif /* DEBUG */ } #ifdef DEBUG printf("\n"); #endif /* DEBUG */ return (0); fail: if (ssize < len) brelse(ebp); if (sbap != &ip->i_db[0]) brelse(sbp); return (ENOSPC); } /* * Allocate an inode in the filesystem. * */ int ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp) { struct timespec ts; struct inode *pip; struct m_ext2fs *fs; struct inode *ip; struct ext2mount *ump; ino_t ino, ipref; - int i, error, cg; + int error, cg; *vpp = NULL; pip = VTOI(pvp); fs = pip->i_e2fs; ump = pip->i_ump; EXT2_LOCK(ump); if (fs->e2fs->e2fs_ficount == 0) goto noinodes; /* * If it is a directory then obtain a cylinder group based on * ext2_dirpref else obtain it using ino_to_cg. The preferred inode is * always the next inode. */ if ((mode & IFMT) == IFDIR) { cg = ext2_dirpref(pip); if (fs->e2fs_contigdirs[cg] < 255) fs->e2fs_contigdirs[cg]++; } else { cg = ino_to_cg(fs, pip->i_number); if (fs->e2fs_contigdirs[cg] > 0) fs->e2fs_contigdirs[cg]--; } ipref = cg * fs->e2fs->e2fs_ipg + 1; ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg); if (ino == 0) goto noinodes; error = VFS_VGET(pvp->v_mount, ino, LK_EXCLUSIVE, vpp); if (error) { ext2_vfree(pvp, ino, mode); return (error); } ip = VTOI(*vpp); /* * The question is whether using VGET was such good idea at all: * Linux doesn't read the old inode in when it is allocating a * new one. I will set at least i_size and i_blocks to zero. 
*/ ip->i_flag = 0; ip->i_size = 0; ip->i_blocks = 0; ip->i_mode = 0; ip->i_flags = 0; - /* now we want to make sure that the block pointers are zeroed out */ - for (i = 0; i < EXT2_NDADDR; i++) - ip->i_db[i] = 0; - for (i = 0; i < EXT2_NIADDR; i++) - ip->i_ib[i] = 0; + if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_EXTENTS) + && (S_ISREG(mode) || S_ISDIR(mode))) + ext4_ext_tree_init(ip); + else + memset(ip->i_data, 0, sizeof(ip->i_data)); + /* * Set up a new generation number for this inode. * Avoid zero values. */ do { ip->i_gen = arc4random(); } while (ip->i_gen == 0); vfs_timestamp(&ts); ip->i_birthtime = ts.tv_sec; ip->i_birthnsec = ts.tv_nsec; /* printf("ext2_valloc: allocated inode %d\n", ino); */ return (0); noinodes: EXT2_UNLOCK(ump); ext2_fserr(fs, cred->cr_uid, "out of inodes"); uprintf("\n%s: create/symlink failed, no inodes free\n", fs->e2fs_fsmnt); return (ENOSPC); } /* * Find a cylinder to place a directory. * * The policy implemented by this algorithm is to allocate a * directory inode in the same cylinder group as its parent * directory, but also to reserve space for its files inodes * and data. Restrict the number of directories which may be * allocated one after another in the same cylinder group * without intervening allocation of files. * * If we allocate a first level directory then force allocation * in another cylinder group. * */ static u_long ext2_dirpref(struct inode *pip) { struct m_ext2fs *fs; int cg, prefcg, cgsize; u_int avgifree, avgbfree, avgndir, curdirsize; u_int minifree, minbfree, maxndir; u_int mincg, minndir; u_int dirsize, maxcontigdirs; mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED); fs = pip->i_e2fs; avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount; avgbfree = fs->e2fs->e2fs_fbcount / fs->e2fs_gcount; avgndir = fs->e2fs_total_dir / fs->e2fs_gcount; /* * Force allocation in another cg if creating a first level dir. */ ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref"); if (ITOV(pip)->v_vflag & VV_ROOT) { prefcg = arc4random() % fs->e2fs_gcount; mincg = prefcg; minndir = fs->e2fs_ipg; for (cg = prefcg; cg < fs->e2fs_gcount; cg++) if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir && fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree && fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) { mincg = cg; minndir = fs->e2fs_gd[cg].ext2bgd_ndirs; } for (cg = 0; cg < prefcg; cg++) if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir && fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree && fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) { mincg = cg; minndir = fs->e2fs_gd[cg].ext2bgd_ndirs; } return (mincg); } /* * Count various limits which used for * optimal allocation of a directory inode. */ maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg); minifree = avgifree - avgifree / 4; if (minifree < 1) minifree = 1; minbfree = avgbfree - avgbfree / 4; if (minbfree < 1) minbfree = 1; cgsize = fs->e2fs_fsize * fs->e2fs_fpg; dirsize = AVGDIRSIZE; curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0; if (dirsize < curdirsize) dirsize = curdirsize; maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255); maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR); if (maxcontigdirs == 0) maxcontigdirs = 1; /* * Limit number of dirs in one cg and reserve space for * regular files, but only if we have no deficit in * inodes or space. 
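The thresholds computed just above in ext2_dirpref() are easier to see with concrete numbers. This userspace sketch re-evaluates them for an assumed geometry (4 KiB blocks, 32768 blocks and 8192 inodes per group) and invented free counts; AVGDIRSIZE and AFPDIR are assumed here to be 1024 and 64, purely for illustration.

#include <stdio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	/* Assumed geometry and free counts, for illustration only. */
	unsigned bsize = 4096, fsize = 4096, fpg = 32768, ipg = 8192;
	unsigned avgifree = 6000, avgbfree = 20000, avgndir = 150;
	unsigned avgdirsize = 1024, afpdir = 64;	/* AVGDIRSIZE, AFPDIR (assumed) */

	unsigned maxndir = MIN(avgndir + ipg / 16, ipg);
	unsigned minifree = MAX(avgifree - avgifree / 4, 1u);
	unsigned minbfree = MAX(avgbfree - avgbfree / 4, 1u);
	unsigned cgsize = fsize * fpg;
	unsigned dirsize = avgdirsize;
	unsigned curdirsize = avgndir ?
	    (cgsize - avgbfree * bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	unsigned maxcontigdirs = MIN((avgbfree * bsize) / dirsize, 255u);
	maxcontigdirs = MIN(maxcontigdirs, ipg / afpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	printf("maxndir %u, minifree %u, minbfree %u, maxcontigdirs %u\n",
	    maxndir, minifree, minbfree, maxcontigdirs);
	return (0);
}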
*/ prefcg = ino_to_cg(fs, pip->i_number); for (cg = prefcg; cg < fs->e2fs_gcount; cg++) if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir && fs->e2fs_gd[cg].ext2bgd_nifree >= minifree && fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) { if (fs->e2fs_contigdirs[cg] < maxcontigdirs) return (cg); } for (cg = 0; cg < prefcg; cg++) if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir && fs->e2fs_gd[cg].ext2bgd_nifree >= minifree && fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) { if (fs->e2fs_contigdirs[cg] < maxcontigdirs) return (cg); } /* * This is a backstop when we have deficit in space. */ for (cg = prefcg; cg < fs->e2fs_gcount; cg++) if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree) return (cg); for (cg = 0; cg < prefcg; cg++) if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree) break; return (cg); } /* * Select the desired position for the next block in a file. * * we try to mimic what Remy does in inode_getblk/block_getblk * * we note: blocknr == 0 means that we're about to allocate either * a direct block or a pointer block at the first level of indirection * (In other words, stuff that will go in i_db[] or i_ib[]) * * blocknr != 0 means that we're allocating a block that is none * of the above. Then, blocknr tells us the number of the block * that will hold the pointer */ e4fs_daddr_t ext2_blkpref(struct inode *ip, e2fs_lbn_t lbn, int indx, e2fs_daddr_t *bap, e2fs_daddr_t blocknr) { + struct m_ext2fs *fs; int tmp; + fs = ip->i_e2fs; + mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED); /* * If the next block is actually what we thought it is, then set the * goal to what we thought it should be. */ if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0) return ip->i_next_alloc_goal; /* * Now check whether we were provided with an array that basically * tells us previous blocks to which we want to stay close. */ if (bap) for (tmp = indx - 1; tmp >= 0; tmp--) if (bap[tmp]) return bap[tmp]; /* * Else lets fall back to the blocknr or, if there is none, follow * the rule that a block should be allocated near its inode. */ - return blocknr ? blocknr : + return (blocknr ? blocknr : (e2fs_daddr_t)(ip->i_block_group * - EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) + - ip->i_e2fs->e2fs->e2fs_first_dblock; + EXT2_BLOCKS_PER_GROUP(fs)) + fs->e2fs->e2fs_first_dblock); } /* * Implement the cylinder overflow algorithm. * * The policy implemented by this algorithm is: * 1) allocate the block in its requested cylinder group. * 2) quadradically rehash on the cylinder group number. * 3) brute force search for a free block. */ static u_long ext2_hashalloc(struct inode *ip, int cg, long pref, int size, daddr_t (*allocator) (struct inode *, int, daddr_t, int)) { struct m_ext2fs *fs; ino_t result; int i, icg = cg; mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED); fs = ip->i_e2fs; /* * 1: preferred cylinder group */ result = (*allocator)(ip, cg, pref, size); if (result) return (result); /* * 2: quadratic rehash */ for (i = 1; i < fs->e2fs_gcount; i *= 2) { cg += i; if (cg >= fs->e2fs_gcount) cg -= fs->e2fs_gcount; result = (*allocator)(ip, cg, 0, size); if (result) return (result); } /* * 3: brute force search * Note that we start at i == 2, since 0 was checked initially, * and 1 is always checked in the quadratic rehash. 
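The search order implemented by ext2_hashalloc() above (and completed by the brute-force sweep just below) is easiest to see by printing the cylinder groups it would probe: the preferred group first, then groups at cumulative offsets 1, 3, 7, 15, ... from it, then every remaining group starting two past the original. A standalone sketch, assuming 16 block groups and a preferred group of 5:

#include <stdio.h>

int
main(void)
{
	int gcount = 16, icg = 5, cg, i;	/* assumed group count and preferred group */

	printf("1: %d\n", icg);			/* preferred group */

	cg = icg;
	for (i = 1; i < gcount; i *= 2) {	/* quadratic rehash */
		cg += i;
		if (cg >= gcount)
			cg -= gcount;
		printf("2: %d\n", cg);
	}

	cg = (icg + 2) % gcount;		/* brute force sweep */
	for (i = 2; i < gcount; i++) {
		printf("3: %d\n", cg);
		if (++cg == gcount)
			cg = 0;
	}
	return (0);
}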
*/ cg = (icg + 2) % fs->e2fs_gcount; for (i = 2; i < fs->e2fs_gcount; i++) { result = (*allocator)(ip, cg, 0, size); if (result) return (result); cg++; if (cg == fs->e2fs_gcount) cg = 0; } return (0); } static unsigned long ext2_cg_num_gdb(struct m_ext2fs *fs, int cg) { int gd_per_block, metagroup, first, last; gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd); metagroup = cg / gd_per_block; first = metagroup * gd_per_block; last = first + gd_per_block - 1; if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) || metagroup < fs->e2fs->e3fs_first_meta_bg) { if (!ext2_cg_has_sb(fs, cg)) return (0); if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG)) return (fs->e2fs->e3fs_first_meta_bg); return (fs->e2fs_gdbcount); } if (cg == first || cg == first + 1 || cg == last) return (1); return (0); } static int ext2_num_base_meta_blocks(struct m_ext2fs *fs, int cg) { int num, gd_per_block; gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd); num = ext2_cg_has_sb(fs, cg); if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) || cg < fs->e2fs->e3fs_first_meta_bg * gd_per_block) { if (num) { num += ext2_cg_num_gdb(fs, cg); num += fs->e2fs->e2fs_reserved_ngdb; } } else { num += ext2_cg_num_gdb(fs, cg); } return (num); } static int ext2_get_cg_number(struct m_ext2fs *fs, daddr_t blk) { int cg; if (fs->e2fs->e2fs_bpg == fs->e2fs_bsize * 8) cg = (blk - fs->e2fs->e2fs_first_dblock) / (fs->e2fs_bsize * 8); else cg = blk - fs->e2fs->e2fs_first_dblock; return (cg); } static void ext2_mark_bitmap_end(int start_bit, int end_bit, char *bitmap) { int i; if (start_bit >= end_bit) return; for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++) setbit(bitmap, i); if (i < end_bit) memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3); } static int ext2_cg_block_bitmap_init(struct m_ext2fs *fs, int cg, struct buf *bp) { int bit, bit_max, inodes_per_block; uint32_t start, tmp; if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) || !(fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_BLOCK_UNINIT)) return (0); memset(bp->b_data, 0, fs->e2fs_bsize); bit_max = ext2_num_base_meta_blocks(fs, cg); if ((bit_max >> 3) >= fs->e2fs_bsize) return (EINVAL); for (bit = 0; bit < bit_max; bit++) setbit(bp->b_data, bit); start = cg * fs->e2fs->e2fs_bpg + fs->e2fs->e2fs_first_dblock; /* Set bits for block and inode bitmaps, and inode table */ tmp = fs->e2fs_gd[cg].ext2bgd_b_bitmap; if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || tmp == ext2_get_cg_number(fs, cg)) setbit(bp->b_data, tmp - start); tmp = fs->e2fs_gd[cg].ext2bgd_i_bitmap; if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || tmp == ext2_get_cg_number(fs, cg)) setbit(bp->b_data, tmp - start); tmp = fs->e2fs_gd[cg].ext2bgd_i_tables; inodes_per_block = fs->e2fs_bsize/EXT2_INODE_SIZE(fs); while( tmp < fs->e2fs_gd[cg].ext2bgd_i_tables + fs->e2fs->e2fs_ipg / inodes_per_block ) { if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) || tmp == ext2_get_cg_number(fs, cg)) setbit(bp->b_data, tmp - start); tmp++; } /* * Also if the number of blocks within the group is less than * the blocksize * 8 ( which is the size of bitmap ), set rest * of the block bitmap to 1 */ ext2_mark_bitmap_end(fs->e2fs->e2fs_bpg, fs->e2fs_bsize * 8, bp->b_data); /* Clean the flag */ fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_BLOCK_UNINIT; return (0); } /* * Determine whether a block can be allocated. * * Check to see if a block of the appropriate size is available, * and if it is, allocate it. 
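A small userspace re-statement (illustrative only) of ext2_mark_bitmap_end() above: when a group has fewer blocks than the bitmap has bits, the tail of the bitmap is padded with 1s, bit by bit up to the next byte boundary and then whole bytes at a time, so those nonexistent blocks can never be allocated. The 128-bit bitmap and the 100-block group are invented for the example.

#include <stdio.h>
#include <string.h>

#define	NBBY	8
#define	setbit(a, i)	(((unsigned char *)(a))[(i) / NBBY] |= 1 << ((i) % NBBY))

static void
mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;
	/* Finish the partial byte bit by bit ... */
	for (i = start_bit; i < ((start_bit + 7) & ~7); i++)
		setbit(bitmap, i);
	/* ... then fill whole bytes at once. */
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

int
main(void)
{
	char bitmap[16];	/* a 128-bit block bitmap, all blocks free */

	memset(bitmap, 0, sizeof(bitmap));
	/* Group holds only 100 blocks: blocks 100..127 become "used". */
	mark_bitmap_end(100, sizeof(bitmap) * NBBY, bitmap);
	for (int i = 0; i < (int)sizeof(bitmap); i++)
		printf("%02x%s", (unsigned char)bitmap[i],
		    i == (int)sizeof(bitmap) - 1 ? "\n" : " ");
	return (0);
}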
*/ static daddr_t ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size) { struct m_ext2fs *fs; struct buf *bp; struct ext2mount *ump; daddr_t bno, runstart, runlen; int bit, loc, end, error, start; char *bbp; /* XXX ondisk32 */ fs = ip->i_e2fs; ump = ip->i_ump; if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) return (0); EXT2_UNLOCK(ump); error = bread(ip->i_devvp, fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) { error = ext2_cg_block_bitmap_init(fs, cg, bp); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } } if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) { /* * Another thread allocated the last block in this * group while we were waiting for the buffer. */ brelse(bp); EXT2_LOCK(ump); return (0); } bbp = (char *)bp->b_data; if (dtog(fs, bpref) != cg) bpref = 0; if (bpref != 0) { bpref = dtogd(fs, bpref); /* * if the requested block is available, use it */ if (isclr(bbp, bpref)) { bno = bpref; goto gotit; } } /* * no blocks in the requested cylinder, so take next * available one in this cylinder group. * first try to get 8 contigous blocks, then fall back to a single * block. */ if (bpref) start = dtogd(fs, bpref) / NBBY; else start = 0; end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start; retry: runlen = 0; runstart = 0; for (loc = start; loc < end; loc++) { if (bbp[loc] == (char)0xff) { runlen = 0; continue; } /* Start of a run, find the number of high clear bits. */ if (runlen == 0) { bit = fls(bbp[loc]); runlen = NBBY - bit; runstart = loc * NBBY + bit; } else if (bbp[loc] == 0) { /* Continue a run. */ runlen += NBBY; } else { /* * Finish the current run. If it isn't long * enough, start a new one. */ bit = ffs(bbp[loc]) - 1; runlen += bit; if (runlen >= 8) { bno = runstart; goto gotit; } /* Run was too short, start a new one. */ bit = fls(bbp[loc]); runlen = NBBY - bit; runstart = loc * NBBY + bit; } /* If the current run is long enough, use it. */ if (runlen >= 8) { bno = runstart; goto gotit; } } if (start != 0) { end = start; start = 0; goto retry; } bno = ext2_mapsearch(fs, bbp, bpref); if (bno < 0) { brelse(bp); EXT2_LOCK(ump); return (0); } gotit: #ifdef INVARIANTS if (isset(bbp, bno)) { printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n", cg, (intmax_t)bno, fs->e2fs_fsmnt); panic("ext2fs_alloccg: dup alloc"); } #endif setbit(bbp, bno); EXT2_LOCK(ump); ext2_clusteracct(fs, bbp, cg, bno, -1); fs->e2fs->e2fs_fbcount--; fs->e2fs_gd[cg].ext2bgd_nbfree--; fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); bdwrite(bp); return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno); } /* * Determine whether a cluster can be allocated. */ static daddr_t ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len) { struct m_ext2fs *fs; struct ext2mount *ump; struct buf *bp; char *bbp; int bit, error, got, i, loc, run; int32_t *lp; daddr_t bno; fs = ip->i_e2fs; ump = ip->i_ump; if (fs->e2fs_maxcluster[cg] < len) return (0); EXT2_UNLOCK(ump); error = bread(ip->i_devvp, fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) goto fail_lock; bbp = (char *)bp->b_data; EXT2_LOCK(ump); /* * Check to see if a cluster of the needed size (or bigger) is * available in this cylinder group. 
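A simplified userspace sketch of the run search in ext2_alloccg() above, which looks for eight contiguous free blocks a byte at a time before falling back to ext2_mapsearch(): a set bit means the block is allocated, fls() counts the free bits above the highest used bit of a byte, and ffs() the free bits below the lowest used bit of the next. fls() is re-implemented locally because not every userspace libc provides it; the sample bitmap is invented, and the restart-on-short-run handling of the real loop is omitted.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define	NBBY	8

static int
local_fls(int mask)
{
	int bit;

	for (bit = 0; mask != 0; bit++)
		mask >>= 1;
	return (bit);	/* 1-based index of the highest set bit, 0 if none */
}

int
main(void)
{
	/* Blocks 0..12 allocated, 13..31 free: the run starts at block 13. */
	unsigned char bbp[] = { 0xff, 0x1f, 0x00, 0x00 };
	int loc, bit, runstart = -1, runlen = 0;

	for (loc = 0; loc < (int)sizeof(bbp); loc++) {
		if (bbp[loc] == 0xff) {		/* byte fully allocated */
			runlen = 0;
			continue;
		}
		if (runlen == 0) {		/* start a run above the last used bit */
			bit = local_fls(bbp[loc]);
			runlen = NBBY - bit;
			runstart = loc * NBBY + bit;
		} else if (bbp[loc] == 0) {	/* whole byte free, extend the run */
			runlen += NBBY;
		} else {			/* run ends inside this byte */
			runlen += ffs(bbp[loc]) - 1;
		}
		if (runlen >= 8) {
			printf("run of %d free blocks starting at block %d\n",
			    runlen, runstart);
			return (0);
		}
	}
	printf("no 8-block run found\n");
	return (0);
}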
*/ lp = &fs->e2fs_clustersum[cg].cs_sum[len]; for (i = len; i <= fs->e2fs_contigsumsize; i++) if (*lp++ > 0) break; if (i > fs->e2fs_contigsumsize) { /* * Update the cluster summary information to reflect * the true maximum-sized cluster so that future cluster * allocation requests can avoid reading the bitmap only * to find no cluster. */ lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1]; for (i = len - 1; i > 0; i--) if (*lp-- > 0) break; fs->e2fs_maxcluster[cg] = i; goto fail; } EXT2_UNLOCK(ump); /* Search the bitmap to find a big enough cluster like in FFS. */ if (dtog(fs, bpref) != cg) bpref = 0; if (bpref != 0) bpref = dtogd(fs, bpref); loc = bpref / NBBY; bit = 1 << (bpref % NBBY); for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) { if ((bbp[loc] & bit) != 0) run = 0; else { run++; if (run == len) break; } if ((got & (NBBY - 1)) != (NBBY - 1)) bit <<= 1; else { loc++; bit = 1; } } if (got >= fs->e2fs->e2fs_fpg) goto fail_lock; /* Allocate the cluster that we found. */ for (i = 1; i < len; i++) if (!isclr(bbp, got - run + i)) panic("ext2_clusteralloc: map mismatch"); bno = got - run + 1; if (bno >= fs->e2fs->e2fs_fpg) panic("ext2_clusteralloc: allocated out of group"); EXT2_LOCK(ump); for (i = 0; i < len; i += fs->e2fs_fpb) { setbit(bbp, bno + i); ext2_clusteracct(fs, bbp, cg, bno + i, -1); fs->e2fs->e2fs_fbcount--; fs->e2fs_gd[cg].ext2bgd_nbfree--; } fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); bdwrite(bp); return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno); fail_lock: EXT2_LOCK(ump); fail: brelse(bp); return (0); } static int ext2_zero_inode_table(struct inode *ip, int cg) { struct m_ext2fs *fs; struct buf *bp; int i, all_blks, used_blks; fs = ip->i_e2fs; if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_ZEROED) return (0); all_blks = fs->e2fs->e2fs_inode_size * fs->e2fs->e2fs_ipg / fs->e2fs_bsize; used_blks = howmany(fs->e2fs->e2fs_ipg - fs->e2fs_gd[cg].ext4bgd_i_unused, fs->e2fs_bsize / EXT2_INODE_SIZE(fs)); for (i = 0; i < all_blks - used_blks; i++) { bp = getblk(ip->i_devvp, fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_tables + used_blks + i), fs->e2fs_bsize, 0, 0, 0); if (!bp) return (EIO); vfs_bio_bzero_buf(bp, 0, fs->e2fs_bsize); bawrite(bp); } fs->e2fs_gd[cg].ext4bgd_flags |= EXT2_BG_INODE_ZEROED; return (0); } /* * Determine whether an inode can be allocated. * * Check to see if an inode is available, and if it is, * allocate it using tode in the specified cylinder group. */ static daddr_t ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode) { struct m_ext2fs *fs; struct buf *bp; struct ext2mount *ump; int error, start, len; char *ibp, *loc; ipref--; /* to avoid a lot of (ipref -1) */ if (ipref == -1) ipref = 0; fs = ip->i_e2fs; ump = ip->i_ump; if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) return (0); EXT2_UNLOCK(ump); error = bread(ip->i_devvp, fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) { if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_UNINIT) { memset(bp->b_data, 0, fs->e2fs_bsize); fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_INODE_UNINIT; } error = ext2_zero_inode_table(ip, cg); if (error) { brelse(bp); EXT2_LOCK(ump); return (0); } } if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) { /* * Another thread allocated the last i-node in this * group while we were waiting for the buffer. 
*/ brelse(bp); EXT2_LOCK(ump); return (0); } ibp = (char *)bp->b_data; if (ipref) { ipref %= fs->e2fs->e2fs_ipg; if (isclr(ibp, ipref)) goto gotit; } start = ipref / NBBY; len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY); loc = memcchr(&ibp[start], 0xff, len); if (loc == NULL) { len = start + 1; start = 0; loc = memcchr(&ibp[start], 0xff, len); if (loc == NULL) { printf("cg = %d, ipref = %lld, fs = %s\n", cg, (long long)ipref, fs->e2fs_fsmnt); panic("ext2fs_nodealloccg: map corrupted"); /* NOTREACHED */ } } ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1; gotit: setbit(ibp, ipref); EXT2_LOCK(ump); fs->e2fs_gd[cg].ext2bgd_nifree--; if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) fs->e2fs_gd[cg].ext4bgd_i_unused--; fs->e2fs->e2fs_ficount--; fs->e2fs_fmod = 1; if ((mode & IFMT) == IFDIR) { fs->e2fs_gd[cg].ext2bgd_ndirs++; fs->e2fs_total_dir++; } EXT2_UNLOCK(ump); bdwrite(bp); return (cg * fs->e2fs->e2fs_ipg + ipref + 1); } /* * Free a block or fragment. * */ void ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size) { struct m_ext2fs *fs; struct buf *bp; struct ext2mount *ump; int cg, error; char *bbp; fs = ip->i_e2fs; ump = ip->i_ump; cg = dtog(fs, bno); if ((u_int)bno >= fs->e2fs->e2fs_bcount) { printf("bad block %lld, ino %ju\n", (long long)bno, (uintmax_t)ip->i_number); ext2_fserr(fs, ip->i_uid, "bad block"); return; } error = bread(ip->i_devvp, fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return; } bbp = (char *)bp->b_data; bno = dtogd(fs, bno); if (isclr(bbp, bno)) { printf("block = %lld, fs = %s\n", (long long)bno, fs->e2fs_fsmnt); panic("ext2_blkfree: freeing free block"); } clrbit(bbp, bno); EXT2_LOCK(ump); ext2_clusteracct(fs, bbp, cg, bno, 1); fs->e2fs->e2fs_fbcount++; fs->e2fs_gd[cg].ext2bgd_nbfree++; fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); bdwrite(bp); } /* * Free an inode. * */ int ext2_vfree(struct vnode *pvp, ino_t ino, int mode) { struct m_ext2fs *fs; struct inode *pip; struct buf *bp; struct ext2mount *ump; int error, cg; char *ibp; pip = VTOI(pvp); fs = pip->i_e2fs; ump = pip->i_ump; if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount) panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s", pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt); cg = ino_to_cg(fs, ino); error = bread(pip->i_devvp, fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (0); } ibp = (char *)bp->b_data; ino = (ino - 1) % fs->e2fs->e2fs_ipg; if (isclr(ibp, ino)) { printf("ino = %llu, fs = %s\n", (unsigned long long)ino, fs->e2fs_fsmnt); if (fs->e2fs_ronly == 0) panic("ext2_vfree: freeing free inode"); } clrbit(ibp, ino); EXT2_LOCK(ump); fs->e2fs->e2fs_ficount++; fs->e2fs_gd[cg].ext2bgd_nifree++; if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) fs->e2fs_gd[cg].ext4bgd_i_unused++; if ((mode & IFMT) == IFDIR) { fs->e2fs_gd[cg].ext2bgd_ndirs--; fs->e2fs_total_dir--; } fs->e2fs_fmod = 1; EXT2_UNLOCK(ump); bdwrite(bp); return (0); } /* * Find a block in the specified cylinder group. * * It is a panic if a request is made to find a block if none are * available. 
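The memcchr()/ffs() idiom used by ext2_nodealloccg() above (and by ext2_mapsearch() just below) reduces the bitmap scan to: skip every fully used 0xff byte, then take the lowest clear bit of the first byte with room. A standalone sketch with an invented bitmap; a local stand-in for memcchr() is included since it is not available in every userspace libc.

#include <stdio.h>
#include <strings.h>	/* ffs() */

static const unsigned char *
local_memcchr(const unsigned char *p, int c, size_t len)
{
	for (; len != 0; p++, len--)
		if (*p != (unsigned char)c)
			return (p);
	return (NULL);	/* every byte equals c: bitmap is full */
}

int
main(void)
{
	/* Bits 0..18 set (allocated); bit 19 is the first clear one. */
	unsigned char ibp[] = { 0xff, 0xff, 0x07, 0x00 };
	const unsigned char *loc;
	int bit;

	loc = local_memcchr(ibp, 0xff, sizeof(ibp));
	if (loc == NULL) {
		printf("bitmap full\n");
		return (1);
	}
	bit = (int)(loc - ibp) * 8 + ffs(~*loc & 0xff) - 1;
	printf("first free bit: %d\n", bit);
	return (0);
}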
*/ static daddr_t ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref) { char *loc; int start, len; /* * find the fragment by searching through the free block * map for an appropriate bit pattern */ if (bpref) start = dtogd(fs, bpref) / NBBY; else start = 0; len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start; loc = memcchr(&bbp[start], 0xff, len); if (loc == NULL) { len = start + 1; start = 0; loc = memcchr(&bbp[start], 0xff, len); if (loc == NULL) { printf("start = %d, len = %d, fs = %s\n", start, len, fs->e2fs_fsmnt); panic("ext2_mapsearch: map corrupted"); /* NOTREACHED */ } } return ((loc - bbp) * NBBY + ffs(~*loc) - 1); } /* * Fserr prints the name of a filesystem with an error diagnostic. * * The form of the error message is: * fs: error message */ void ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp) { log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp); } int ext2_cg_has_sb(struct m_ext2fs *fs, int cg) { int a3, a5, a7; if (cg == 0) return (1); if (EXT2_HAS_COMPAT_FEATURE(fs, EXT2F_COMPAT_SPARSESUPER2)) { if (cg == fs->e2fs->e4fs_backup_bgs[0] || cg == fs->e2fs->e4fs_backup_bgs[1]) return (1); return (0); } if ((cg <= 1) || !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_SPARSESUPER)) return (1); if (!(cg & 1)) return (0); for (a3 = 3, a5 = 5, a7 = 7; a3 <= cg || a5 <= cg || a7 <= cg; a3 *= 3, a5 *= 5, a7 *= 7) if (cg == a3 || cg == a5 || cg == a7) return (1); return (0); } Index: head/sys/fs/ext2fs/ext2_balloc.c =================================================================== --- head/sys/fs/ext2fs/ext2_balloc.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_balloc.c (revision 324706) @@ -1,300 +1,374 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
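For reference, the sparse-superblock rule encoded in ext2_cg_has_sb() above places backup superblocks only in groups 0 and 1 and in groups whose number is a power of 3, 5 or 7. A standalone sketch (sparse_super assumed enabled, the SPARSESUPER2 backup list ignored) that lists the backup groups for an invented 64-group filesystem:

#include <stdio.h>

static int
cg_has_sb_sparse(int cg)
{
	int a3, a5, a7;

	if (cg <= 1)
		return (1);
	if (!(cg & 1))		/* even groups never hold a backup */
		return (0);
	for (a3 = 3, a5 = 5, a7 = 7;
	    a3 <= cg || a5 <= cg || a7 <= cg;
	    a3 *= 3, a5 *= 5, a7 *= 7)
		if (cg == a3 || cg == a5 || cg == a7)
			return (1);
	return (0);
}

int
main(void)
{
	/* For 64 groups this prints: 0 1 3 5 7 9 25 27 49 */
	for (int cg = 0; cg < 64; cg++)
		if (cg_has_sb_sparse(cg))
			printf("%d ", cg);
	printf("\n");
	return (0);
}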
* * @(#)ffs_balloc.c 8.4 (Berkeley) 9/23/93 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include +static int +ext2_ext_balloc(struct inode *ip, uint32_t lbn, int size, + struct ucred *cred, struct buf **bpp, int flags) +{ + struct m_ext2fs *fs; + struct buf *bp = NULL; + struct vnode *vp = ITOV(ip); + uint32_t nb; + int osize, nsize, blks, error, allocated; + + fs = ip->i_e2fs; + blks = howmany(size, fs->e2fs_bsize); + + error = ext4_ext_get_blocks(ip, lbn, blks, cred, NULL, &allocated, &nb); + if (error) + return (error); + + if (allocated) { + if (ip->i_size < (lbn + 1) * fs->e2fs_bsize) + nsize = fragroundup(fs, size); + else + nsize = fs->e2fs_bsize; + + bp = getblk(vp, lbn, nsize, 0, 0, 0); + if(!bp) + return (EIO); + + bp->b_blkno = fsbtodb(fs, nb); + if (flags & BA_CLRBUF) + vfs_bio_clrbuf(bp); + } else { + if (ip->i_size >= (lbn + 1) * fs->e2fs_bsize) { + + error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp); + if (error) { + brelse(bp); + return (error); + } + bp->b_blkno = fsbtodb(fs, nb); + *bpp = bp; + return (0); + } + + /* + * Consider need to reallocate a fragment. + */ + osize = fragroundup(fs, blkoff(fs, ip->i_size)); + nsize = fragroundup(fs, size); + if (nsize <= osize) { + error = bread(vp, lbn, osize, NOCRED, &bp); + if (error) { + brelse(bp); + return (error); + } + bp->b_blkno = fsbtodb(fs, nb); + } else { + error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp); + if (error) { + brelse(bp); + return (error); + } + bp->b_blkno = fsbtodb(fs, nb); + } + } + + *bpp = bp; + + return (error); +} + /* * Balloc defines the structure of filesystem storage * by allocating the physical blocks on a device given * the inode and the logical block number in a file. */ int ext2_balloc(struct inode *ip, e2fs_lbn_t lbn, int size, struct ucred *cred, struct buf **bpp, int flags) { struct m_ext2fs *fs; struct ext2mount *ump; struct buf *bp, *nbp; struct vnode *vp = ITOV(ip); struct indir indirs[EXT2_NIADDR + 2]; e4fs_daddr_t nb, newb; e2fs_daddr_t *bap, pref; int osize, nsize, num, i, error; *bpp = NULL; if (lbn < 0) return (EFBIG); fs = ip->i_e2fs; ump = ip->i_ump; /* * check if this is a sequential block allocation. * If so, increment next_alloc fields to allow ext2_blkpref * to make a good guess */ if (lbn == ip->i_next_alloc_block + 1) { ip->i_next_alloc_block++; ip->i_next_alloc_goal++; } + + if (ip->i_flag & IN_E4EXTENTS) + return (ext2_ext_balloc(ip, lbn, size, cred, bpp, flags)); + /* * The first EXT2_NDADDR blocks are direct blocks */ if (lbn < EXT2_NDADDR) { nb = ip->i_db[lbn]; /* * no new block is to be allocated, and no need to expand * the file */ if (nb != 0 && ip->i_size >= (lbn + 1) * fs->e2fs_bsize) { error = bread(vp, lbn, fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } bp->b_blkno = fsbtodb(fs, nb); *bpp = bp; return (0); } if (nb != 0) { /* * Consider need to reallocate a fragment. */ osize = fragroundup(fs, blkoff(fs, ip->i_size)); nsize = fragroundup(fs, size); if (nsize <= osize) { error = bread(vp, lbn, osize, NOCRED, &bp); if (error) { brelse(bp); return (error); } bp->b_blkno = fsbtodb(fs, nb); } else { /* * Godmar thinks: this shouldn't happen w/o * fragments */ printf("nsize %d(%d) > osize %d(%d) nb %d\n", (int)nsize, (int)size, (int)osize, (int)ip->i_size, (int)nb); panic( "ext2_balloc: Something is terribly wrong"); /* * please note there haven't been any changes from here on - * FFS seems to work. 
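A quick userspace illustration (not part of the change) of the size arithmetic used by the new ext2_ext_balloc() above and by ext2_balloc() below: howmany() converts a byte count into whole blocks, blkoff() gives the offset of a file size within its last block, and fragroundup() rounds a byte count up to an allocation boundary. The 4 KiB block size and the sample sizes are assumed; ext2 has no sub-block fragments in practice, so fragroundup() here simply rounds up to a full block.

#include <stdio.h>
#include <stdint.h>

#define	BSIZE		4096u			/* assumed e2fs_bsize */
#define	blkoff(loc)	((loc) & (BSIZE - 1))	/* byte offset inside a block */
#define	fragroundup(sz)	(((sz) + BSIZE - 1) & ~(BSIZE - 1))
#define	howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	uint64_t isize = 10000;		/* current file size in bytes (invented) */
	unsigned size = 3000;		/* size requested from balloc (invented) */

	printf("howmany(%u, %u)     = %u block(s)\n", size, BSIZE,
	    (unsigned)howmany(size, BSIZE));
	printf("blkoff(i_size)        = %u bytes into the last block\n",
	    (unsigned)blkoff(isize));
	printf("fragroundup(blkoff)   = %u (old buffer size, osize)\n",
	    (unsigned)fragroundup(blkoff(isize)));
	printf("fragroundup(size)     = %u (new buffer size, nsize)\n",
	    (unsigned)fragroundup(size));
	return (0);
}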
*/ } } else { if (ip->i_size < (lbn + 1) * fs->e2fs_bsize) nsize = fragroundup(fs, size); else nsize = fs->e2fs_bsize; EXT2_LOCK(ump); error = ext2_alloc(ip, lbn, ext2_blkpref(ip, lbn, (int)lbn, &ip->i_db[0], 0), nsize, cred, &newb); if (error) return (error); bp = getblk(vp, lbn, nsize, 0, 0, 0); bp->b_blkno = fsbtodb(fs, newb); if (flags & BA_CLRBUF) vfs_bio_clrbuf(bp); } ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno); ip->i_flag |= IN_CHANGE | IN_UPDATE; *bpp = bp; return (0); } /* * Determine the number of levels of indirection. */ pref = 0; if ((error = ext2_getlbns(vp, lbn, indirs, &num)) != 0) return (error); #ifdef INVARIANTS if (num < 1) panic("ext2_balloc: ext2_getlbns returned indirect block"); #endif /* * Fetch the first indirect block allocating if necessary. */ --num; nb = ip->i_ib[indirs[0].in_off]; if (nb == 0) { EXT2_LOCK(ump); pref = ext2_blkpref(ip, lbn, indirs[0].in_off + EXT2_NDIR_BLOCKS, &ip->i_db[0], 0); if ((error = ext2_alloc(ip, lbn, pref, fs->e2fs_bsize, cred, &newb))) return (error); nb = newb; bp = getblk(vp, indirs[1].in_lbn, fs->e2fs_bsize, 0, 0, 0); bp->b_blkno = fsbtodb(fs, newb); vfs_bio_clrbuf(bp); /* * Write synchronously so that indirect blocks * never point at garbage. */ if ((error = bwrite(bp)) != 0) { ext2_blkfree(ip, nb, fs->e2fs_bsize); return (error); } ip->i_ib[indirs[0].in_off] = newb; ip->i_flag |= IN_CHANGE | IN_UPDATE; } /* * Fetch through the indirect blocks, allocating as necessary. */ for (i = 1;;) { error = bread(vp, indirs[i].in_lbn, (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } bap = (e2fs_daddr_t *)bp->b_data; nb = bap[indirs[i].in_off]; if (i == num) break; i += 1; if (nb != 0) { bqrelse(bp); continue; } EXT2_LOCK(ump); if (pref == 0) pref = ext2_blkpref(ip, lbn, indirs[i].in_off, bap, bp->b_lblkno); error = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newb); if (error) { brelse(bp); return (error); } nb = newb; nbp = getblk(vp, indirs[i].in_lbn, fs->e2fs_bsize, 0, 0, 0); nbp->b_blkno = fsbtodb(fs, nb); vfs_bio_clrbuf(nbp); /* * Write synchronously so that indirect blocks * never point at garbage. */ if ((error = bwrite(nbp)) != 0) { ext2_blkfree(ip, nb, fs->e2fs_bsize); EXT2_UNLOCK(ump); brelse(bp); return (error); } bap[indirs[i - 1].in_off] = nb; /* * If required, write synchronously, otherwise use * delayed write. */ if (flags & IO_SYNC) { bwrite(bp); } else { if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; bdwrite(bp); } } /* * Get the data block, allocating if necessary. */ if (nb == 0) { EXT2_LOCK(ump); pref = ext2_blkpref(ip, lbn, indirs[i].in_off, &bap[0], bp->b_lblkno); if ((error = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newb)) != 0) { brelse(bp); return (error); } nb = newb; nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0, 0); nbp->b_blkno = fsbtodb(fs, nb); if (flags & BA_CLRBUF) vfs_bio_clrbuf(nbp); bap[indirs[i].in_off] = nb; /* * If required, write synchronously, otherwise use * delayed write. 
*/ if (flags & IO_SYNC) { bwrite(bp); } else { if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; bdwrite(bp); } *bpp = nbp; return (0); } brelse(bp); if (flags & BA_CLRBUF) { int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT; if (seqcount && (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { error = cluster_read(vp, ip->i_size, lbn, (int)fs->e2fs_bsize, NOCRED, MAXBSIZE, seqcount, 0, &nbp); } else { error = bread(vp, lbn, (int)fs->e2fs_bsize, NOCRED, &nbp); } if (error) { brelse(nbp); return (error); } } else { nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0, 0); nbp->b_blkno = fsbtodb(fs, nb); } *bpp = nbp; return (0); } Index: head/sys/fs/ext2fs/ext2_bmap.c =================================================================== --- head/sys/fs/ext2fs/ext2_bmap.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_bmap.c (revision 324706) @@ -1,386 +1,381 @@ /*- * Copyright (c) 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ufs_bmap.c 8.7 (Berkeley) 3/21/95 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -static int ext4_bmapext(struct vnode *, int32_t, int64_t *, int *, int *); - /* * Bmap converts the logical block number of a file to its physical block * number on the disk. The conversion is done by using the logical block * number to index into the array of block pointers described by the dinode. */ int ext2_bmap(struct vop_bmap_args *ap) { daddr_t blkno; int error; /* * Check for underlying vnode requests and ensure that logical * to physical mapping is requested. 
*/ if (ap->a_bop != NULL) *ap->a_bop = &VTOI(ap->a_vp)->i_devvp->v_bufobj; if (ap->a_bnp == NULL) return (0); if (VTOI(ap->a_vp)->i_flag & IN_E4EXTENTS) error = ext4_bmapext(ap->a_vp, ap->a_bn, &blkno, ap->a_runp, ap->a_runb); else error = ext2_bmaparray(ap->a_vp, ap->a_bn, &blkno, ap->a_runp, ap->a_runb); *ap->a_bnp = blkno; return (error); } /* * Convert the logical block number of a file to its physical block number * on the disk within ext4 extents. */ -static int +int ext4_bmapext(struct vnode *vp, int32_t bn, int64_t *bnp, int *runp, int *runb) { struct inode *ip; struct m_ext2fs *fs; + struct ext4_extent_header *ehp; struct ext4_extent *ep; - struct ext4_extent_path path = {.ep_bp = NULL}; + struct ext4_extent_path *path = NULL; daddr_t lbn; - int error; + int error, depth; ip = VTOI(vp); fs = ip->i_e2fs; lbn = bn; + ehp = (struct ext4_extent_header *)ip->i_data; + depth = ehp->eh_depth; + *bnp = -1; if (runp != NULL) *runp = 0; if (runb != NULL) *runb = 0; - error = 0; - ext4_ext_find_extent(fs, ip, lbn, &path); - if (path.ep_is_sparse) { - *bnp = -1; - if (runp != NULL) - *runp = path.ep_sparse_ext.e_len - - (lbn - path.ep_sparse_ext.e_blk) - 1; - if (runb != NULL) - *runb = lbn - path.ep_sparse_ext.e_blk; - } else { - if (path.ep_ext == NULL) { - error = EIO; - goto out; - } - ep = path.ep_ext; - *bnp = fsbtodb(fs, lbn - ep->e_blk + - (ep->e_start_lo | (daddr_t)ep->e_start_hi << 32)); + error = ext4_ext_find_extent(ip, lbn, &path); + if (error) + return (error); - if (*bnp == 0) - *bnp = -1; - - if (runp != NULL) - *runp = ep->e_len - (lbn - ep->e_blk) - 1; - if (runb != NULL) - *runb = lbn - ep->e_blk; + ep = path[depth].ep_ext; + if(ep) { + if (lbn < ep->e_blk) { + if (runp != NULL) + *runp = ep->e_blk - lbn - 1; + } else if (ep->e_blk <= lbn && lbn < ep->e_blk + ep->e_len) { + *bnp = fsbtodb(fs, lbn - ep->e_blk + + (ep->e_start_lo | (daddr_t)ep->e_start_hi << 32)); + if (runp != NULL) + *runp = ep->e_len - (lbn - ep->e_blk) - 1; + if (runb != NULL) + *runb = lbn - ep->e_blk; + } else { + if (runb != NULL) + *runb = ep->e_blk + lbn - ep->e_len; + } } -out: - if (path.ep_bp != NULL) - brelse(path.ep_bp); + ext4_ext_path_free(path); return (error); } /* * Indirect blocks are now on the vnode for the file. They are given negative * logical block numbers. Indirect blocks are addressed by the negative * address of the first data block to which they point. Double indirect blocks * are addressed by one less than the address of the first indirect block to * which they point. Triple indirect blocks are addressed by one less than * the address of the first double indirect block to which they point. * * ext2_bmaparray does the bmap conversion, and if requested returns the * array of logical blocks which must be traversed to get to a block. * Each entry contains the offset into that block that gets you to the * next block and the disk address of the block (if it is assigned). 
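A minimal userspace sketch of the translation the reworked ext4_bmapext() above performs once ext4_ext_find_extent() has located the extent at the requested depth: inside the extent the physical block is the extent start plus the offset into it, and runp/runb report how much of the extent lies after and before the block; in front of the extent the block is sparse. The sample extent and field layout are simplified for the example (the real struct ext4_extent splits the start into e_start_lo and e_start_hi, and the kernel then converts to device blocks with fsbtodb()).

#include <stdio.h>
#include <stdint.h>

struct sample_extent {
	uint32_t e_blk;		/* first logical block covered */
	uint16_t e_len;		/* number of blocks covered */
	uint64_t e_start;	/* first physical block (hi:lo combined) */
};

int
main(void)
{
	struct sample_extent ep = { .e_blk = 100, .e_len = 8, .e_start = 5000 };
	uint32_t lbn = 103;	/* logical block being looked up (invented) */

	if (lbn < ep.e_blk) {
		/* Hole before the extent: no mapping, the run ends at the extent. */
		printf("lbn %u is sparse, %u more sparse blocks follow\n",
		    lbn, ep.e_blk - lbn - 1);
	} else if (lbn < ep.e_blk + ep.e_len) {
		uint64_t pbn = ep.e_start + (lbn - ep.e_blk);
		printf("lbn %u -> pbn %llu, run forward %u, run back %u\n",
		    lbn, (unsigned long long)pbn,
		    ep.e_len - (lbn - ep.e_blk) - 1, lbn - ep.e_blk);
	} else {
		printf("lbn %u lies past the extent\n", lbn);
	}
	return (0);
}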
*/ int ext2_bmaparray(struct vnode *vp, daddr_t bn, daddr_t *bnp, int *runp, int *runb) { struct inode *ip; struct buf *bp; struct ext2mount *ump; struct mount *mp; struct indir a[EXT2_NIADDR + 1], *ap; daddr_t daddr; e2fs_lbn_t metalbn; int error, num, maxrun = 0, bsize; int *nump; ap = NULL; ip = VTOI(vp); mp = vp->v_mount; ump = VFSTOEXT2(mp); bsize = EXT2_BLOCK_SIZE(ump->um_e2fs); if (runp) { maxrun = mp->mnt_iosize_max / bsize - 1; *runp = 0; } if (runb) *runb = 0; ap = a; nump = # error = ext2_getlbns(vp, bn, ap, nump); if (error) return (error); num = *nump; if (num == 0) { *bnp = blkptrtodb(ump, ip->i_db[bn]); if (*bnp == 0) { *bnp = -1; } else if (runp) { daddr_t bnb = bn; for (++bn; bn < EXT2_NDADDR && *runp < maxrun && is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]); ++bn, ++*runp); bn = bnb; if (runb && (bn > 0)) { for (--bn; (bn >= 0) && (*runb < maxrun) && is_sequential(ump, ip->i_db[bn], ip->i_db[bn + 1]); --bn, ++*runb); } } return (0); } /* Get disk address out of indirect block array */ daddr = ip->i_ib[ap->in_off]; for (bp = NULL, ++ap; --num; ++ap) { /* * Exit the loop if there is no disk address assigned yet and * the indirect block isn't in the cache, or if we were * looking for an indirect block and we've found it. */ metalbn = ap->in_lbn; if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) || metalbn == bn) break; /* * If we get here, we've either got the block in the cache * or we have a disk address for it, go fetch it. */ if (bp) bqrelse(bp); bp = getblk(vp, metalbn, bsize, 0, 0, 0); if ((bp->b_flags & B_CACHE) == 0) { #ifdef INVARIANTS if (!daddr) panic("ext2_bmaparray: indirect block not in cache"); #endif bp->b_blkno = blkptrtodb(ump, daddr); bp->b_iocmd = BIO_READ; bp->b_flags &= ~B_INVAL; bp->b_ioflags &= ~BIO_ERROR; vfs_busy_pages(bp, 0); bp->b_iooffset = dbtob(bp->b_blkno); bstrategy(bp); #ifdef RACCT if (racct_enable) { PROC_LOCK(curproc); racct_add_buf(curproc, bp, 0); PROC_UNLOCK(curproc); } #endif curthread->td_ru.ru_inblock++; error = bufwait(bp); if (error) { brelse(bp); return (error); } } daddr = ((e2fs_daddr_t *)bp->b_data)[ap->in_off]; if (num == 1 && daddr && runp) { for (bn = ap->in_off + 1; bn < MNINDIR(ump) && *runp < maxrun && is_sequential(ump, ((e2fs_daddr_t *)bp->b_data)[bn - 1], ((e2fs_daddr_t *)bp->b_data)[bn]); ++bn, ++*runp); bn = ap->in_off; if (runb && bn) { for (--bn; bn >= 0 && *runb < maxrun && is_sequential(ump, ((e2fs_daddr_t *)bp->b_data)[bn], ((e2fs_daddr_t *)bp->b_data)[bn + 1]); --bn, ++*runb); } } } if (bp) bqrelse(bp); /* * Since this is FFS independent code, we are out of scope for the * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they * will fall in the range 1..um_seqinc, so we use that test and * return a request for a zeroed out buffer if attempts are made * to read a BLK_NOCOPY or BLK_SNAP block. */ if ((ip->i_flags & SF_SNAPSHOT) && daddr > 0 && daddr < ump->um_seqinc) { *bnp = -1; return (0); } *bnp = blkptrtodb(ump, daddr); if (*bnp == 0) { *bnp = -1; } return (0); } /* * Create an array of logical block number/offset pairs which represent the * path of indirect blocks required to access a data block. The first "pair" * contains the logical block number of the appropriate single, double or * triple indirect block and the offset into the inode indirect block array. * Note, the logical block number of the inode single/double/triple indirect * block appears twice in the array, once with the offset into the i_ib and * once with the offset into the page itself. 
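For the classic block map handled by ext2_bmaparray() above and ext2_getlbns() below, the number of indirect blocks that must be traversed depends only on the logical block number and the pointers-per-block count. A standalone sketch with the usual 12 direct pointers and an assumed 4 KiB block size (1024 32-bit pointers per indirect block):

#include <stdio.h>
#include <stdint.h>

#define	NDADDR	12	/* EXT2_NDADDR: direct pointers in the inode */
#define	NINDIR	1024	/* pointers per indirect block, 4 KiB / 4 (assumed) */

static int
indirection_levels(uint64_t bn)
{
	uint64_t blockcnt = NINDIR;
	int level;

	if (bn < NDADDR)
		return (0);			/* direct block */
	bn -= NDADDR;
	for (level = 1; level <= 3; level++) {
		if (bn < blockcnt)
			return (level);		/* single/double/triple indirect */
		bn -= blockcnt;
		blockcnt *= NINDIR;
	}
	return (-1);				/* beyond the triple indirect range */
}

int
main(void)
{
	uint64_t samples[] = { 5, 11, 12, 1035, 1036, 1049612 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("lbn %8llu -> %d level(s) of indirection\n",
		    (unsigned long long)samples[i],
		    indirection_levels(samples[i]));
	return (0);
}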
*/ int ext2_getlbns(struct vnode *vp, daddr_t bn, struct indir *ap, int *nump) { long blockcnt; e2fs_lbn_t metalbn, realbn; struct ext2mount *ump; int i, numlevels, off; int64_t qblockcnt; ump = VFSTOEXT2(vp->v_mount); if (nump) *nump = 0; numlevels = 0; realbn = bn; if ((long)bn < 0) bn = -(long)bn; /* The first EXT2_NDADDR blocks are direct blocks. */ if (bn < EXT2_NDADDR) return (0); /* * Determine the number of levels of indirection. After this loop * is done, blockcnt indicates the number of data blocks possible * at the previous level of indirection, and EXT2_NIADDR - i is the * number of levels of indirection needed to locate the requested block. */ for (blockcnt = 1, i = EXT2_NIADDR, bn -= EXT2_NDADDR; ; i--, bn -= blockcnt) { if (i == 0) return (EFBIG); /* * Use int64_t's here to avoid overflow for triple indirect * blocks when longs have 32 bits and the block size is more * than 4K. */ qblockcnt = (int64_t)blockcnt * MNINDIR(ump); if (bn < qblockcnt) break; blockcnt = qblockcnt; } /* Calculate the address of the first meta-block. */ if (realbn >= 0) metalbn = -(realbn - bn + EXT2_NIADDR - i); else metalbn = -(-realbn - bn + EXT2_NIADDR - i); /* * At each iteration, off is the offset into the bap array which is * an array of disk addresses at the current level of indirection. * The logical block number and the offset in that block are stored * into the argument array. */ ap->in_lbn = metalbn; ap->in_off = off = EXT2_NIADDR - i; ap++; for (++numlevels; i <= EXT2_NIADDR; i++) { /* If searching for a meta-data block, quit when found. */ if (metalbn == realbn) break; off = (bn / blockcnt) % MNINDIR(ump); ++numlevels; ap->in_lbn = metalbn; ap->in_off = off; ++ap; metalbn -= -1 + off * blockcnt; blockcnt /= MNINDIR(ump); } if (nump) *nump = numlevels; return (0); } Index: head/sys/fs/ext2fs/ext2_extattr.c =================================================================== --- head/sys/fs/ext2fs/ext2_extattr.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_extattr.c (revision 324706) @@ -1,1225 +1,1225 @@ /*- * Copyright (c) 2017, Fedor Uporov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int ext2_extattr_attrnamespace_to_bsd(int attrnamespace) { switch (attrnamespace) { case EXT4_XATTR_INDEX_SYSTEM: return (EXTATTR_NAMESPACE_SYSTEM); case EXT4_XATTR_INDEX_USER: return (EXTATTR_NAMESPACE_USER); case EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT: return (POSIX1E_ACL_DEFAULT_EXTATTR_NAMESPACE); case EXT4_XATTR_INDEX_POSIX_ACL_ACCESS: return (POSIX1E_ACL_ACCESS_EXTATTR_NAMESPACE); } return (EXTATTR_NAMESPACE_EMPTY); } static const char * ext2_extattr_name_to_bsd(int attrnamespace, const char *name, int* name_len) { if (attrnamespace == EXT4_XATTR_INDEX_SYSTEM) return (name); else if (attrnamespace == EXT4_XATTR_INDEX_USER) return (name); else if (attrnamespace == EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT) { *name_len = strlen(POSIX1E_ACL_DEFAULT_EXTATTR_NAME); return (POSIX1E_ACL_DEFAULT_EXTATTR_NAME); } else if (attrnamespace == EXT4_XATTR_INDEX_POSIX_ACL_ACCESS) { *name_len = strlen(POSIX1E_ACL_ACCESS_EXTATTR_NAME); return (POSIX1E_ACL_ACCESS_EXTATTR_NAME); } /* * XXX: Not all linux namespaces are mapped to bsd for now, * return NULL, which will be converted to ENOTSUP on upper layer. */ #ifdef EXT2FS_DEBUG printf("can not convert ext2fs name to bsd: namespace=%d\n", attrnamespace); #endif return (NULL); } static int ext2_extattr_attrnamespace_to_linux(int attrnamespace, const char *name) { if (attrnamespace == POSIX1E_ACL_DEFAULT_EXTATTR_NAMESPACE && !strcmp(name, POSIX1E_ACL_DEFAULT_EXTATTR_NAME)) return (EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT); if (attrnamespace == POSIX1E_ACL_ACCESS_EXTATTR_NAMESPACE && !strcmp(name, POSIX1E_ACL_ACCESS_EXTATTR_NAME)) return (EXT4_XATTR_INDEX_POSIX_ACL_ACCESS); switch (attrnamespace) { case EXTATTR_NAMESPACE_SYSTEM: return (EXT4_XATTR_INDEX_SYSTEM); case EXTATTR_NAMESPACE_USER: return (EXT4_XATTR_INDEX_USER); } /* * In this case namespace conversion should be unique, * so this point is unreachable. 
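The listing routines that follow copy each attribute name out as a single length byte followed by the name with no NUL terminator, which is the format extattr_list_file(2) presents to userland. A small userspace consumer of a buffer in that layout, with invented attribute names:

#include <stdio.h>

int
main(void)
{
	/* "foo" (length 3) and "checksum" (length 8), length-prefixed. */
	unsigned char list[] = "\x03" "foo" "\x08" "checksum";
	size_t len = sizeof(list) - 1;	/* drop the string literal's trailing NUL */

	for (size_t off = 0; off < len; ) {
		unsigned nlen = list[off++];	/* one-byte name length */
		printf("attribute: %.*s\n", (int)nlen, (const char *)&list[off]);
		off += nlen;			/* names are not NUL-terminated */
	}
	return (0);
}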
*/ return (-1); } static const char * ext2_extattr_name_to_linux(int attrnamespace, const char *name) { if (attrnamespace == POSIX1E_ACL_DEFAULT_EXTATTR_NAMESPACE || attrnamespace == POSIX1E_ACL_ACCESS_EXTATTR_NAMESPACE) return (""); else return (name); } int ext2_extattr_valid_attrname(int attrnamespace, const char *attrname) { if (attrnamespace == EXTATTR_NAMESPACE_EMPTY) return (EINVAL); if (strlen(attrname) == 0) return (EINVAL); if (strlen(attrname) + 1 > EXT2_EXTATTR_NAMELEN_MAX) return (ENAMETOOLONG); return (0); } static int ext2_extattr_check(struct ext2fs_extattr_entry *entry, char *end) { struct ext2fs_extattr_entry *next; while (!EXT2_IS_LAST_ENTRY(entry)) { next = EXT2_EXTATTR_NEXT(entry); if ((char *)next >= end) return (EIO); entry = next; } return (0); } int ext2_extattr_inode_list(struct inode *ip, int attrnamespace, struct uio *uio, size_t *size) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_dinode_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } struct ext2fs_dinode *dinode = (struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)); /* Check attributes magic value */ header = (struct ext2fs_extattr_dinode_header *)((char *)dinode + E2FS_REV0_INODE_SIZE + dinode->e2di_extra_isize); if (header->h_magic != EXTATTR_MAGIC) { brelse(bp); return (0); } error = ext2_extattr_check(EXT2_IFIRST(header), (char *)dinode + EXT2_INODE_SIZE(fs)); if (error) { brelse(bp); return (error); } for (entry = EXT2_IFIRST(header); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (uio == NULL) *size += name_len + 1; else { char *name = malloc(name_len + 1, M_TEMP, M_WAITOK); name[0] = name_len; memcpy(&name[1], attr_name, name_len); error = uiomove(name, name_len + 1, uio); free(name, M_TEMP); if (error) break; } } brelse(bp); return (error); } int ext2_extattr_block_list(struct inode *ip, int attrnamespace, struct uio *uio, size_t *size) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_check(EXT2_FIRST_ENTRY(bp), bp->b_data + bp->b_bufsize); if (error) { brelse(bp); return (error); } for (entry = EXT2_FIRST_ENTRY(bp); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (uio == NULL) *size += name_len + 1; else { char *name = malloc(name_len + 1, M_TEMP, M_WAITOK); name[0] = name_len; memcpy(&name[1], attr_name, name_len); error = uiomove(name, name_len + 1, uio); free(name, 
M_TEMP); if (error) break; } } brelse(bp); return (error); } int ext2_extattr_inode_get(struct inode *ip, int attrnamespace, const char *name, struct uio *uio, size_t *size) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_dinode_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } struct ext2fs_dinode *dinode = (struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)); /* Check attributes magic value */ header = (struct ext2fs_extattr_dinode_header *)((char *)dinode + E2FS_REV0_INODE_SIZE + dinode->e2di_extra_isize); if (header->h_magic != EXTATTR_MAGIC) { brelse(bp); return (ENOATTR); } error = ext2_extattr_check(EXT2_IFIRST(header), (char *)dinode + EXT2_INODE_SIZE(fs)); if (error) { brelse(bp); return (error); } for (entry = EXT2_IFIRST(header); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { if (uio == NULL) *size += entry->e_value_size; else { error = uiomove(((char *)EXT2_IFIRST(header)) + entry->e_value_offs, entry->e_value_size, uio); } brelse(bp); return (error); } } brelse(bp); return (ENOATTR); } int ext2_extattr_block_get(struct inode *ip, int attrnamespace, const char *name, struct uio *uio, size_t *size) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_check(EXT2_FIRST_ENTRY(bp), bp->b_data + bp->b_bufsize); if (error) { brelse(bp); return (error); } for (entry = EXT2_FIRST_ENTRY(bp); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { if (uio == NULL) *size += entry->e_value_size; else { error = uiomove(bp->b_data + entry->e_value_offs, entry->e_value_size, uio); } brelse(bp); return (error); } } brelse(bp); return (ENOATTR); } static uint16_t ext2_extattr_delete_value(char *off, struct ext2fs_extattr_entry *first_entry, struct ext2fs_extattr_entry *entry, char *end) { uint16_t min_offs; struct ext2fs_extattr_entry *next; min_offs = end - off; next = first_entry; while (!EXT2_IS_LAST_ENTRY(next)) { if (min_offs > next->e_value_offs && next->e_value_offs > 0) min_offs = next->e_value_offs; next = EXT2_EXTATTR_NEXT(next); } if (entry->e_value_size == 0) return (min_offs); memmove(off + min_offs + EXT2_EXTATTR_SIZE(entry->e_value_size), off + min_offs, entry->e_value_offs - min_offs); /* Adjust all value offsets */ next = 
first_entry; while (!EXT2_IS_LAST_ENTRY(next)) { if (next->e_value_offs > 0 && next->e_value_offs < entry->e_value_offs) next->e_value_offs += EXT2_EXTATTR_SIZE(entry->e_value_size); next = EXT2_EXTATTR_NEXT(next); } min_offs += EXT2_EXTATTR_SIZE(entry->e_value_size); return (min_offs); } static void ext2_extattr_delete_entry(char *off, struct ext2fs_extattr_entry *first_entry, struct ext2fs_extattr_entry *entry, char *end) { char *pad; struct ext2fs_extattr_entry *next; /* Clean entry value */ ext2_extattr_delete_value(off, first_entry, entry, end); /* Clean the entry */ next = first_entry; while (!EXT2_IS_LAST_ENTRY(next)) next = EXT2_EXTATTR_NEXT(next); pad = (char*)next + sizeof(uint32_t); memmove(entry, (char *)entry + EXT2_EXTATTR_LEN(entry->e_name_len), pad - ((char *)entry + EXT2_EXTATTR_LEN(entry->e_name_len))); } int ext2_extattr_inode_delete(struct inode *ip, int attrnamespace, const char *name) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_dinode_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } struct ext2fs_dinode *dinode = (struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)); /* Check attributes magic value */ header = (struct ext2fs_extattr_dinode_header *)((char *)dinode + E2FS_REV0_INODE_SIZE + dinode->e2di_extra_isize); if (header->h_magic != EXTATTR_MAGIC) { brelse(bp); return (ENOATTR); } error = ext2_extattr_check(EXT2_IFIRST(header), (char *)dinode + EXT2_INODE_SIZE(fs)); if (error) { brelse(bp); return (error); } /* If I am last entry, just make magic zero */ entry = EXT2_IFIRST(header); if ((EXT2_IS_LAST_ENTRY(EXT2_EXTATTR_NEXT(entry))) && (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) == attrnamespace)) { name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { memset(header, 0, sizeof(struct ext2fs_extattr_dinode_header)); return (bwrite(bp)); } } for (entry = EXT2_IFIRST(header); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { ext2_extattr_delete_entry((char *)EXT2_IFIRST(header), EXT2_IFIRST(header), entry, (char *)dinode + EXT2_INODE_SIZE(fs)); return (bwrite(bp)); } } brelse(bp); return (ENOATTR); } static int ext2_extattr_block_clone(struct inode *ip, struct buf **bpp) { struct m_ext2fs *fs; struct buf *sbp; struct buf *cbp; struct ext2fs_extattr_header *header; uint64_t facl; fs = ip->i_e2fs; sbp = *bpp; header = EXT2_HDR(sbp); if (header->h_magic != EXTATTR_MAGIC || header->h_refcount == 1) return (EINVAL); - facl = ext2_allocfacl(ip); + facl = ext2_alloc_meta(ip); if (!facl) return (ENOSPC); cbp = getblk(ip->i_devvp, fsbtodb(fs, facl), fs->e2fs_bsize, 0, 0, 0); if (!cbp) { ext2_blkfree(ip, facl, fs->e2fs_bsize); return (EIO); } memcpy(cbp->b_data, sbp->b_data, fs->e2fs_bsize); header->h_refcount--; bwrite(sbp); ip->i_facl = facl; ext2_update(ip->i_vnode, 1); 
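For context, the copy-on-write rule that ext2_extattr_block_clone() implements here can be sketched in isolation. The toy structure and function below are invented for the example and stand in for struct ext2fs_extattr_header and the buffer-cache handling in the real code.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct toy_ea_block {
	uint32_t refcount;			/* like h_refcount */
	char	 data[4096 - sizeof(uint32_t)];	/* entries and values */
};

/*
 * Before modifying an EA block that other inodes also reference
 * (refcount > 1), make a private copy, drop one reference on the shared
 * original, and continue with the copy whose refcount is 1.
 */
static struct toy_ea_block *
toy_ea_prepare_write(struct toy_ea_block *blk)
{
	struct toy_ea_block *copy;

	if (blk->refcount == 1)
		return (blk);		/* already private, modify in place */

	copy = malloc(sizeof(*copy));
	if (copy == NULL)
		return (NULL);
	memcpy(copy, blk, sizeof(*copy));
	blk->refcount--;		/* one less user of the shared block */
	copy->refcount = 1;		/* the copy belongs to this inode only */
	return (copy);
}

Both ext2_extattr_block_delete() and ext2_extattr_block_set() apply this rule before touching a block whose h_refcount is greater than 1.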
header = EXT2_HDR(cbp); header->h_refcount = 1; *bpp = cbp; return (0); } int ext2_extattr_block_delete(struct inode *ip, int attrnamespace, const char *name) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; int error; fs = ip->i_e2fs; error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_check(EXT2_FIRST_ENTRY(bp), bp->b_data + bp->b_bufsize); if (error) { brelse(bp); return (error); } if (header->h_refcount > 1) { error = ext2_extattr_block_clone(ip, &bp); if (error) { brelse(bp); return (error); } } /* If I am last entry, clean me and free the block */ entry = EXT2_FIRST_ENTRY(bp); if (EXT2_IS_LAST_ENTRY(EXT2_EXTATTR_NEXT(entry)) && (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) == attrnamespace)) { name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { ip->i_blocks -= btodb(fs->e2fs_bsize); ext2_blkfree(ip, ip->i_facl, fs->e2fs_bsize); ip->i_facl = 0; error = ext2_update(ip->i_vnode, 1); brelse(bp); return (error); } } for (entry = EXT2_FIRST_ENTRY(bp); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) { ext2_extattr_delete_entry(bp->b_data, EXT2_FIRST_ENTRY(bp), entry, bp->b_data + bp->b_bufsize); return (bwrite(bp)); } } brelse(bp); return (ENOATTR); } static struct ext2fs_extattr_entry * allocate_entry(const char *name, int attrnamespace, uint16_t offs, uint32_t size, uint32_t hash) { const char *attr_name; int name_len; struct ext2fs_extattr_entry *entry; attr_name = ext2_extattr_name_to_linux(attrnamespace, name); name_len = strlen(attr_name); entry = malloc(sizeof(struct ext2fs_extattr_entry) + name_len, M_TEMP, M_WAITOK); entry->e_name_len = name_len; entry->e_name_index = ext2_extattr_attrnamespace_to_linux(attrnamespace, name); entry->e_value_offs = offs; entry->e_value_block = 0; entry->e_value_size = size; entry->e_hash = hash; memcpy(entry->e_name, name, name_len); return (entry); } static void free_entry(struct ext2fs_extattr_entry *entry) { free(entry, M_TEMP); } static int ext2_extattr_get_size(struct ext2fs_extattr_entry *first_entry, struct ext2fs_extattr_entry *exist_entry, int header_size, int name_len, int new_size) { struct ext2fs_extattr_entry *entry; int size; size = header_size; size += sizeof(uint32_t); if (NULL == exist_entry) { size += EXT2_EXTATTR_LEN(name_len); size += EXT2_EXTATTR_SIZE(new_size); } if (first_entry) for (entry = first_entry; !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (entry != exist_entry) size += EXT2_EXTATTR_LEN(entry->e_name_len) + EXT2_EXTATTR_SIZE(entry->e_value_size); else size += EXT2_EXTATTR_LEN(entry->e_name_len) + EXT2_EXTATTR_SIZE(new_size); } return (size); } static void ext2_extattr_set_exist_entry(char *off, struct ext2fs_extattr_entry 
*first_entry, struct ext2fs_extattr_entry *entry, char *end, struct uio *uio) { uint16_t min_offs; min_offs = ext2_extattr_delete_value(off, first_entry, entry, end); entry->e_value_size = uio->uio_resid; if (entry->e_value_size) entry->e_value_offs = min_offs - EXT2_EXTATTR_SIZE(uio->uio_resid); else entry->e_value_offs = 0; uiomove(off + entry->e_value_offs, entry->e_value_size, uio); } static struct ext2fs_extattr_entry * ext2_extattr_set_new_entry(char *off, struct ext2fs_extattr_entry *first_entry, const char *name, int attrnamespace, char *end, struct uio *uio) { int name_len; char *pad; uint16_t min_offs; struct ext2fs_extattr_entry *entry; struct ext2fs_extattr_entry *new_entry; /* Find pad's */ min_offs = end - off; entry = first_entry; while (!EXT2_IS_LAST_ENTRY(entry)) { if (min_offs > entry->e_value_offs && entry->e_value_offs > 0) min_offs = entry->e_value_offs; entry = EXT2_EXTATTR_NEXT(entry); } pad = (char*)entry + sizeof(uint32_t); /* Find entry insert position */ name_len = strlen(name); entry = first_entry; while (!EXT2_IS_LAST_ENTRY(entry)) { if (!(attrnamespace - entry->e_name_index) && !(name_len - entry->e_name_len)) if (memcmp(name, entry->e_name, name_len) <= 0) break; entry = EXT2_EXTATTR_NEXT(entry); } /* Create new entry and insert it */ new_entry = allocate_entry(name, attrnamespace, 0, uio->uio_resid, 0); memmove((char *)entry + EXT2_EXTATTR_LEN(new_entry->e_name_len), entry, pad - (char*)entry); memcpy(entry, new_entry, EXT2_EXTATTR_LEN(new_entry->e_name_len)); free_entry(new_entry); new_entry = entry; if (new_entry->e_value_size > 0) new_entry->e_value_offs = min_offs - EXT2_EXTATTR_SIZE(new_entry->e_value_size); uiomove(off + new_entry->e_value_offs, new_entry->e_value_size, uio); return (new_entry); } int ext2_extattr_inode_set(struct inode *ip, int attrnamespace, const char *name, struct uio *uio) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_dinode_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; size_t size = 0, max_size; int error; fs = ip->i_e2fs; if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } struct ext2fs_dinode *dinode = (struct ext2fs_dinode *) ((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)); /* Check attributes magic value */ header = (struct ext2fs_extattr_dinode_header *)((char *)dinode + E2FS_REV0_INODE_SIZE + dinode->e2di_extra_isize); if (header->h_magic != EXTATTR_MAGIC) { brelse(bp); return (ENOSPC); } error = ext2_extattr_check(EXT2_IFIRST(header), (char *)dinode + EXT2_INODE_SIZE(fs)); if (error) { brelse(bp); return (error); } /* Find if entry exist */ for (entry = EXT2_IFIRST(header); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) break; } max_size = EXT2_INODE_SIZE(fs) - E2FS_REV0_INODE_SIZE - dinode->e2di_extra_isize; if (!EXT2_IS_LAST_ENTRY(entry)) { size = ext2_extattr_get_size(EXT2_IFIRST(header), entry, sizeof(struct ext2fs_extattr_dinode_header), entry->e_name_len, uio->uio_resid); if (size > max_size) { brelse(bp); return (ENOSPC); } ext2_extattr_set_exist_entry((char *)EXT2_IFIRST(header), EXT2_IFIRST(header), 
entry, (char *)header + max_size, uio); } else { /* Ensure that the same entry does not exist in the block */ if (ip->i_facl) { error = ext2_extattr_block_get(ip, attrnamespace, name, NULL, &size); if (error != ENOATTR || size > 0) { brelse(bp); if (size > 0) error = ENOSPC; return (error); } } size = ext2_extattr_get_size(EXT2_IFIRST(header), NULL, sizeof(struct ext2fs_extattr_dinode_header), entry->e_name_len, uio->uio_resid); if (size > max_size) { brelse(bp); return (ENOSPC); } ext2_extattr_set_new_entry((char *)EXT2_IFIRST(header), EXT2_IFIRST(header), name, attrnamespace, (char *)header + max_size, uio); } return (bwrite(bp)); } static void ext2_extattr_hash_entry(struct ext2fs_extattr_header *header, struct ext2fs_extattr_entry *entry) { uint32_t hash = 0; char *name = entry->e_name; int n; for (n=0; n < entry->e_name_len; n++) { hash = (hash << EXT2_EXTATTR_NAME_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - EXT2_EXTATTR_NAME_HASH_SHIFT)) ^ (*name++); } if (entry->e_value_block == 0 && entry->e_value_size != 0) { uint32_t *value = (uint32_t *)((char *)header + entry->e_value_offs); for (n = (entry->e_value_size + EXT2_EXTATTR_ROUND) >> EXT2_EXTATTR_PAD_BITS; n; n--) { hash = (hash << EXT2_EXTATTR_VALUE_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - EXT2_EXTATTR_VALUE_HASH_SHIFT)) ^ (*value++); } } entry->e_hash = hash; } static void ext2_extattr_rehash(struct ext2fs_extattr_header *header, struct ext2fs_extattr_entry *entry) { struct ext2fs_extattr_entry *here; uint32_t hash = 0; ext2_extattr_hash_entry(header, entry); here = EXT2_ENTRY(header+1); while (!EXT2_IS_LAST_ENTRY(here)) { if (!here->e_hash) { /* Block is not shared if an entry's hash value == 0 */ hash = 0; break; } hash = (hash << EXT2_EXTATTR_BLOCK_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - EXT2_EXTATTR_BLOCK_HASH_SHIFT)) ^ here->e_hash; here = EXT2_EXTATTR_NEXT(here); } header->h_hash = hash; } int ext2_extattr_block_set(struct inode *ip, int attrnamespace, const char *name, struct uio *uio) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; struct ext2fs_extattr_entry *entry; const char *attr_name; int name_len; size_t size; int error; fs = ip->i_e2fs; if (ip->i_facl) { error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_check(EXT2_FIRST_ENTRY(bp), bp->b_data + bp->b_bufsize); if (error) { brelse(bp); return (error); } if (header->h_refcount > 1) { error = ext2_extattr_block_clone(ip, &bp); if (error) { brelse(bp); return (error); } header = EXT2_HDR(bp); } /* Find if entry exist */ for (entry = EXT2_FIRST_ENTRY(bp); !EXT2_IS_LAST_ENTRY(entry); entry = EXT2_EXTATTR_NEXT(entry)) { if (ext2_extattr_attrnamespace_to_bsd(entry->e_name_index) != attrnamespace) continue; name_len = entry->e_name_len; attr_name = ext2_extattr_name_to_bsd(entry->e_name_index, entry->e_name, &name_len); if (!attr_name) { brelse(bp); return (ENOTSUP); } if (strlen(name) == name_len && 0 == strncmp(attr_name, name, name_len)) break; } if (!EXT2_IS_LAST_ENTRY(entry)) { size = ext2_extattr_get_size(EXT2_FIRST_ENTRY(bp), entry, sizeof(struct ext2fs_extattr_header), entry->e_name_len, uio->uio_resid); if (size > bp->b_bufsize) { brelse(bp); return (ENOSPC); } ext2_extattr_set_exist_entry(bp->b_data, EXT2_FIRST_ENTRY(bp), entry, bp->b_data + bp->b_bufsize, uio); } else { size = 
ext2_extattr_get_size(EXT2_FIRST_ENTRY(bp), NULL, sizeof(struct ext2fs_extattr_header), strlen(name), uio->uio_resid); if (size > bp->b_bufsize) { brelse(bp); return (ENOSPC); } entry = ext2_extattr_set_new_entry(bp->b_data, EXT2_FIRST_ENTRY(bp), name, attrnamespace, bp->b_data + bp->b_bufsize, uio); /* Clean the same entry in the inode */ error = ext2_extattr_inode_delete(ip, attrnamespace, name); if (error && error != ENOATTR) { brelse(bp); return (error); } } ext2_extattr_rehash(header, entry); return (bwrite(bp)); } size = ext2_extattr_get_size(NULL, NULL, sizeof(struct ext2fs_extattr_header), strlen(ext2_extattr_name_to_linux(attrnamespace, name)), uio->uio_resid); if (size > fs->e2fs_bsize) return (ENOSPC); /* Allocate block, fill EA header and insert entry */ - ip->i_facl = ext2_allocfacl(ip); + ip->i_facl = ext2_alloc_meta(ip); if (0 == ip->i_facl) return (ENOSPC); ip->i_blocks += btodb(fs->e2fs_bsize); ext2_update(ip->i_vnode, 1); bp = getblk(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, 0, 0, 0); if (!bp) { ext2_blkfree(ip, ip->i_facl, fs->e2fs_bsize); ip->i_blocks -= btodb(fs->e2fs_bsize); ip->i_facl = 0; ext2_update(ip->i_vnode, 1); return (EIO); } header = EXT2_HDR(bp); header->h_magic = EXTATTR_MAGIC; header->h_refcount = 1; header->h_blocks = 1; header->h_hash = 0; memset(header->h_reserved, 0, sizeof(header->h_reserved)); memcpy(bp->b_data, header, sizeof(struct ext2fs_extattr_header)); memset(EXT2_FIRST_ENTRY(bp), 0, sizeof(uint32_t)); entry = ext2_extattr_set_new_entry(bp->b_data, EXT2_FIRST_ENTRY(bp), name, attrnamespace, bp->b_data + bp->b_bufsize, uio); /* Clean the same entry in the inode */ error = ext2_extattr_inode_delete(ip, attrnamespace, name); if (error && error != ENOATTR) { brelse(bp); return (error); } ext2_extattr_rehash(header, entry); return (bwrite(bp)); } int ext2_extattr_free(struct inode *ip) { struct m_ext2fs *fs; struct buf *bp; struct ext2fs_extattr_header *header; int error; fs = ip->i_e2fs; if (!ip->i_facl) return (0); error = bread(ip->i_devvp, fsbtodb(fs, ip->i_facl), fs->e2fs_bsize, NOCRED, &bp); if (error) { brelse(bp); return (error); } /* Check attributes magic value */ header = EXT2_HDR(bp); if (header->h_magic != EXTATTR_MAGIC || header->h_blocks != 1) { brelse(bp); return (EINVAL); } error = ext2_extattr_check(EXT2_FIRST_ENTRY(bp), bp->b_data + bp->b_bufsize); if (error) { brelse(bp); return (error); } if (header->h_refcount > 1) { header->h_refcount--; bwrite(bp); } else { ext2_blkfree(ip, ip->i_facl, ip->i_e2fs->e2fs_bsize); brelse(bp); } ip->i_blocks -= btodb(ip->i_e2fs->e2fs_bsize); ip->i_facl = 0; ext2_update(ip->i_vnode, 1); return (0); } Index: head/sys/fs/ext2fs/ext2_extents.c =================================================================== --- head/sys/fs/ext2fs/ext2_extents.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_extents.c (revision 324706) @@ -1,219 +1,1587 @@ /*- * Copyright (c) 2010 Zheng Liu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
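The on-disk extent structures handled by ext2_extents.c below keep 48-bit physical block numbers split across a 32-bit low field and a 16-bit high field (e_start_lo/e_start_hi, ei_leaf_lo/ei_leaf_hi). A minimal sketch of the packing, with names invented for the example:

#include <stdint.h>

struct toy_split48 {
	uint32_t lo;	/* low 32 bits, e.g. e_start_lo / ei_leaf_lo */
	uint16_t hi;	/* high 16 bits, e.g. e_start_hi / ei_leaf_hi */
};

static void
toy_store_pblock(struct toy_split48 *s, uint64_t pb)
{
	s->lo = pb & 0xffffffff;
	s->hi = (pb >> 32) & 0xffff;
}

static uint64_t
toy_load_pblock(const struct toy_split48 *s)
{
	return ((uint64_t)s->hi << 32 | s->lo);
}

Loading after storing returns the original value for any block number below 2^48, matching what ext4_index_store_pblock() and ext4_ext_index_pblock() do in the code added below.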
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include -static bool -ext4_ext_binsearch_index(struct inode *ip, struct ext4_extent_path *path, - daddr_t lbn, daddr_t *first_lbn, daddr_t *last_lbn){ - struct ext4_extent_header *ehp = path->ep_header; - struct ext4_extent_index *first, *last, *l, *r, *m; +static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents"); - first = (struct ext4_extent_index *)(char *)(ehp + 1); - last = first + ehp->eh_ecount - 1; - l = first; - r = last; - while (l <= r) { - m = l + (r - l) / 2; - if (lbn < m->ei_blk) - r = m - 1; - else - l = m + 1; - } +#ifdef EXT2FS_DEBUG +static void +ext4_ext_print_extent(struct ext4_extent *ep) +{ - if (l == first) { - path->ep_sparse_ext.e_blk = *first_lbn; - path->ep_sparse_ext.e_len = first->ei_blk - *first_lbn; - path->ep_sparse_ext.e_start_hi = 0; - path->ep_sparse_ext.e_start_lo = 0; - path->ep_is_sparse = true; - return (true); - } - path->ep_index = l - 1; - *first_lbn = path->ep_index->ei_blk; - if (path->ep_index < last) - *last_lbn = l->ei_blk - 1; - return (false); + printf(" ext %p => (blk %u len %u start %lu)\n", + ep, ep->e_blk, ep->e_len, + (uint64_t)ep->e_start_hi << 32 | ep->e_start_lo); } +static void ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp); + static void -ext4_ext_binsearch(struct inode *ip, struct ext4_extent_path *path, daddr_t lbn, - daddr_t first_lbn, daddr_t last_lbn) +ext4_ext_print_index(struct inode *ip, struct ext4_extent_index *ex, int do_walk) { - struct ext4_extent_header *ehp = path->ep_header; - struct ext4_extent *first, *l, *r, *m; + struct m_ext2fs *fs; + struct buf *bp; + int error; - if (ehp->eh_ecount == 0) - return; + fs = ip->i_e2fs; - first = (struct ext4_extent *)(char *)(ehp + 1); - l = first; - r = first + ehp->eh_ecount - 1; - while (l <= r) { - m = l + (r - l) / 2; - if (lbn < m->e_blk) - r = m - 1; - else - l = m + 1; - } + printf(" index %p => (blk %u pblk %lu)\n", + ex, ex->ei_blk, (uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo); - if (l == first) { - path->ep_sparse_ext.e_blk = first_lbn; - path->ep_sparse_ext.e_len = first->e_blk - first_lbn; - path->ep_sparse_ext.e_start_hi = 0; - path->ep_sparse_ext.e_start_lo = 0; - path->ep_is_sparse = true; + if(!do_walk) return; + + if ((error = bread(ip->i_devvp, + fsbtodb(fs, ((uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo)), + (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { + brelse(bp); + return; } - path->ep_ext = l - 1; - if (path->ep_ext->e_blk + path->ep_ext->e_len <= lbn) { - path->ep_sparse_ext.e_blk = path->ep_ext->e_blk + - path->ep_ext->e_len; - if (l <= (first + ehp->eh_ecount - 1)) - path->ep_sparse_ext.e_len = l->e_blk - - 
path->ep_sparse_ext.e_blk; + + ext4_ext_print_header(ip, (struct ext4_extent_header *)bp->b_data); + + brelse(bp); + +} + +static void +ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp) +{ + int i; + + printf("header %p => (magic 0x%x entries %d max %d depth %d gen %d)\n", + ehp, ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth, + ehp->eh_gen); + + for (i = 0; i < ehp->eh_ecount; i++) + if (ehp->eh_depth != 0) + ext4_ext_print_index(ip, + (struct ext4_extent_index *)(ehp + 1 + i), 1); else - path->ep_sparse_ext.e_len = last_lbn - - path->ep_sparse_ext.e_blk + 1; - path->ep_sparse_ext.e_start_hi = 0; - path->ep_sparse_ext.e_start_lo = 0; - path->ep_is_sparse = true; + ext4_ext_print_extent((struct ext4_extent *)(ehp + 1 + i)); +} + +static void +ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path) +{ + int k, l; + + l = path->ep_depth + + printf("ip=%d, Path:\n", ip->i_number); + for (k = 0; k <= l; k++, path++) { + if (path->ep_index) { + ext4_ext_print_index(ip, path->ep_index, 0); + } else if (path->ep_ext) { + ext4_ext_print_extent(path->ep_ext); + } } } -/* - * Find a block in ext4 extent cache. - */ +void +ext4_ext_print_extent_tree_status(struct inode * ip) +{ + struct m_ext2fs *fs; + struct ext4_extent_header *ehp; + + fs = ip->i_e2fs; + ehp = (struct ext4_extent_header *)(char *)ip->i_db; + + printf("Extent status:ip=%d\n", ip->i_number); + if (!(ip->i_flag & IN_E4EXTENTS)) + return; + + ext4_ext_print_header(ip, ehp); + + return; +} +#endif + +static inline struct ext4_extent_header * +ext4_ext_inode_header(struct inode *ip) +{ + + return ((struct ext4_extent_header *)ip->i_db); +} + +static inline struct ext4_extent_header * +ext4_ext_block_header(char *bdata) +{ + + return ((struct ext4_extent_header *)bdata); +} + +static inline unsigned short +ext4_ext_inode_depth(struct inode *ip) +{ + struct ext4_extent_header *ehp; + + ehp = (struct ext4_extent_header *)ip->i_data; + return (ehp->eh_depth); +} + +static inline e4fs_daddr_t +ext4_ext_index_pblock(struct ext4_extent_index *index) +{ + e4fs_daddr_t blk; + + blk = index->ei_leaf_lo; + blk |= (e4fs_daddr_t)index->ei_leaf_hi << 32; + + return (blk); +} + +static inline void +ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb) +{ + + index->ei_leaf_lo = pb & 0xffffffff; + index->ei_leaf_hi = (pb >> 32) & 0xffff; +} + + +static inline e4fs_daddr_t +ext4_ext_extent_pblock(struct ext4_extent *extent) +{ + e4fs_daddr_t blk; + + blk = extent->e_start_lo; + blk |= (e4fs_daddr_t)extent->e_start_hi << 32; + + return (blk); +} + +static inline void +ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb) +{ + + ex->e_start_lo = pb & 0xffffffff; + ex->e_start_hi = (pb >> 32) & 0xffff; +} + int ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep) { struct ext4_extent_cache *ecp; int ret = EXT4_EXT_CACHE_NO; ecp = &ip->i_ext_cache; - - /* cache is invalid */ if (ecp->ec_type == EXT4_EXT_CACHE_NO) return (ret); if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) { ep->e_blk = ecp->ec_blk; ep->e_start_lo = ecp->ec_start & 0xffffffff; ep->e_start_hi = ecp->ec_start >> 32 & 0xffff; ep->e_len = ecp->ec_len; ret = ecp->ec_type; } return (ret); } -/* - * Put an ext4_extent structure in ext4 cache. 
- */ +static int +ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh) +{ + struct m_ext2fs *fs; + char *error_msg; + + fs = ip->i_e2fs; + + if (eh->eh_magic != EXT4_EXT_MAGIC) { + error_msg = "invalid magic"; + goto corrupted; + } + if (eh->eh_max == 0) { + error_msg = "invalid eh_max"; + goto corrupted; + } + if (eh->eh_ecount > eh->eh_max) { + error_msg = "invalid eh_entries"; + goto corrupted; + } + + return (0); + +corrupted: + ext2_fserr(fs, ip->i_uid, error_msg); + return (EIO); +} + +static void +ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk) +{ + struct ext4_extent_header *eh; + struct ext4_extent_index *r, *l, *m; + + eh = path->ep_header; + + KASSERT(eh->eh_ecount <= eh->eh_max && eh->eh_ecount > 0, + ("ext4_ext_binsearch_index: bad args")); + + l = EXT_FIRST_INDEX(eh) + 1; + r = EXT_FIRST_INDEX(eh) + eh->eh_ecount - 1; + while (l <= r) { + m = l + (r - l) / 2; + if (blk < m->ei_blk) + r = m - 1; + else + l = m + 1; + } + + path->ep_index = l - 1; +} + +static void +ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk) +{ + struct ext4_extent_header *eh; + struct ext4_extent *r, *l, *m; + + eh = path->ep_header; + + KASSERT(eh->eh_ecount <= eh->eh_max, + ("ext4_ext_binsearch_ext: bad args")); + + if (eh->eh_ecount == 0) + return; + + l = EXT_FIRST_EXTENT(eh) + 1; + r = EXT_FIRST_EXTENT(eh) + eh->eh_ecount - 1; + + while (l <= r) { + m = l + (r - l) / 2; + if (blk < m->e_blk) + r = m - 1; + else + l = m + 1; + } + + path->ep_ext = l - 1; +} + +static int +ext4_ext_fill_path_bdata(struct ext4_extent_path *path, + struct buf *bp, uint64_t blk) +{ + + KASSERT(path->ep_data == NULL, + ("ext4_ext_fill_path_bdata: bad ep_data")); + + path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK); + if (!path->ep_data) + return (ENOMEM); + + memcpy(path->ep_data, bp->b_data, bp->b_bufsize); + path->ep_blk = blk; + + return (0); +} + +static void +ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp) +{ + + KASSERT(path->ep_data != NULL, + ("ext4_ext_fill_path_buf: bad ep_data")); + + memcpy(bp->b_data, path->ep_data, bp->b_bufsize); +} + +static void +ext4_ext_drop_refs(struct ext4_extent_path *path) +{ + int depth, i; + + if (!path) + return; + + depth = path->ep_depth; + for (i = 0; i <= depth; i++, path++) + if (path->ep_data) { + free(path->ep_data, M_EXT2EXTENTS); + path->ep_data = NULL; + } +} + void -ext4_ext_put_cache(struct inode *ip, struct ext4_extent *ep, int type) +ext4_ext_path_free(struct ext4_extent_path *path) { - struct ext4_extent_cache *ecp; - ecp = &ip->i_ext_cache; - ecp->ec_type = type; - ecp->ec_blk = ep->e_blk; - ecp->ec_len = ep->e_len; - ecp->ec_start = (daddr_t)ep->e_start_hi << 32 | ep->e_start_lo; + if (!path) + return; + + ext4_ext_drop_refs(path); + free(path, M_EXT2EXTENTS); } -/* - * Find an extent. 
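A stand-alone sketch of the lookup that ext4_ext_binsearch_ext() above performs: a binary search for the last entry in a sorted extent array whose first logical block does not exceed the target. The toy_extent type is invented for the example; the kernel walks struct ext4_extent entries that follow the header.

#include <stdint.h>

struct toy_extent {
	uint32_t e_blk;		/* first logical block covered */
	uint16_t e_len;		/* number of blocks covered */
};

/*
 * Return the index of the candidate extent for lbn, or -1 when the array
 * is empty.  As in the kernel code, the result can still be an extent that
 * does not cover lbn; the caller checks e_blk/e_len afterwards.
 */
static int
toy_extent_search(const struct toy_extent *ex, int count, uint32_t lbn)
{
	int l, r, m;

	if (count == 0)
		return (-1);
	l = 1;
	r = count - 1;
	while (l <= r) {
		m = l + (r - l) / 2;
		if (lbn < ex[m].e_blk)
			r = m - 1;
		else
			l = m + 1;
	}
	return (l - 1);
}

For example, with extents starting at logical blocks 0, 100 and 200, a lookup of block 150 selects the second entry; whether that extent actually covers block 150 is then checked against e_len, as ext4_ext_get_blocks() does.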
- */ -struct ext4_extent_path * -ext4_ext_find_extent(struct m_ext2fs *fs, struct inode *ip, - daddr_t lbn, struct ext4_extent_path *path) +int +ext4_ext_find_extent(struct inode *ip, daddr_t block, + struct ext4_extent_path **ppath) { + struct m_ext2fs *fs; + struct ext4_extent_header *eh; + struct ext4_extent_path *path; + struct buf *bp; + uint64_t blk; + int error, depth, i, ppos, alloc; + + fs = ip->i_e2fs; + eh = ext4_ext_inode_header(ip); + depth = ext4_ext_inode_depth(ip); + ppos = 0; + alloc = 0; + + error = ext4_ext_check_header(ip, eh); + if (error) + return (error); + + if (!ppath) + return (EINVAL); + + path = *ppath; + if (!path) { + path = malloc(EXT4_EXT_DEPTH_MAX * + sizeof(struct ext4_extent_path), + M_EXT2EXTENTS, M_WAITOK | M_ZERO); + if (!path) + return (ENOMEM); + + *ppath = path; + alloc = 1; + } + + path[0].ep_header = eh; + path[0].ep_data = NULL; + + /* Walk through the tree. */ + i = depth; + while (i) { + ext4_ext_binsearch_index(&path[ppos], block); + blk = ext4_ext_index_pblock(path[ppos].ep_index); + path[ppos].ep_depth = i; + path[ppos].ep_ext = NULL; + + error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk), + ip->i_e2fs->e2fs_bsize, NOCRED, &bp); + if (error) { + brelse(bp); + goto error; + } + + ppos++; + if (ppos > depth) { + ext2_fserr(fs, ip->i_uid, + "ppos > depth => extent corrupted"); + error = EIO; + brelse(bp); + goto error; + } + + ext4_ext_fill_path_bdata(&path[ppos], bp, blk); + brelse(bp); + + eh = ext4_ext_block_header(path[ppos].ep_data); + error = ext4_ext_check_header(ip, eh); + if (error) + goto error; + + path[ppos].ep_header = eh; + + i--; + } + + error = ext4_ext_check_header(ip, eh); + if (error) + goto error; + + /* Find extent. */ + path[ppos].ep_depth = i; + path[ppos].ep_header = eh; + path[ppos].ep_ext = NULL; + path[ppos].ep_index = NULL; + ext4_ext_binsearch_ext(&path[ppos], block); + return (0); + +error: + ext4_ext_drop_refs(path); + if (alloc) + free(path, M_EXT2EXTENTS); + + *ppath = NULL; + + return (error); +} + +static inline int +ext4_ext_space_root(struct inode *ip) +{ + int size; + + size = sizeof(ip->i_data); + size -= sizeof(struct ext4_extent_header); + size /= sizeof(struct ext4_extent); + + return (size); +} + +static inline int +ext4_ext_space_block(struct inode *ip) +{ + struct m_ext2fs *fs; + int size; + + fs = ip->i_e2fs; + + size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) / + sizeof(struct ext4_extent); + + return (size); +} + +static inline int +ext4_ext_space_block_index(struct inode *ip) +{ + struct m_ext2fs *fs; + int size; + + fs = ip->i_e2fs; + + size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) / + sizeof(struct ext4_extent_index); + + return (size); +} + +void +ext4_ext_tree_init(struct inode *ip) +{ struct ext4_extent_header *ehp; - uint16_t i; - int error, size; - daddr_t nblk; - ehp = (struct ext4_extent_header *)(char *)ip->i_db; + ip->i_flag |= IN_E4EXTENTS; - if (ehp->eh_magic != EXT4_EXT_MAGIC) - return (NULL); + memset(ip->i_data, 0, EXT2_NDADDR + EXT2_NIADDR); + ehp = (struct ext4_extent_header *)ip->i_data; + ehp->eh_magic = EXT4_EXT_MAGIC; + ehp->eh_max = ext4_ext_space_root(ip); + ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO; + ip->i_flag |= IN_CHANGE | IN_UPDATE; + ext2_update(ip->i_vnode, 1); +} - path->ep_header = ehp; +static inline void +ext4_ext_put_in_cache(struct inode *ip, uint32_t blk, + uint32_t len, uint32_t start, int type) +{ - daddr_t first_lbn = 0; - daddr_t last_lbn = lblkno(ip->i_e2fs, ip->i_size); + KASSERT(len != 0, ("ext4_ext_put_in_cache: bad 
input")); - for (i = ehp->eh_depth; i != 0; --i) { - path->ep_depth = i; - path->ep_ext = NULL; - if (ext4_ext_binsearch_index(ip, path, lbn, &first_lbn, - &last_lbn)) { - return (path); + ip->i_ext_cache.ec_type = type; + ip->i_ext_cache.ec_blk = blk; + ip->i_ext_cache.ec_len = len; + ip->i_ext_cache.ec_start = start; +} + +static e4fs_daddr_t +ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path, + e4fs_daddr_t block) +{ + struct m_ext2fs *fs; + struct ext4_extent *ex; + e4fs_daddr_t bg_start; + int depth; + + fs = ip->i_e2fs; + + if (path) { + depth = path->ep_depth; + ex = path[depth].ep_ext; + if (ex) { + e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex); + e2fs_daddr_t blk = ex->e_blk; + + if (block > blk) + return (pblk + (block - blk)); + else + return (pblk - (blk - block)); } - nblk = (daddr_t)path->ep_index->ei_leaf_hi << 32 | - path->ep_index->ei_leaf_lo; - size = blksize(fs, ip, nblk); - if (path->ep_bp != NULL) { - brelse(path->ep_bp); - path->ep_bp = NULL; + /* Try to get block from index itself. */ + if (path[depth].ep_data) + return (path[depth].ep_blk); + } + + /* Use inode's group. */ + bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) + + fs->e2fs->e2fs_first_dblock; + + return (bg_start + block); +} + +static int inline +ext4_can_extents_be_merged(struct ext4_extent *ex1, + struct ext4_extent *ex2) +{ + + if (ex1->e_blk + ex1->e_len != ex2->e_blk) + return (0); + + if (ex1->e_len + ex2->e_len > EXT4_MAX_LEN) + return (0); + + if (ext4_ext_extent_pblock(ex1) + ex1->e_len == + ext4_ext_extent_pblock(ex2)) + return (1); + + return (0); +} + +static unsigned +ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path) +{ + int depth = path->ep_depth; + + /* Empty tree */ + if (depth == 0) + return (EXT4_MAX_BLOCKS); + + /* Go to indexes. */ + depth--; + + while (depth >= 0) { + if (path[depth].ep_index != + EXT_LAST_INDEX(path[depth].ep_header)) + return (path[depth].ep_index[1].ei_blk); + + depth--; + } + + return (EXT4_MAX_BLOCKS); +} + +static int +ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path) +{ + struct m_ext2fs *fs; + struct buf *bp; + uint64_t blk; + int error; + + fs = ip->i_e2fs; + + if (!path) + return (EINVAL); + + if (path->ep_data) { + blk = path->ep_blk; + bp = getblk(ip->i_devvp, fsbtodb(fs, blk), + fs->e2fs_bsize, 0, 0, 0); + if (!bp) + return (EIO); + ext4_ext_fill_path_buf(path, bp); + error = bwrite(bp); + } else { + ip->i_flag |= IN_CHANGE | IN_UPDATE; + error = ext2_update(ip->i_vnode, 1); + } + + return (error); +} + +static int +ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path, + uint32_t lblk, e4fs_daddr_t blk) +{ + struct m_ext2fs *fs; + struct ext4_extent_index *idx; + int len; + + fs = ip->i_e2fs; + + if (lblk == path->ep_index->ei_blk) { + ext2_fserr(fs, ip->i_uid, + "lblk == index blk => extent corrupted"); + return (EIO); + } + + if (path->ep_header->eh_ecount >= path->ep_header->eh_max) { + ext2_fserr(fs, ip->i_uid, + "ecout > maxcount => extent corrupted"); + return (EIO); + } + + if (lblk > path->ep_index->ei_blk) { + /* Insert after. */ + idx = path->ep_index + 1; + } else { + /* Insert before. 
*/ + idx = path->ep_index; + } + + len = EXT_LAST_INDEX(path->ep_header) - idx + 1; + if (len > 0) + memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index)); + + if (idx > EXT_MAX_INDEX(path->ep_header)) { + ext2_fserr(fs, ip->i_uid, + "index is out of range => extent corrupted"); + return (EIO); + } + + idx->ei_blk = lblk; + ext4_index_store_pblock(idx, blk); + path->ep_header->eh_ecount++; + + return (ext4_ext_dirty(ip, path)); +} + +static e4fs_daddr_t +ext4_ext_alloc_meta(struct inode *ip) +{ + e4fs_daddr_t blk = ext2_alloc_meta(ip); + if (blk) { + ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize); + ip->i_flag |= IN_CHANGE | IN_UPDATE; + ext2_update(ip->i_vnode, 1); + } + + return (blk); +} + +static void +ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags) +{ + struct m_ext2fs *fs; + int i, blocksreleased; + + fs = ip->i_e2fs; + blocksreleased = count; + + for(i = 0; i < count; i++) + ext2_blkfree(ip, blk + i, fs->e2fs_bsize); + + if (ip->i_blocks >= blocksreleased) + ip->i_blocks -= (btodb(fs->e2fs_bsize)*blocksreleased); + else + ip->i_blocks = 0; + + ip->i_flag |= IN_CHANGE | IN_UPDATE; + ext2_update(ip->i_vnode, 1); +} + +static int +ext4_ext_split(struct inode *ip, struct ext4_extent_path *path, + struct ext4_extent *newext, int at) +{ + struct m_ext2fs *fs; + struct buf *bp; + int depth = ext4_ext_inode_depth(ip); + struct ext4_extent_header *neh; + struct ext4_extent_index *fidx; + struct ext4_extent *ex; + int i = at, k, m, a; + e4fs_daddr_t newblk, oldblk; + uint32_t border; + e4fs_daddr_t *ablks = NULL; + int error = 0; + + fs = ip->i_e2fs; + bp = NULL; + + /* + * We will split at current extent for now. + */ + if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) { + ext2_fserr(fs, ip->i_uid, + "extent is out of range => extent corrupted"); + return (EIO); + } + + if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header)) + border = path[depth].ep_ext[1].e_blk; + else + border = newext->e_blk; + + /* Allocate new blocks. */ + ablks = malloc(sizeof(e4fs_daddr_t) * depth, + M_EXT2EXTENTS, M_WAITOK | M_ZERO); + if (!ablks) + return (ENOMEM); + for (a = 0; a < depth - at; a++) { + newblk = ext4_ext_alloc_meta(ip); + if (newblk == 0) + goto cleanup; + ablks[a] = newblk; + } + + newblk = ablks[--a]; + bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0); + if (!bp) { + error = EIO; + goto cleanup; + } + + neh = ext4_ext_block_header(bp->b_data); + neh->eh_ecount = 0; + neh->eh_max = ext4_ext_space_block(ip); + neh->eh_magic = EXT4_EXT_MAGIC; + neh->eh_depth = 0; + ex = EXT_FIRST_EXTENT(neh); + + if (path[depth].ep_header->eh_ecount != path[depth].ep_header->eh_max) { + ext2_fserr(fs, ip->i_uid, + "extents count out of range => extent corrupted"); + error = EIO; + goto cleanup; + } + + /* Start copy from next extent. */ + m = 0; + path[depth].ep_ext++; + while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) { + path[depth].ep_ext++; + m++; + } + if (m) { + memmove(ex, path[depth].ep_ext - m, + sizeof(struct ext4_extent) * m); + neh->eh_ecount = neh->eh_ecount + m; + } + + bwrite(bp); + bp = NULL; + + /* Fix old leaf. */ + if (m) { + path[depth].ep_header->eh_ecount = + path[depth].ep_header->eh_ecount - m; + ext4_ext_dirty(ip, path + depth); + } + + /* Create intermediate indexes. */ + k = depth - at - 1; + KASSERT(k >= 0, ("ext4_ext_split: negative k")); + + /* Insert new index into current index block. 
*/ + i = depth - 1; + while (k--) { + oldblk = newblk; + newblk = ablks[--a]; + error = bread(ip->i_devvp, fsbtodb(fs, newblk), + (int)fs->e2fs_bsize, NOCRED, &bp); + if (error) { + brelse(bp); + goto cleanup; } - error = bread(ip->i_devvp, fsbtodb(fs, nblk), size, NOCRED, - &path->ep_bp); + + neh = (struct ext4_extent_header *)bp->b_data; + neh->eh_ecount = 1; + neh->eh_magic = EXT4_EXT_MAGIC; + neh->eh_max = ext4_ext_space_block_index(ip); + neh->eh_depth = depth - i; + fidx = EXT_FIRST_INDEX(neh); + fidx->ei_blk = border; + ext4_index_store_pblock(fidx, oldblk); + + m = 0; + path[i].ep_index++; + while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) { + path[i].ep_index++; + m++; + } + if (m) { + memmove(++fidx, path[i].ep_index - m, + sizeof(struct ext4_extent_index) * m); + neh->eh_ecount = neh->eh_ecount + m; + } + + bwrite(bp); + bp = NULL; + + /* Fix old index. */ + if (m) { + path[i].ep_header->eh_ecount = + path[i].ep_header->eh_ecount - m; + ext4_ext_dirty(ip, path + i); + } + + i--; + } + + error = ext4_ext_insert_index(ip, path + at, border, newblk); + +cleanup: + if (bp) + brelse(bp); + + if (error) { + for (i = 0; i < depth; i++) { + if (!ablks[i]) + continue; + ext4_ext_blkfree(ip, ablks[i], 1, 0); + } + } + + free(ablks, M_EXT2EXTENTS); + + return (error); +} + +static int +ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path, + struct ext4_extent *newext) +{ + struct m_ext2fs *fs; + struct ext4_extent_path *curpath; + struct ext4_extent_header *neh; + struct ext4_extent_index *fidx; + struct buf *bp; + e4fs_daddr_t newblk; + int error = 0; + + fs = ip->i_e2fs; + curpath = path; + + newblk = ext4_ext_alloc_meta(ip); + if (newblk == 0) + return (error); + + bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0); + if (!bp) + return (EIO); + + /* Move top-level index/leaf into new block. */ + memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data)); + + /* Set size of new block */ + neh = ext4_ext_block_header(bp->b_data); + neh->eh_magic = EXT4_EXT_MAGIC; + + if (ext4_ext_inode_depth(ip)) + neh->eh_max = ext4_ext_space_block_index(ip); + else + neh->eh_max = ext4_ext_space_block(ip); + + error = bwrite(bp); + if (error) + goto out; + + bp = NULL; + + curpath->ep_header->eh_magic = EXT4_EXT_MAGIC; + curpath->ep_header->eh_max = ext4_ext_space_root(ip); + curpath->ep_header->eh_ecount = 1; + curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header); + curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk; + ext4_index_store_pblock(curpath->ep_index, newblk); + + neh = ext4_ext_inode_header(ip); + fidx = EXT_FIRST_INDEX(neh); + neh->eh_depth = path->ep_depth + 1; + ext4_ext_dirty(ip, curpath); +out: + brelse(bp); + + return (error); +} + +static int +ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path, + struct ext4_extent *newext) +{ + struct m_ext2fs *fs; + struct ext4_extent_path *curpath; + int depth, i, error; + + fs = ip->i_e2fs; + +repeat: + i = depth = ext4_ext_inode_depth(ip); + + /* Look for free index entry int the tree */ + curpath = path + depth; + while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) { + i--; + curpath--; + } + + /* + * We use already allocated block for index block, + * so subsequent data blocks should be contiguous. + */ + if (EXT_HAS_FREE_INDEX(curpath)) { + error = ext4_ext_split(ip, path, newext, i); + if (error) + goto out; + + /* Refill path. 
*/ + ext4_ext_drop_refs(path); + error = ext4_ext_find_extent(ip, newext->e_blk, &path); + if (error) + goto out; + } else { + /* Tree is full, do grow in depth. */ + error = ext4_ext_grow_indepth(ip, path, newext); + if (error) + goto out; + + /* Refill path. */ + ext4_ext_drop_refs(path); + error = ext4_ext_find_extent(ip, newext->e_blk, &path); + if (error) + goto out; + + /* Check and split tree if required. */ + depth = ext4_ext_inode_depth(ip); + if (path[depth].ep_header->eh_ecount == + path[depth].ep_header->eh_max) + goto repeat; + } + +out: + return (error); +} + +static int +ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path) +{ + struct ext4_extent_header *eh; + struct ext4_extent *ex; + int32_t border; + int depth, k; + + depth = ext4_ext_inode_depth(ip); + eh = path[depth].ep_header; + ex = path[depth].ep_ext; + + if (ex == NULL || eh == NULL) + return (EIO); + + if (!depth) + return (0); + + /* We will correct tree if first leaf got modified only. */ + if (ex != EXT_FIRST_EXTENT(eh)) + return (0); + + k = depth - 1; + border = path[depth].ep_ext->e_blk; + path[k].ep_index->ei_blk = border; + ext4_ext_dirty(ip, path + k); + while (k--) { + /* Change all left-side indexes. */ + if (path[k+1].ep_index != EXT_FIRST_INDEX(path[k+1].ep_header)) + break; + + path[k].ep_index->ei_blk = border; + ext4_ext_dirty(ip, path + k); + } + + return (0); +} + +static int +ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path, + struct ext4_extent *newext) +{ + struct m_ext2fs *fs; + struct ext4_extent_header * eh; + struct ext4_extent *ex, *nex, *nearex; + struct ext4_extent_path *npath; + int depth, len, error, next; + + fs = ip->i_e2fs; + depth = ext4_ext_inode_depth(ip); + ex = path[depth].ep_ext; + npath = NULL; + + if (newext->e_len == 0 || path[depth].ep_header == NULL) + return (EINVAL); + + /* Insert block into found extent. */ + if (ex && ext4_can_extents_be_merged(ex, newext)) { + ex->e_len = ex->e_len + newext->e_len; + eh = path[depth].ep_header; + nearex = ex; + goto merge; + } + +repeat: + depth = ext4_ext_inode_depth(ip); + eh = path[depth].ep_header; + if (eh->eh_ecount < eh->eh_max) + goto has_space; + + /* Try next leaf */ + nex = EXT_LAST_EXTENT(eh); + next = ext4_ext_next_leaf_block(ip, path); + if (newext->e_blk > nex->e_blk && next != EXT4_MAX_BLOCKS) { + KASSERT(npath == NULL, + ("ext4_ext_insert_extent: bad path")); + + error = ext4_ext_find_extent(ip, next, &npath); + if (error) + goto cleanup; + + if (npath->ep_depth != path->ep_depth) { + error = EIO; + goto cleanup; + } + + eh = npath[depth].ep_header; + if (eh->eh_ecount < eh->eh_max) { + path = npath; + goto repeat; + } + } + + /* + * There is no free space in the found leaf, + * try to add a new leaf to the tree. + */ + error = ext4_ext_create_new_leaf(ip, path, newext); + if (error) + goto cleanup; + + depth = ext4_ext_inode_depth(ip); + eh = path[depth].ep_header; + +has_space: + nearex = path[depth].ep_ext; + if (!nearex) { + /* Create new extent in the leaf. */ + path[depth].ep_ext = EXT_FIRST_EXTENT(eh); + } else if (newext->e_blk > nearex->e_blk) { + if (nearex != EXT_LAST_EXTENT(eh)) { + len = EXT_MAX_EXTENT(eh) - nearex; + len = (len - 1) * sizeof(struct ext4_extent); + len = len < 0 ? 0 : len; + memmove(nearex + 2, nearex + 1, len); + } + path[depth].ep_ext = nearex + 1; + } else { + len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent); + len = len < 0 ? 
0 : len; + memmove(nearex + 1, nearex, len); + path[depth].ep_ext = nearex; + } + + eh->eh_ecount = eh->eh_ecount + 1; + nearex = path[depth].ep_ext; + nearex->e_blk = newext->e_blk; + nearex->e_start_lo = newext->e_start_lo; + nearex->e_start_hi = newext->e_start_hi; + nearex->e_len = newext->e_len; + +merge: + /* Try to merge extents to the right. */ + while (nearex < EXT_LAST_EXTENT(eh)) { + if (!ext4_can_extents_be_merged(nearex, nearex + 1)) + break; + + /* Merge with next extent. */ + nearex->e_len = nearex->e_len + nearex[1].e_len; + if (nearex + 1 < EXT_LAST_EXTENT(eh)) { + len = (EXT_LAST_EXTENT(eh) - nearex - 1) * + sizeof(struct ext4_extent); + memmove(nearex + 1, nearex + 2, len); + } + + eh->eh_ecount = eh->eh_ecount - 1; + KASSERT(eh->eh_ecount != 0, + ("ext4_ext_insert_extent: bad ecount")); + } + + /* + * Try to merge extents to the left, + * start from inexes correction. + */ + error = ext4_ext_correct_indexes(ip, path); + if (error) + goto cleanup; + + ext4_ext_dirty(ip, path + depth); + +cleanup: + if (npath) { + ext4_ext_drop_refs(npath); + free(npath, M_EXT2EXTENTS); + } + + ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO; + return (error); +} + +static e4fs_daddr_t +ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref, + struct ucred *cred, unsigned long *count, int *perror) +{ + struct m_ext2fs *fs; + struct ext2mount *ump; + e4fs_daddr_t newblk; + + fs = ip->i_e2fs; + ump = ip->i_ump; + + /* + * We will allocate only single block for now. + */ + if (*count > 1) + return (0); + + EXT2_LOCK(ip->i_ump); + *perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk); + if (*perror) + return (0); + + if (newblk) { + ip->i_flag |= IN_CHANGE | IN_UPDATE; + ext2_update(ip->i_vnode, 1); + } + + return (newblk); +} + +int +ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk, + unsigned long max_blocks, struct ucred *cred, struct buf **bpp, + int *pallocated, uint32_t *nb) +{ + struct m_ext2fs *fs; + struct buf *bp = NULL; + struct ext4_extent_path *path; + struct ext4_extent newex, *ex; + e4fs_daddr_t bpref, newblk = 0; + unsigned long allocated = 0; + int error = 0, depth; + + fs = ip->i_e2fs; + *pallocated = 0; + path = NULL; + if(bpp) + *bpp = NULL; + + /* Check cache. */ + if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) { + if (bpref == EXT4_EXT_CACHE_IN) { + /* Block is already allocated. */ + newblk = iblk - newex.e_blk + + ext4_ext_extent_pblock(&newex); + allocated = newex.e_len - (iblk - newex.e_blk); + goto out; + } else { + error = EIO; + goto out2; + } + } + + error = ext4_ext_find_extent(ip, iblk, &path); + if (error) { + goto out2; + } + + depth = ext4_ext_inode_depth(ip); + if (path[depth].ep_ext == NULL && depth != 0) { + error = EIO; + goto out2; + } + + if ((ex = path[depth].ep_ext)) { + uint64_t lblk = ex->e_blk; + uint16_t e_len = ex->e_len; + e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex); + + if (e_len > EXT4_MAX_LEN) + goto out2; + + /* If we found extent covers block, simply return it. */ + if (iblk >= lblk && iblk < lblk + e_len) { + newblk = iblk - lblk + e_start; + allocated = e_len - (iblk - lblk); + ext4_ext_put_in_cache(ip, lblk, e_len, + e_start, EXT4_EXT_CACHE_IN); + goto out; + } + } + + /* Allocate the new block. 
*/ + if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) { + ip->i_next_alloc_goal = 0; + } + + bpref = ext4_ext_blkpref(ip, path, iblk); + allocated = max_blocks; + newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error); + if (!newblk) + goto out2; + + /* Try to insert new extent into found leaf and return. */ + newex.e_blk = iblk; + ext4_ext_store_pblock(&newex, newblk); + newex.e_len = allocated; + error = ext4_ext_insert_extent(ip, path, &newex); + if (error) + goto out2; + + newblk = ext4_ext_extent_pblock(&newex); + ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN); + *pallocated = 1; + +out: + if (allocated > max_blocks) + allocated = max_blocks; + + if (bpp) + { + error = bread(ip->i_devvp, fsbtodb(fs, newblk), + fs->e2fs_bsize, cred, &bp); if (error) { - brelse(path->ep_bp); - path->ep_bp = NULL; - return (NULL); + brelse(bp); + } else { + *bpp = bp; } - ehp = (struct ext4_extent_header *)path->ep_bp->b_data; - path->ep_header = ehp; } - path->ep_depth = i; - path->ep_ext = NULL; - path->ep_index = NULL; - path->ep_is_sparse = false; +out2: + if (path) { + ext4_ext_drop_refs(path); + free(path, M_EXT2EXTENTS); + } - ext4_ext_binsearch(ip, path, lbn, first_lbn, last_lbn); - return (path); + if (nb) + *nb = newblk; + + return (error); +} + +static inline uint16_t +ext4_ext_get_actual_len(struct ext4_extent *ext) +{ + + return (ext->e_len <= EXT_INIT_MAX_LEN ? + ext->e_len : (ext->e_len - EXT_INIT_MAX_LEN)); +} + +static inline struct ext4_extent_header * +ext4_ext_header(struct inode *ip) +{ + + return (struct ext4_extent_header *)ip->i_db; +} + +static int +ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex, + unsigned long from, unsigned long to) +{ + unsigned long num, start; + + if (from >= ex->e_blk && + to == ex->e_blk + ext4_ext_get_actual_len(ex) - 1) { + /* Tail cleanup. */ + num = ex->e_blk + ext4_ext_get_actual_len(ex) - from; + start = ext4_ext_extent_pblock(ex) + + ext4_ext_get_actual_len(ex) - num; + ext4_ext_blkfree(ip, start, num, 0); + } + + return (0); +} + +static int +ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path) +{ + e4fs_daddr_t leaf; + + /* Free index block. */ + path--; + leaf = ext4_ext_index_pblock(path->ep_index); + KASSERT(path->ep_header->eh_ecount != 0, + ("ext4_ext_rm_index: bad ecount")); + path->ep_header->eh_ecount--; + ext4_ext_dirty(ip, path); + ext4_ext_blkfree(ip, leaf, 1, 0); + return (0); +} + +static int +ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path, + uint64_t start) +{ + struct m_ext2fs *fs; + int depth, credits; + struct ext4_extent_header *eh; + unsigned int a, b, block, num; + unsigned long ex_blk; + unsigned short ex_len; + struct ext4_extent *ex; + int error, correct_index; + + fs = ip->i_e2fs; + depth = ext4_ext_inode_depth(ip); + correct_index = 0; + + if (!path[depth].ep_header) { + if (path[depth].ep_data == NULL) + return (EINVAL); + path[depth].ep_header = + (struct ext4_extent_header* )path[depth].ep_data; + } + + eh = path[depth].ep_header; + if (!eh) { + ext2_fserr(fs, ip->i_uid, "bad header => extent corrupted"); + return (EIO); + } + + ex = EXT_LAST_EXTENT(eh); + ex_blk = ex->e_blk; + ex_len = ext4_ext_get_actual_len(ex); + + while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) { + path[depth].ep_ext = ex; + a = ex_blk > start ? ex_blk : start; + b = (uint64_t)ex_blk + ex_len - 1 < + EXT4_MAX_BLOCKS ? 
ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS; + + if (a != ex_blk && b != ex_blk + ex_len - 1) + return (EINVAL); + else if (a != ex_blk) { + /* Remove tail of the extent. */ + block = ex_blk; + num = a - block; + } else if (b != ex_blk + ex_len - 1) { + /* Remove head of the extent, not implemented. */ + return (EINVAL); + } else { + /* Remove whole extent. */ + block = ex_blk; + num = 0; + KASSERT(a == ex_blk, ("ext4_ext_rm_leaf: bad a")); + KASSERT(b != ex_blk + ex_len - 1, + ("ext4_ext_rm_leaf: bad b")); + } + + credits = EXT4_EXT_DEPTH_MAX; + if (ex == EXT_FIRST_EXTENT(eh)) { + correct_index = 1; + credits += (ext4_ext_inode_depth(ip)) + 1; + } + + error = ext4_remove_blocks(ip, ex, a, b); + if (error) + goto out; + + if (num == 0) { + ext4_ext_store_pblock(ex, 0); + eh->eh_ecount--; + } + + ex->e_blk = block; + ex->e_len = num; + + ext4_ext_dirty(ip, path + depth); + + ex--; + ex_blk = ex->e_blk; + ex_len = ext4_ext_get_actual_len(ex); + }; + + if (correct_index && eh->eh_ecount) + error = ext4_ext_correct_indexes(ip, path); + + /* + * If this leaf is free, we should + * remove it from index block above. + */ + if (error == 0 && eh->eh_ecount == 0 && path[depth].ep_data != NULL) + error = ext4_ext_rm_index(ip, path + depth); + +out: + return (error); +} + +static struct buf * +ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk, + int depth, int flags) +{ + struct m_ext2fs *fs; + struct ext4_extent_header *eh; + struct buf *bp; + int error; + + fs = ip->i_e2fs; + + error = bread(ip->i_devvp, fsbtodb(fs, pblk), + fs->e2fs_bsize, NOCRED, &bp); + if (error) { + brelse(bp); + return (NULL); + } + + eh = ext4_ext_block_header(bp->b_data); + if (eh->eh_depth != depth) { + ext2_fserr(fs, ip->i_uid, "unexpected eh_depth"); + goto err; + } + + error = ext4_ext_check_header(ip, eh); + if (error) + goto err; + + return (bp); + +err: + brelse(bp); + return (NULL); + +} + +static int inline +ext4_ext_more_to_rm(struct ext4_extent_path *path) +{ + + KASSERT(path->ep_index != NULL, + ("ext4_ext_more_to_rm: bad index from path")); + + if (path->ep_index < EXT_FIRST_INDEX(path->ep_header)) + return (0); + + if (path->ep_header->eh_ecount == path->index_count) + return (0); + + return (1); +} + +int +ext4_ext_remove_space(struct inode *ip, off_t length, int flags, + struct ucred *cred, struct thread *td) +{ + struct buf *bp; + struct ext4_extent_header *ehp; + struct ext4_extent_path *path; + int depth; + int i, error; + + ehp = (struct ext4_extent_header *)ip->i_db; + depth = ext4_ext_inode_depth(ip); + + error = ext4_ext_check_header(ip, ehp); + if(error) + return (error); + + path = malloc(sizeof(struct ext4_extent_path) * (depth + 1), + M_EXT2EXTENTS, M_WAITOK | M_ZERO); + if (!path) + return (ENOMEM); + + i = 0; + path[0].ep_header = ehp; + path[0].ep_depth = depth; + while (i >= 0 && error == 0) { + if (i == depth) { + /* This is leaf. */ + error = ext4_ext_rm_leaf(ip, path, length); + if (error) + break; + free(path[i].ep_data, M_EXT2EXTENTS); + path[i].ep_data = NULL; + i--; + continue; + } + + /* This is index. */ + if (!path[i].ep_header) + path[i].ep_header = + (struct ext4_extent_header *)path[i].ep_data; + + if (!path[i].ep_index) { + /* This level hasn't touched yet. */ + path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header); + path[i].index_count = path[i].ep_header->eh_ecount + 1; + } else { + /* We've already was here, see at next index. 
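The length arithmetic in the removal code above leans on ext4_ext_get_actual_len() and the new EXT_INIT_MAX_LEN / EXT4_MAX_LEN constants: e_len values at or below 1 << 15 are taken literally, while larger values (the encoding ext4 uses for unwritten extents) carry that bias and must have it subtracted to recover the real block count. A small standalone sketch with made-up lengths, mirroring the helper added above:

    #include <stdint.h>
    #include <stdio.h>

    #define EXT_INIT_MAX_LEN        (1UL << 15)    /* same value ext2_extents.h adds */

    /* Mirrors ext4_ext_get_actual_len() from this change (illustrative only). */
    static uint16_t
    actual_len(uint16_t e_len)
    {
            return (e_len <= EXT_INIT_MAX_LEN ? e_len : e_len - EXT_INIT_MAX_LEN);
    }

    int
    main(void)
    {
            /* A written extent stores its length directly ... */
            printf("e_len = 200   -> %u blocks\n", (unsigned)actual_len(200));
            /* ... while a biased length has EXT_INIT_MAX_LEN folded in,
             * and the helper strips it off again. */
            printf("e_len = 32968 -> %u blocks\n", (unsigned)actual_len(32768 + 200));
            return (0);
    }

EXT4_MAX_LEN (the bias minus one) is then the largest length a plain extent can describe, which is why ext4_ext_get_blocks() above refuses extents whose e_len exceeds it.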
*/ + path[i].ep_index--; + } + + if (ext4_ext_more_to_rm(path + i)) { + memset(path + i + 1, 0, sizeof(*path)); + bp = ext4_read_extent_tree_block(ip, + ext4_ext_index_pblock(path[i].ep_index), + path[0].ep_depth - (i + 1), 0); + if (!bp) { + error = EIO; + break; + } + + ext4_ext_fill_path_bdata(&path[i+1], bp, + ext4_ext_index_pblock(path[i].ep_index)); + brelse(bp); + path[i].index_count = path[i].ep_header->eh_ecount; + i++; + } else { + if (path[i].ep_header->eh_ecount == 0 && i > 0) { + /* Index is empty, remove it. */ + error = ext4_ext_rm_index(ip, path + i); + } + free(path[i].ep_data, M_EXT2EXTENTS); + path[i].ep_data = NULL; + i--; + } + } + + if (path->ep_header->eh_ecount == 0) { + /* + * Truncate the tree to zero. + */ + ext4_ext_header(ip)->eh_depth = 0; + ext4_ext_header(ip)->eh_max = ext4_ext_space_root(ip); + ext4_ext_dirty(ip, path); + + } + + ext4_ext_drop_refs(path); + free(path, M_EXT2EXTENTS); + + return (error); } Index: head/sys/fs/ext2fs/ext2_extents.h =================================================================== --- head/sys/fs/ext2fs/ext2_extents.h (revision 324705) +++ head/sys/fs/ext2fs/ext2_extents.h (revision 324706) @@ -1,104 +1,126 @@ /*- * Copyright (c) 2012, 2010 Zheng Liu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _FS_EXT2FS_EXT2_EXTENTS_H_ #define _FS_EXT2FS_EXT2_EXTENTS_H_ #include #define EXT4_EXT_MAGIC 0xf30a +#define EXT4_MAX_BLOCKS 0xffffffff +#define EXT_INIT_MAX_LEN (1UL << 15) +#define EXT4_MAX_LEN (EXT_INIT_MAX_LEN - 1) +#define EXT4_EXT_DEPTH_MAX 5 #define EXT4_EXT_CACHE_NO 0 #define EXT4_EXT_CACHE_GAP 1 #define EXT4_EXT_CACHE_IN 2 /* * Ext4 file system extent on disk. */ struct ext4_extent { uint32_t e_blk; /* first logical block */ uint16_t e_len; /* number of blocks */ uint16_t e_start_hi; /* high 16 bits of physical block */ uint32_t e_start_lo; /* low 32 bits of physical block */ }; /* * Extent index on disk. */ struct ext4_extent_index { uint32_t ei_blk; /* indexes logical blocks */ uint32_t ei_leaf_lo; /* points to physical block of the * next level */ uint16_t ei_leaf_hi; /* high 16 bits of physical block */ uint16_t ei_unused; }; /* * Extent tree header. 
*/ struct ext4_extent_header { uint16_t eh_magic; /* magic number: 0xf30a */ uint16_t eh_ecount; /* number of valid entries */ uint16_t eh_max; /* capacity of store in entries */ uint16_t eh_depth; /* the depth of extent tree */ uint32_t eh_gen; /* generation of extent tree */ }; /* * Save cached extent. */ struct ext4_extent_cache { daddr_t ec_start; /* extent start */ uint32_t ec_blk; /* logical block */ uint32_t ec_len; uint32_t ec_type; }; /* * Save path to some extent. */ struct ext4_extent_path { + int index_count; uint16_t ep_depth; - struct buf *ep_bp; - bool ep_is_sparse; - union { - struct ext4_extent ep_sparse_ext; - struct ext4_extent *ep_ext; - }; + uint64_t ep_blk; + char *ep_data; + struct ext4_extent *ep_ext; struct ext4_extent_index *ep_index; struct ext4_extent_header *ep_header; }; +#define EXT_FIRST_EXTENT(hdr) ((struct ext4_extent *)(((char *)(hdr)) + \ + sizeof(struct ext4_extent_header))) +#define EXT_FIRST_INDEX(hdr) ((struct ext4_extent_index *)(((char *)(hdr)) + \ + sizeof(struct ext4_extent_header))) +#define EXT_LAST_EXTENT(hdr) (EXT_FIRST_EXTENT((hdr)) + (hdr)->eh_ecount - 1) +#define EXT_LAST_INDEX(hdr) (EXT_FIRST_INDEX((hdr)) + (hdr)->eh_ecount - 1) +#define EXT4_EXTENT_TAIL_OFFSET(hdr) (sizeof(struct ext4_extent_header) + \ + (sizeof(struct ext4_extent) * (hdr)->eh_max)) +#define EXT_HAS_FREE_INDEX(path) \ + ((path)->ep_header->eh_ecount < (path)->ep_header->eh_max) +#define EXT_MAX_EXTENT(hdr) (EXT_FIRST_EXTENT(hdr) + ((hdr)->eh_max) - 1) +#define EXT_MAX_INDEX(hdr) (EXT_FIRST_INDEX((hdr)) + (hdr)->eh_max - 1) + struct inode; struct m_ext2fs; +void ext4_ext_tree_init(struct inode *ip); int ext4_ext_in_cache(struct inode *, daddr_t, struct ext4_extent *); void ext4_ext_put_cache(struct inode *, struct ext4_extent *, int); -struct ext4_extent_path * -ext4_ext_find_extent(struct m_ext2fs *fs, - struct inode *, daddr_t, struct ext4_extent_path *); +int ext4_ext_find_extent(struct inode *, daddr_t, struct ext4_extent_path **); +void ext4_ext_path_free(struct ext4_extent_path *path); +int ext4_ext_remove_space(struct inode *ip, off_t length, int flags, + struct ucred *cred, struct thread *td); +int ext4_ext_get_blocks(struct inode *ip, int64_t iblock, + unsigned long max_blocks, struct ucred *cred, struct buf **bpp, int *allocate, uint32_t *); +#ifdef EXT2FS_DEBUG +void ext4_ext_print_extent_tree_status(struct inode * ip); +#endif #endif /* !_FS_EXT2FS_EXT2_EXTENTS_H_ */ Index: head/sys/fs/ext2fs/ext2_extern.h =================================================================== --- head/sys/fs/ext2fs/ext2_extern.h (revision 324705) +++ head/sys/fs/ext2fs/ext2_extern.h (revision 324706) @@ -1,117 +1,119 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * Copyright (c) 1991, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_extern.h 8.3 (Berkeley) 4/16/94 * $FreeBSD$ */ #ifndef _FS_EXT2FS_EXT2_EXTERN_H_ #define _FS_EXT2FS_EXT2_EXTERN_H_ struct ext2fs_dinode; struct ext2fs_direct_2; struct ext2fs_searchslot; struct indir; struct inode; struct mount; struct vfsconf; struct vnode; int ext2_add_entry(struct vnode *, struct ext2fs_direct_2 *); int ext2_alloc(struct inode *, daddr_t, e4fs_daddr_t, int, struct ucred *, e4fs_daddr_t *); -daddr_t ext2_allocfacl(struct inode *ip); +daddr_t ext2_alloc_meta(struct inode *ip); int ext2_balloc(struct inode *, e2fs_lbn_t, int, struct ucred *, struct buf **, int); int ext2_blkatoff(struct vnode *, off_t, char **, struct buf **); void ext2_blkfree(struct inode *, e4fs_daddr_t, long); e4fs_daddr_t ext2_blkpref(struct inode *, e2fs_lbn_t, int, e2fs_daddr_t *, e2fs_daddr_t); int ext2_bmap(struct vop_bmap_args *); int ext2_bmaparray(struct vnode *, daddr_t, daddr_t *, int *, int *); +int ext4_bmapext(struct vnode *, int32_t, int64_t *, int *, int *); void ext2_clusteracct(struct m_ext2fs *, char *, int, daddr_t, int); void ext2_dirbad(struct inode *ip, doff_t offset, char *how); void ext2_fserr(struct m_ext2fs *, uid_t, char *); void ext2_ei2i(struct ext2fs_dinode *, struct inode *); int ext2_getlbns(struct vnode *, daddr_t, struct indir *, int *); int ext2_i2ei(struct inode *, struct ext2fs_dinode *); void ext2_itimes(struct vnode *vp); int ext2_reallocblks(struct vop_reallocblks_args *); int ext2_reclaim(struct vop_reclaim_args *); int ext2_truncate(struct vnode *, off_t, int, struct ucred *, struct thread *); int ext2_update(struct vnode *, int); int ext2_valloc(struct vnode *, int, struct ucred *, struct vnode **); int ext2_vfree(struct vnode *, ino_t, int); int ext2_vinit(struct mount *, struct vop_vector *, struct vnode **vpp); int ext2_lookup(struct vop_cachedlookup_args *); int ext2_readdir(struct vop_readdir_args *); #ifdef EXT2FS_DEBUG void ext2_print_inode(struct inode *); #endif int ext2_direnter(struct inode *, struct vnode *, struct componentname *); int ext2_dirremove(struct vnode *, struct componentname *); int ext2_dirrewrite(struct inode *, struct inode *, struct componentname *); int ext2_dirempty(struct inode *, ino_t, struct ucred *); int ext2_checkpath(struct inode *, struct inode *, struct ucred *); int ext2_cg_has_sb(struct m_ext2fs *fs, int cg); int ext2_inactive(struct vop_inactive_args *); int ext2_htree_add_entry(struct vnode *, struct ext2fs_direct_2 *, struct componentname *); int ext2_htree_create_index(struct vnode *, struct componentname *, struct ext2fs_direct_2 *); int ext2_htree_has_idx(struct inode 
*); int ext2_htree_hash(const char *, int, uint32_t *, int, uint32_t *, uint32_t *); int ext2_htree_lookup(struct inode *, const char *, int, struct buf **, int *, doff_t *, doff_t *, doff_t *, struct ext2fs_searchslot *); int ext2_search_dirblock(struct inode *, void *, int *, const char *, int, int *, doff_t *, doff_t *, doff_t *, struct ext2fs_searchslot *); int ext2_gd_csum_verify(struct m_ext2fs *fs, struct cdev *dev); void ext2_gd_csum_set(struct m_ext2fs *fs); +void ext2_fserr(struct m_ext2fs *, uid_t, char *); /* Flags to low-level allocation routines. * The low 16-bits are reserved for IO_ flags from vnode.h. */ #define BA_CLRBUF 0x00010000 /* Clear invalid areas of buffer. */ #define BA_SEQMASK 0x7F000000 /* Bits holding seq heuristic. */ #define BA_SEQSHIFT 24 #define BA_SEQMAX 0x7F extern struct vop_vector ext2_vnodeops; extern struct vop_vector ext2_fifoops; #endif /* !_FS_EXT2FS_EXT2_EXTERN_H_ */ Index: head/sys/fs/ext2fs/ext2_inode.c =================================================================== --- head/sys/fs/ext2fs/ext2_inode.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_inode.c (revision 324706) @@ -1,535 +1,640 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_inode.c 8.5 (Berkeley) 12/30/93 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -static int ext2_indirtrunc(struct inode *, daddr_t, daddr_t, - daddr_t, int, e4fs_daddr_t *); - /* * Update the access, modified, and inode change times as specified by the * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively. Write the inode * to disk if the IN_MODIFIED flag is set (it may be set initially, or by * the timestamp update). The IN_LAZYMOD flag is set to force a write * later if not now. 
If we write now, then clear both IN_MODIFIED and * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is * set, then wait for the write to complete. */ int ext2_update(struct vnode *vp, int waitfor) { struct m_ext2fs *fs; struct buf *bp; struct inode *ip; int error; ASSERT_VOP_ELOCKED(vp, "ext2_update"); ext2_itimes(vp); ip = VTOI(vp); if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0) return (0); ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED); fs = ip->i_e2fs; if (fs->e2fs_ronly) return (0); if ((error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } error = ext2_i2ei(ip, (struct ext2fs_dinode *)((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number))); if (error) { brelse(bp); return (error); } if (waitfor && !DOINGASYNC(vp)) return (bwrite(bp)); else { bdwrite(bp); return (0); } } #define SINGLE 0 /* index of single indirect block */ #define DOUBLE 1 /* index of double indirect block */ #define TRIPLE 2 /* index of triple indirect block */ + /* + * Release blocks associated with the inode ip and stored in the indirect + * block bn. Blocks are free'd in LIFO order up to (but not including) + * lastbn. If level is greater than SINGLE, the block is an indirect block + * and recursive calls to indirtrunc must be used to cleanse other indirect + * blocks. + * + * NB: triple indirect blocks are untested. + */ +static int +ext2_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn, + daddr_t lastbn, int level, e4fs_daddr_t *countp) +{ + struct buf *bp; + struct m_ext2fs *fs = ip->i_e2fs; + struct vnode *vp; + e2fs_daddr_t *bap, *copy; + int i, nblocks, error = 0, allerror = 0; + e2fs_lbn_t nb, nlbn, last; + e4fs_daddr_t blkcount, factor, blocksreleased = 0; + + /* + * Calculate index in current block of last + * block to be kept. -1 indicates the entire + * block so we need not calculate the index. + */ + factor = 1; + for (i = SINGLE; i < level; i++) + factor *= NINDIR(fs); + last = lastbn; + if (lastbn > 0) + last /= factor; + nblocks = btodb(fs->e2fs_bsize); + /* + * Get buffer of block pointers, zero those entries corresponding + * to blocks to be free'd, and update on disk copy first. Since + * double(triple) indirect before single(double) indirect, calls + * to bmap on these blocks will fail. However, we already have + * the on disk address, so we have to set the b_blkno field + * explicitly instead of letting bread do everything for us. + */ + vp = ITOV(ip); + bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0, 0); + if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) { + bp->b_iocmd = BIO_READ; + if (bp->b_bcount > bp->b_bufsize) + panic("ext2_indirtrunc: bad buffer size"); + bp->b_blkno = dbn; + vfs_busy_pages(bp, 0); + bp->b_iooffset = dbtob(bp->b_blkno); + bstrategy(bp); + error = bufwait(bp); + } + if (error) { + brelse(bp); + *countp = 0; + return (error); + } + bap = (e2fs_daddr_t *)bp->b_data; + copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK); + bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->e2fs_bsize); + bzero((caddr_t)&bap[last + 1], + (NINDIR(fs) - (last + 1)) * sizeof(e2fs_daddr_t)); + if (last == -1) + bp->b_flags |= B_INVAL; + if (DOINGASYNC(vp)) { + bdwrite(bp); + } else { + error = bwrite(bp); + if (error) + allerror = error; + } + bap = copy; + + /* + * Recursively free totally unused blocks. 
+ */ + for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last; + i--, nlbn += factor) { + nb = bap[i]; + if (nb == 0) + continue; + if (level > SINGLE) { + if ((error = ext2_indirtrunc(ip, nlbn, + fsbtodb(fs, nb), (int32_t)-1, level - 1, &blkcount)) != 0) + allerror = error; + blocksreleased += blkcount; + } + ext2_blkfree(ip, nb, fs->e2fs_bsize); + blocksreleased += nblocks; + } + + /* + * Recursively free last partial block. + */ + if (level > SINGLE && lastbn >= 0) { + last = lastbn % factor; + nb = bap[i]; + if (nb != 0) { + if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb), + last, level - 1, &blkcount)) != 0) + allerror = error; + blocksreleased += blkcount; + } + } + free(copy, M_TEMP); + *countp = blocksreleased; + return (allerror); +} + +/* * Truncate the inode oip to at most length size, freeing the * disk blocks. */ -int -ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred, +static int +ext2_ind_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred, struct thread *td) { struct vnode *ovp = vp; int32_t lastblock; struct inode *oip; int32_t bn, lbn, lastiblock[EXT2_NIADDR], indir_lbn[EXT2_NIADDR]; uint32_t oldblks[EXT2_NDADDR + EXT2_NIADDR]; uint32_t newblks[EXT2_NDADDR + EXT2_NIADDR]; struct m_ext2fs *fs; struct buf *bp; int offset, size, level; e4fs_daddr_t count, nblocks, blocksreleased = 0; int error, i, allerror; off_t osize; #ifdef INVARIANTS struct bufobj *bo; #endif oip = VTOI(ovp); #ifdef INVARIANTS bo = &ovp->v_bufobj; #endif - ASSERT_VOP_LOCKED(vp, "ext2_truncate"); - - if (length < 0) - return (EINVAL); - - if (ovp->v_type == VLNK && - oip->i_size < ovp->v_mount->mnt_maxsymlinklen) { -#ifdef INVARIANTS - if (length != 0) - panic("ext2_truncate: partial truncate of symlink"); -#endif - bzero((char *)&oip->i_shortlink, (u_int)oip->i_size); - oip->i_size = 0; - oip->i_flag |= IN_CHANGE | IN_UPDATE; - return (ext2_update(ovp, 1)); - } - if (oip->i_size == length) { - oip->i_flag |= IN_CHANGE | IN_UPDATE; - return (ext2_update(ovp, 0)); - } fs = oip->i_e2fs; osize = oip->i_size; /* * Lengthen the size of the file. We must ensure that the * last byte of the file is allocated. Since the smallest * value of osize is 0, length will be at least 1. */ if (osize < length) { if (length > oip->i_e2fs->e2fs_maxfilesize) return (EFBIG); vnode_pager_setsize(ovp, length); offset = blkoff(fs, length - 1); lbn = lblkno(fs, length - 1); flags |= BA_CLRBUF; error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags); if (error) { vnode_pager_setsize(vp, osize); return (error); } oip->i_size = length; if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(ovp)) bdwrite(bp); else bawrite(bp); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ext2_update(ovp, !DOINGASYNC(ovp))); } /* * Shorten the size of the file. If the file is not being * truncated to a block boundary, the contents of the * partial block following the end of the file must be * zero'ed in case it ever become accessible again because * of subsequent file growth. 
*/ /* I don't understand the comment above */ offset = blkoff(fs, length); if (offset == 0) { oip->i_size = length; } else { lbn = lblkno(fs, length); flags |= BA_CLRBUF; error = ext2_balloc(oip, lbn, offset, cred, &bp, flags); if (error) return (error); oip->i_size = length; size = blksize(fs, oip, lbn); bzero((char *)bp->b_data + offset, (u_int)(size - offset)); allocbuf(bp, size); if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(ovp)) bdwrite(bp); else bawrite(bp); } /* * Calculate index into inode's block list of * last direct and indirect blocks (if any) * which we want to keep. Lastblock is -1 when * the file is truncated to 0. */ lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1; lastiblock[SINGLE] = lastblock - EXT2_NDADDR; lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); nblocks = btodb(fs->e2fs_bsize); /* * Update file and block pointers on disk before we start freeing * blocks. If we crash before free'ing blocks below, the blocks * will be returned to the free list. lastiblock values are also * normalized to -1 for calls to ext2_indirtrunc below. */ for (level = TRIPLE; level >= SINGLE; level--) { oldblks[EXT2_NDADDR + level] = oip->i_ib[level]; if (lastiblock[level] < 0) { oip->i_ib[level] = 0; lastiblock[level] = -1; } } for (i = 0; i < EXT2_NDADDR; i++) { oldblks[i] = oip->i_db[i]; if (i > lastblock) oip->i_db[i] = 0; } oip->i_flag |= IN_CHANGE | IN_UPDATE; allerror = ext2_update(ovp, !DOINGASYNC(ovp)); /* * Having written the new inode to disk, save its new configuration * and put back the old block pointers long enough to process them. * Note that we save the new block configuration so we can check it * when we are done. */ for (i = 0; i < EXT2_NDADDR; i++) { newblks[i] = oip->i_db[i]; oip->i_db[i] = oldblks[i]; } for (i = 0; i < EXT2_NIADDR; i++) { newblks[EXT2_NDADDR + i] = oip->i_ib[i]; oip->i_ib[i] = oldblks[EXT2_NDADDR + i]; } oip->i_size = osize; error = vtruncbuf(ovp, cred, length, (int)fs->e2fs_bsize); if (error && (allerror == 0)) allerror = error; vnode_pager_setsize(ovp, length); /* * Indirect blocks first. */ indir_lbn[SINGLE] = -EXT2_NDADDR; indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; for (level = TRIPLE; level >= SINGLE; level--) { bn = oip->i_ib[level]; if (bn != 0) { error = ext2_indirtrunc(oip, indir_lbn[level], fsbtodb(fs, bn), lastiblock[level], level, &count); if (error) allerror = error; blocksreleased += count; if (lastiblock[level] < 0) { oip->i_ib[level] = 0; ext2_blkfree(oip, bn, fs->e2fs_fsize); blocksreleased += nblocks; } } if (lastiblock[level] >= 0) goto done; } /* * All whole direct blocks or frags. */ for (i = EXT2_NDADDR - 1; i > lastblock; i--) { long bsize; bn = oip->i_db[i]; if (bn == 0) continue; oip->i_db[i] = 0; bsize = blksize(fs, oip, i); ext2_blkfree(oip, bn, bsize); blocksreleased += btodb(bsize); } if (lastblock < 0) goto done; /* * Finally, look for a change in size of the * last direct block; release any frags. */ bn = oip->i_db[lastblock]; if (bn != 0) { long oldspace, newspace; /* * Calculate amount of space we're giving * back as old block size minus new block size. 
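The lastblock / lastiblock[] computation earlier in this function is easier to see with numbers. A standalone sketch, assuming 4 KiB blocks, 12 direct pointers and 1024 pointers per indirect block (the usual ext2 values); the 1 GiB target size is made up:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* Assumed geometry, used only to illustrate the index arithmetic. */
            const int64_t bsize = 4096, ndaddr = 12, nindir = 1024;
            int64_t length = 1024 * 1024 * 1024;    /* truncate to 1 GiB */

            /* Last logical block to keep; -1 when truncating to zero. */
            int64_t lastblock = (length + bsize - 1) / bsize - 1;
            int64_t last_single = lastblock - ndaddr;
            int64_t last_double = last_single - nindir;
            int64_t last_triple = last_double - nindir * nindir;

            printf("lastblock          = %jd\n", (intmax_t)lastblock);
            printf("lastiblock[SINGLE] = %jd\n", (intmax_t)last_single);
            printf("lastiblock[DOUBLE] = %jd\n", (intmax_t)last_double);
            printf("lastiblock[TRIPLE] = %jd\n", (intmax_t)last_triple);
            /* A negative entry means that whole indirect level lies past EOF. */
            return (0);
    }

Any lastiblock[] entry that comes out negative means the corresponding indirect level lies entirely beyond the new end of file, so the code above zeroes that pointer and lets ext2_indirtrunc() free the whole subtree.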
*/ oldspace = blksize(fs, oip, lastblock); oip->i_size = length; newspace = blksize(fs, oip, lastblock); if (newspace == 0) panic("ext2_truncate: newspace"); if (oldspace - newspace > 0) { /* * Block number of space to be free'd is * the old block # plus the number of frags * required for the storage we're keeping. */ bn += numfrags(fs, newspace); ext2_blkfree(oip, bn, oldspace - newspace); blocksreleased += btodb(oldspace - newspace); } } done: #ifdef INVARIANTS for (level = SINGLE; level <= TRIPLE; level++) if (newblks[EXT2_NDADDR + level] != oip->i_ib[level]) panic("itrunc1"); for (i = 0; i < EXT2_NDADDR; i++) if (newblks[i] != oip->i_db[i]) panic("itrunc2"); BO_LOCK(bo); if (length == 0 && (bo->bo_dirty.bv_cnt != 0 || bo->bo_clean.bv_cnt != 0)) panic("itrunc3"); BO_UNLOCK(bo); #endif /* INVARIANTS */ /* * Put back the real size. */ oip->i_size = length; if (oip->i_blocks >= blocksreleased) oip->i_blocks -= blocksreleased; else /* sanity */ oip->i_blocks = 0; oip->i_flag |= IN_CHANGE; vnode_pager_setsize(ovp, length); return (allerror); } -/* - * Release blocks associated with the inode ip and stored in the indirect - * block bn. Blocks are free'd in LIFO order up to (but not including) - * lastbn. If level is greater than SINGLE, the block is an indirect block - * and recursive calls to indirtrunc must be used to cleanse other indirect - * blocks. - * - * NB: triple indirect blocks are untested. - */ - static int -ext2_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn, - daddr_t lastbn, int level, e4fs_daddr_t *countp) +ext2_ext_truncate(struct vnode *vp, off_t length, int flags, + struct ucred *cred, struct thread *td) { + struct vnode *ovp = vp; + int32_t lastblock; + struct m_ext2fs *fs; + struct inode *oip; struct buf *bp; - struct m_ext2fs *fs = ip->i_e2fs; - struct vnode *vp; - e2fs_daddr_t *bap, *copy; - int i, nblocks, error = 0, allerror = 0; - e2fs_lbn_t nb, nlbn, last; - e4fs_daddr_t blkcount, factor, blocksreleased = 0; + uint32_t lbn, offset; + int error, size; + off_t osize; - /* - * Calculate index in current block of last - * block to be kept. -1 indicates the entire - * block so we need not calculate the index. - */ - factor = 1; - for (i = SINGLE; i < level; i++) - factor *= NINDIR(fs); - last = lastbn; - if (lastbn > 0) - last /= factor; - nblocks = btodb(fs->e2fs_bsize); - /* - * Get buffer of block pointers, zero those entries corresponding - * to blocks to be free'd, and update on disk copy first. Since - * double(triple) indirect before single(double) indirect, calls - * to bmap on these blocks will fail. However, we already have - * the on disk address, so we have to set the b_blkno field - * explicitly instead of letting bread do everything for us. 
- */ - vp = ITOV(ip); - bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0, 0); - if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) { - bp->b_iocmd = BIO_READ; - if (bp->b_bcount > bp->b_bufsize) - panic("ext2_indirtrunc: bad buffer size"); - bp->b_blkno = dbn; - vfs_busy_pages(bp, 0); - bp->b_iooffset = dbtob(bp->b_blkno); - bstrategy(bp); - error = bufwait(bp); + oip = VTOI(ovp); + fs = oip->i_e2fs; + osize = oip->i_size; + + if (osize < length) { + if (length > oip->i_e2fs->e2fs_maxfilesize) { + return (EFBIG); + } + vnode_pager_setsize(ovp, length); + offset = blkoff(fs, length - 1); + lbn = lblkno(fs, length - 1); + flags |= BA_CLRBUF; + error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags); + if (error) { + vnode_pager_setsize(vp, osize); + return (error); + } + oip->i_size = length; + if (bp->b_bufsize == fs->e2fs_bsize) + bp->b_flags |= B_CLUSTEROK; + if (flags & IO_SYNC) + bwrite(bp); + else if (DOINGASYNC(ovp)) + bdwrite(bp); + else + bawrite(bp); + oip->i_flag |= IN_CHANGE | IN_UPDATE; + return (ext2_update(ovp, !DOINGASYNC(ovp))); } - if (error) { - brelse(bp); - *countp = 0; + + lastblock = (length + fs->e2fs_bsize - 1) / fs->e2fs_bsize; + error = ext4_ext_remove_space(oip, lastblock, flags, cred, td); + if (error) return (error); - } - bap = (e2fs_daddr_t *)bp->b_data; - copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK); - bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->e2fs_bsize); - bzero((caddr_t)&bap[last + 1], - (NINDIR(fs) - (last + 1)) * sizeof(e2fs_daddr_t)); - if (last == -1) - bp->b_flags |= B_INVAL; - if (DOINGASYNC(vp)) { - bdwrite(bp); - } else { - error = bwrite(bp); - if (error) - allerror = error; - } - bap = copy; - /* - * Recursively free totally unused blocks. - */ - for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last; - i--, nlbn += factor) { - nb = bap[i]; - if (nb == 0) - continue; - if (level > SINGLE) { - if ((error = ext2_indirtrunc(ip, nlbn, - fsbtodb(fs, nb), (int32_t)-1, level - 1, &blkcount)) != 0) - allerror = error; - blocksreleased += blkcount; + offset = blkoff(fs, length); + if (offset == 0) { + oip->i_size = length; + } else { + lbn = lblkno(fs, length); + flags |= BA_CLRBUF; + error = ext2_balloc(oip, lbn, offset, cred, &bp, flags); + if (error) { + return (error); } - ext2_blkfree(ip, nb, fs->e2fs_bsize); - blocksreleased += nblocks; + oip->i_size = length; + size = blksize(fs, oip, lbn); + bzero((char *)bp->b_data + offset, (u_int)(size - offset)); + allocbuf(bp, size); + if (bp->b_bufsize == fs->e2fs_bsize) + bp->b_flags |= B_CLUSTEROK; + if (flags & IO_SYNC) + bwrite(bp); + else if (DOINGASYNC(ovp)) + bdwrite(bp); + else + bawrite(bp); } - /* - * Recursively free last partial block. - */ - if (level > SINGLE && lastbn >= 0) { - last = lastbn % factor; - nb = bap[i]; - if (nb != 0) { - if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb), - last, level - 1, &blkcount)) != 0) - allerror = error; - blocksreleased += blkcount; - } + oip->i_size = osize; + error = vtruncbuf(ovp, cred, length, (int)fs->e2fs_bsize); + if (error) + return (error); + + vnode_pager_setsize(ovp, length); + + oip->i_size = length; + oip->i_flag |= IN_CHANGE | IN_UPDATE; + error = ext2_update(ovp, !DOINGASYNC(ovp)); + + return (error); +} + +/* + * Truncate the inode ip to at most length size, freeing the + * disk blocks. 
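For the extent-backed case, the only geometry ext2_ext_truncate() needs before calling ext4_ext_remove_space() is the first logical block that lies wholly beyond the new size. A standalone sketch of that rounding, with an assumed 4 KiB block size and an arbitrary length:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* Assumed 4 KiB blocks; the length is only an example. */
            const int64_t bsize = 4096;
            int64_t length = 10000;         /* new file size in bytes */

            /*
             * First logical block entirely past the new size; this is the
             * value the truncate path above hands to ext4_ext_remove_space()
             * as the point from which extents are chopped off.
             */
            int64_t first_removed = (length + bsize - 1) / bsize;

            printf("truncate to %jd bytes -> keep blocks 0..%jd, remove from %jd\n",
                (intmax_t)length, (intmax_t)(first_removed - 1),
                (intmax_t)first_removed);
            return (0);
    }

Blocks below that boundary stay mapped; the partially used tail block, if any, is then zeroed past the new size by the ext2_balloc()/bzero() path later in the same function.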
+ */ +int +ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred, + struct thread *td) +{ + struct inode *ip; + int error; + + ASSERT_VOP_LOCKED(vp, "ext2_truncate"); + + if (length < 0) + return (EINVAL); + + ip = VTOI(vp); + if (vp->v_type == VLNK && + ip->i_size < vp->v_mount->mnt_maxsymlinklen) { +#ifdef INVARIANTS + if (length != 0) + panic("ext2_truncate: partial truncate of symlink"); +#endif + bzero((char *)&ip->i_shortlink, (u_int)ip->i_size); + ip->i_size = 0; + ip->i_flag |= IN_CHANGE | IN_UPDATE; + return (ext2_update(vp, 1)); } - free(copy, M_TEMP); - *countp = blocksreleased; - return (allerror); + if (ip->i_size == length) { + ip->i_flag |= IN_CHANGE | IN_UPDATE; + return (ext2_update(vp, 0)); + } + + if (ip->i_flag & IN_E4EXTENTS) + error = ext2_ext_truncate(vp, length, flags, cred, td); + else + error = ext2_ind_truncate(vp, length, flags, cred, td); + + return (error); } /* * discard preallocated blocks */ int ext2_inactive(struct vop_inactive_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); struct thread *td = ap->a_td; int mode, error = 0; /* * Ignore inodes related to stale file handles. */ if (ip->i_mode == 0) goto out; if (ip->i_nlink <= 0) { ext2_extattr_free(ip); error = ext2_truncate(vp, (off_t)0, 0, NOCRED, td); - ip->i_rdev = 0; + if (!(ip->i_flag & IN_E4EXTENTS)) + ip->i_rdev = 0; mode = ip->i_mode; ip->i_mode = 0; ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_vfree(vp, ip->i_number, mode); } if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) ext2_update(vp, 0); out: /* * If we are done with the inode, reclaim it * so that it can be reused immediately. */ if (ip->i_mode == 0) vrecycle(vp); return (error); } /* * Reclaim an inode so that it can be used for other purposes. */ int ext2_reclaim(struct vop_reclaim_args *ap) { struct inode *ip; struct vnode *vp = ap->a_vp; ip = VTOI(vp); if (ip->i_flag & IN_LAZYMOD) { ip->i_flag |= IN_MODIFIED; ext2_update(vp, 0); } vfs_hash_remove(vp); free(vp->v_data, M_EXT2NODE); vp->v_data = 0; vnode_destroy_vobject(vp); return (0); } Index: head/sys/fs/ext2fs/ext2_inode_cnv.c =================================================================== --- head/sys/fs/ext2fs/ext2_inode_cnv.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_inode_cnv.c (revision 324706) @@ -1,199 +1,193 @@ /*- * Copyright (c) 1995 The University of Utah and * the Computer Systems Laboratory at the University of Utah (CSL). * All rights reserved. * * Permission to use, copy, modify and distribute this software is hereby * granted provided that (1) source code retains these copyright, permission, * and disclaimer notices, and (2) redistributions including binaries * reproduce the notices in supporting documentation, and (3) all advertising * materials mentioning features or use of this software display the following * acknowledgement: ``This product includes software developed by the * Computer Systems Laboratory at the University of Utah.'' * * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * CSL requests users of this software to return to csl-dist@cs.utah.edu any * improvements that they make and grant CSL redistribution rights. 
* * Utah $Hdr$ * $FreeBSD$ */ /* * routines to convert on disk ext2 inodes into inodes and back */ #include #include #include #include #include #include #include #include #include #include #include #define XTIME_TO_NSEC(x) ((x & EXT3_NSEC_MASK) >> 2) #define NSEC_TO_XTIME(t) (le32toh(t << 2) & EXT3_NSEC_MASK) #ifdef EXT2FS_DEBUG void ext2_print_inode(struct inode *in) { int i; struct ext4_extent_header *ehp; struct ext4_extent *ep; printf("Inode: %5ju", (uintmax_t)in->i_number); printf( /* "Inode: %5d" */ - " Type: %10s Mode: 0x%o Flags: 0x%x Version: %d acl: 0x%llx\n", + " Type: %10s Mode: 0x%o Flags: 0x%x Version: %d acl: 0x%lx\n", "n/a", in->i_mode, in->i_flags, in->i_gen, in->i_facl); printf("User: %5u Group: %5u Size: %ju\n", in->i_uid, in->i_gid, (uintmax_t)in->i_size); printf("Links: %3d Blockcount: %ju\n", in->i_nlink, (uintmax_t)in->i_blocks); printf("ctime: 0x%x", in->i_ctime); printf("atime: 0x%x", in->i_atime); printf("mtime: 0x%x", in->i_mtime); if (E2DI_HAS_XTIME(in)) printf("crtime %#x ", in->i_birthtime); - printf("BLOCKS:"); - for (i = 0; i < (in->i_blocks <= 24 ? (in->i_blocks + 1) / 2 : 12); i++) - printf(" %d", in->i_db[i]); - printf("\n"); - printf("Extents:\n"); - ehp = (struct ext4_extent_header *)in->i_db; - printf("Header (magic 0x%x entries %d max %d depth %d gen %d)\n", - ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth, - ehp->eh_gen); - ep = (struct ext4_extent *)(char *)(ehp + 1); - printf("Index (blk %d len %d start_lo %d start_hi %d)\n", ep->e_blk, - ep->e_len, ep->e_start_lo, ep->e_start_hi); - printf("\n"); + if (in->i_flag & IN_E4EXTENTS) { + printf("Extents:\n"); + ehp = (struct ext4_extent_header *)in->i_db; + printf("Header (magic 0x%x entries %d max %d depth %d gen %d)\n", + ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth, + ehp->eh_gen); + ep = (struct ext4_extent *)(char *)(ehp + 1); + printf("Index (blk %d len %d start_lo %d start_hi %d)\n", ep->e_blk, + ep->e_len, ep->e_start_lo, ep->e_start_hi); + printf("\n"); + } else { + printf("BLOCKS:"); + for (i = 0; i < (in->i_blocks <= 24 ? (in->i_blocks + 1) / 2 : 12); i++) + printf(" %d", in->i_db[i]); + printf("\n"); + } } #endif /* EXT2FS_DEBUG */ /* * raw ext2 inode to inode */ void ext2_ei2i(struct ext2fs_dinode *ei, struct inode *ip) { - int i; - ip->i_nlink = ei->e2di_nlink; /* * Godmar thinks - if the link count is zero, then the inode is * unused - according to ext2 standards. Ufs marks this fact by * setting i_mode to zero - why ? I can see that this might lead to * problems in an undelete. */ ip->i_mode = ei->e2di_nlink ? ei->e2di_mode : 0; ip->i_size = ei->e2di_size; if (S_ISREG(ip->i_mode)) ip->i_size |= ((u_int64_t)ei->e2di_size_high) << 32; ip->i_atime = ei->e2di_atime; ip->i_mtime = ei->e2di_mtime; ip->i_ctime = ei->e2di_ctime; if (E2DI_HAS_XTIME(ip)) { ip->i_atimensec = XTIME_TO_NSEC(ei->e2di_atime_extra); ip->i_mtimensec = XTIME_TO_NSEC(ei->e2di_mtime_extra); ip->i_ctimensec = XTIME_TO_NSEC(ei->e2di_ctime_extra); ip->i_birthtime = ei->e2di_crtime; ip->i_birthnsec = XTIME_TO_NSEC(ei->e2di_crtime_extra); } ip->i_flags = 0; ip->i_flags |= (ei->e2di_flags & EXT2_APPEND) ? SF_APPEND : 0; ip->i_flags |= (ei->e2di_flags & EXT2_IMMUTABLE) ? SF_IMMUTABLE : 0; ip->i_flags |= (ei->e2di_flags & EXT2_NODUMP) ? UF_NODUMP : 0; ip->i_flag |= (ei->e2di_flags & EXT3_INDEX) ? IN_E3INDEX : 0; ip->i_flag |= (ei->e2di_flags & EXT4_EXTENTS) ? 
IN_E4EXTENTS : 0; ip->i_blocks = ei->e2di_nblock; ip->i_facl = ei->e2di_facl; if (E2DI_HAS_HUGE_FILE(ip)) { ip->i_blocks |= (uint64_t)ei->e2di_nblock_high << 32; ip->i_facl |= (uint64_t)ei->e2di_facl_high << 32; if (ei->e2di_flags & EXT4_HUGE_FILE) ip->i_blocks = fsbtodb(ip->i_e2fs, ip->i_blocks); } ip->i_gen = ei->e2di_gen; ip->i_uid = ei->e2di_uid; ip->i_gid = ei->e2di_gid; ip->i_uid |= (uint32_t)ei->e2di_uid_high << 16; ip->i_gid |= (uint32_t)ei->e2di_gid_high << 16; - for (i = 0; i < EXT2_NDADDR; i++) - ip->i_db[i] = ei->e2di_blocks[i]; - for (i = 0; i < EXT2_NIADDR; i++) - ip->i_ib[i] = ei->e2di_blocks[EXT2_NDIR_BLOCKS + i]; + memcpy(ip->i_data, ei->e2di_blocks, sizeof(ei->e2di_blocks)); } /* * inode to raw ext2 inode */ int ext2_i2ei(struct inode *ip, struct ext2fs_dinode *ei) { struct m_ext2fs *fs; - int i; fs = ip->i_e2fs; ei->e2di_mode = ip->i_mode; ei->e2di_nlink = ip->i_nlink; /* * Godmar thinks: if dtime is nonzero, ext2 says this inode has been * deleted, this would correspond to a zero link count */ ei->e2di_dtime = ei->e2di_nlink ? 0 : ip->i_mtime; ei->e2di_size = ip->i_size; if (S_ISREG(ip->i_mode)) ei->e2di_size_high = ip->i_size >> 32; ei->e2di_atime = ip->i_atime; ei->e2di_mtime = ip->i_mtime; ei->e2di_ctime = ip->i_ctime; if (E2DI_HAS_XTIME(ip)) { ei->e2di_ctime_extra = NSEC_TO_XTIME(ip->i_ctimensec); ei->e2di_mtime_extra = NSEC_TO_XTIME(ip->i_mtimensec); ei->e2di_atime_extra = NSEC_TO_XTIME(ip->i_atimensec); ei->e2di_crtime = ip->i_birthtime; ei->e2di_crtime_extra = NSEC_TO_XTIME(ip->i_birthnsec); } ei->e2di_flags = 0; ei->e2di_flags |= (ip->i_flags & SF_APPEND) ? EXT2_APPEND : 0; ei->e2di_flags |= (ip->i_flags & SF_IMMUTABLE) ? EXT2_IMMUTABLE : 0; ei->e2di_flags |= (ip->i_flags & UF_NODUMP) ? EXT2_NODUMP : 0; ei->e2di_flags |= (ip->i_flag & IN_E3INDEX) ? EXT3_INDEX : 0; ei->e2di_flags |= (ip->i_flag & IN_E4EXTENTS) ? EXT4_EXTENTS : 0; if (ip->i_blocks > ~0U && !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_HUGE_FILE)) { ext2_fserr(fs, ip->i_uid, "i_blocks value is out of range"); return (EIO); } if (ip->i_blocks <= 0xffffffffffffULL) { ei->e2di_nblock = ip->i_blocks & 0xffffffff; ei->e2di_nblock_high = ip->i_blocks >> 32 & 0xffff; } else { ei->e2di_flags |= EXT4_HUGE_FILE; ei->e2di_nblock = dbtofsb(fs, ip->i_blocks); ei->e2di_nblock_high = dbtofsb(fs, ip->i_blocks) >> 32 & 0xffff; } ei->e2di_facl = ip->i_facl & 0xffffffff; ei->e2di_facl_high = ip->i_facl >> 32 & 0xffff; ei->e2di_gen = ip->i_gen; ei->e2di_uid = ip->i_uid & 0xffff; ei->e2di_uid_high = ip->i_uid >> 16 & 0xffff; ei->e2di_gid = ip->i_gid & 0xffff; ei->e2di_gid_high = ip->i_gid >> 16 & 0xffff; - for (i = 0; i < EXT2_NDADDR; i++) - ei->e2di_blocks[i] = ip->i_db[i]; - for (i = 0; i < EXT2_NIADDR; i++) - ei->e2di_blocks[EXT2_NDIR_BLOCKS + i] = ip->i_ib[i]; + memcpy(ei->e2di_blocks, ip->i_data, sizeof(ei->e2di_blocks)); return (0); } Index: head/sys/fs/ext2fs/ext2_subr.c =================================================================== --- head/sys/fs/ext2fs/ext2_subr.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_subr.c (revision 324706) @@ -1,232 +1,191 @@ /*- * modified for Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_subr.c 8.2 (Berkeley) 9/21/93 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Return buffer with the contents of block "offset" from the beginning of * directory "ip". If "res" is non-zero, fill it in with a pointer to the * remaining space in the directory. */ int ext2_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp) { struct inode *ip; struct m_ext2fs *fs; struct buf *bp; e2fs_lbn_t lbn; - int bsize, error; - daddr_t newblk; - struct ext4_extent *ep; - struct ext4_extent_path path; + int error, bsize; ip = VTOI(vp); fs = ip->i_e2fs; lbn = lblkno(fs, offset); bsize = blksize(fs, ip, lbn); - *bpp = NULL; - /* - * IN_E4EXTENTS requires special treatment as we can otherwise fall - * back to the normal path. - */ - if (!(ip->i_flag & IN_E4EXTENTS)) - goto normal; - - memset(&path, 0, sizeof(path)); - if (ext4_ext_find_extent(fs, ip, lbn, &path) == NULL) - goto normal; - ep = path.ep_ext; - if (ep == NULL) - goto normal; - - newblk = lbn - ep->e_blk + - (ep->e_start_lo | (daddr_t)ep->e_start_hi << 32); - - if (path.ep_bp != NULL) { - brelse(path.ep_bp); - path.ep_bp = NULL; - } - error = bread(ip->i_devvp, fsbtodb(fs, newblk), bsize, NOCRED, &bp); - if (error != 0) { + if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) { brelse(bp); return (error); } if (res) *res = (char *)bp->b_data + blkoff(fs, offset); - /* - * If IN_E4EXTENTS is enabled we would get a wrong offset so - * reset b_offset here. - */ - bp->b_offset = lbn * bsize; + *bpp = bp; - return (0); -normal: - if (*bpp == NULL) { - if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) { - brelse(bp); - return (error); - } - if (res) - *res = (char *)bp->b_data + blkoff(fs, offset); - *bpp = bp; - } return (0); } /* * Update the cluster map because of an allocation of free like ffs. * * Cnt == 1 means free; cnt == -1 means allocating. */ void ext2_clusteracct(struct m_ext2fs *fs, char *bbp, int cg, daddr_t bno, int cnt) { int32_t *sump = fs->e2fs_clustersum[cg].cs_sum; int32_t *lp; int back, bit, end, forw, i, loc, start; /* Initialize the cluster summary array. 
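The initialization pass that follows counts maximal runs of free blocks in the group bitmap and caps each run at e2fs_contigsumsize. A simplified standalone sketch of that run counting, using a made-up three-byte bitmap and a four-entry summary (a set bit means "allocated", in the same sense that ext2_clusteracct() reads bbp[]):

    #include <stdint.h>
    #include <stdio.h>

    #define NBBY    8       /* bits per byte, as in the kernel */

    int
    main(void)
    {
            /* Toy free-block bitmap for one group; contents are made up. */
            uint8_t bbp[] = { 0x0f, 0xf0, 0x00 };   /* 24 blocks */
            int nblocks = 24, contigsumsize = 4;
            int sump[4 + 1] = { 0 };
            int run = 0;

            /* Count maximal runs of free (zero) bits, capping each run. */
            for (int i = 0; i < nblocks; i++) {
                    int bit = bbp[i / NBBY] & (1 << (i % NBBY));
                    if (bit == 0) {
                            run++;
                            continue;
                    }
                    if (run > 0)
                            sump[run > contigsumsize ? contigsumsize : run]++;
                    run = 0;
            }
            if (run > 0)
                    sump[run > contigsumsize ? contigsumsize : run]++;

            for (int i = 1; i <= contigsumsize; i++)
                    printf("clusters of length %s%d: %d\n",
                        i == contigsumsize ? ">= " : "", i, sump[i]);
            return (0);
    }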
*/ if (fs->e2fs_clustersum[cg].cs_init == 0) { int run = 0; bit = 1; loc = 0; for (i = 0; i < fs->e2fs->e2fs_fpg; i++) { if ((bbp[loc] & bit) == 0) run++; else if (run != 0) { if (run > fs->e2fs_contigsumsize) run = fs->e2fs_contigsumsize; sump[run]++; run = 0; } if ((i & (NBBY - 1)) != (NBBY - 1)) bit <<= 1; else { loc++; bit = 1; } } if (run != 0) { if (run > fs->e2fs_contigsumsize) run = fs->e2fs_contigsumsize; sump[run]++; } fs->e2fs_clustersum[cg].cs_init = 1; } if (fs->e2fs_contigsumsize <= 0) return; /* Find the size of the cluster going forward. */ start = bno + 1; end = start + fs->e2fs_contigsumsize; if (end > fs->e2fs->e2fs_fpg) end = fs->e2fs->e2fs_fpg; loc = start / NBBY; bit = 1 << (start % NBBY); for (i = start; i < end; i++) { if ((bbp[loc] & bit) != 0) break; if ((i & (NBBY - 1)) != (NBBY - 1)) bit <<= 1; else { loc++; bit = 1; } } forw = i - start; /* Find the size of the cluster going backward. */ start = bno - 1; end = start - fs->e2fs_contigsumsize; if (end < 0) end = -1; loc = start / NBBY; bit = 1 << (start % NBBY); for (i = start; i > end; i--) { if ((bbp[loc] & bit) != 0) break; if ((i & (NBBY - 1)) != 0) bit >>= 1; else { loc--; bit = 1 << (NBBY - 1); } } back = start - i; /* * Account for old cluster and the possibly new forward and * back clusters. */ i = back + forw + 1; if (i > fs->e2fs_contigsumsize) i = fs->e2fs_contigsumsize; sump[i] += cnt; if (back > 0) sump[back] -= cnt; if (forw > 0) sump[forw] -= cnt; /* Update cluster summary information. */ lp = &sump[fs->e2fs_contigsumsize]; for (i = fs->e2fs_contigsumsize; i > 0; i--) if (*lp-- > 0) break; fs->e2fs_maxcluster[cg] = i; } Index: head/sys/fs/ext2fs/ext2_vfsops.c =================================================================== --- head/sys/fs/ext2fs/ext2_vfsops.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_vfsops.c (revision 324706) @@ -1,1128 +1,1129 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * Copyright (c) 1989, 1991, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ffs_vfsops.c 8.8 (Berkeley) 4/18/94 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int ext2_flushfiles(struct mount *mp, int flags, struct thread *td); static int ext2_mountfs(struct vnode *, struct mount *); static int ext2_reload(struct mount *mp, struct thread *td); static int ext2_sbupdate(struct ext2mount *, int); static int ext2_cgupdate(struct ext2mount *, int); static vfs_unmount_t ext2_unmount; static vfs_root_t ext2_root; static vfs_statfs_t ext2_statfs; static vfs_sync_t ext2_sync; static vfs_vget_t ext2_vget; static vfs_fhtovp_t ext2_fhtovp; static vfs_mount_t ext2_mount; MALLOC_DEFINE(M_EXT2NODE, "ext2_node", "EXT2 vnode private part"); static MALLOC_DEFINE(M_EXT2MNT, "ext2_mount", "EXT2 mount structure"); static struct vfsops ext2fs_vfsops = { .vfs_fhtovp = ext2_fhtovp, .vfs_mount = ext2_mount, .vfs_root = ext2_root, /* root inode via vget */ .vfs_statfs = ext2_statfs, .vfs_sync = ext2_sync, .vfs_unmount = ext2_unmount, .vfs_vget = ext2_vget, }; VFS_SET(ext2fs_vfsops, ext2fs, 0); static int ext2_check_sb_compat(struct ext2fs *es, struct cdev *dev, int ronly); static int compute_sb_data(struct vnode * devvp, struct ext2fs * es, struct m_ext2fs * fs); static const char *ext2_opts[] = { "acls", "async", "noatime", "noclusterr", "noclusterw", "noexec", "export", "force", "from", "multilabel", "suiddir", "nosymfollow", "sync", "union", NULL }; /* * VFS Operations. * * mount system call */ static int ext2_mount(struct mount *mp) { struct vfsoptlist *opts; struct vnode *devvp; struct thread *td; struct ext2mount *ump = NULL; struct m_ext2fs *fs; struct nameidata nd, *ndp = &nd; accmode_t accmode; char *path, *fspec; int error, flags, len; td = curthread; opts = mp->mnt_optnew; if (vfs_filteropt(opts, ext2_opts)) return (EINVAL); vfs_getopt(opts, "fspath", (void **)&path, NULL); /* Double-check the length of path.. */ if (strlen(path) >= MAXMNTLEN) return (ENAMETOOLONG); fspec = NULL; error = vfs_getopt(opts, "from", (void **)&fspec, &len); if (!error && fspec[len - 1] != '\0') return (EINVAL); /* * If updating, check whether changing from read-only to * read/write; if there is no device name, that's all we do. 
*/ if (mp->mnt_flag & MNT_UPDATE) { ump = VFSTOEXT2(mp); fs = ump->um_e2fs; error = 0; if (fs->e2fs_ronly == 0 && vfs_flagopt(opts, "ro", NULL, 0)) { error = VFS_SYNC(mp, MNT_WAIT); if (error) return (error); flags = WRITECLOSE; if (mp->mnt_flag & MNT_FORCE) flags |= FORCECLOSE; error = ext2_flushfiles(mp, flags, td); if (error == 0 && fs->e2fs_wasvalid && ext2_cgupdate(ump, MNT_WAIT) == 0) { fs->e2fs->e2fs_state |= E2FS_ISCLEAN; ext2_sbupdate(ump, MNT_WAIT); } fs->e2fs_ronly = 1; vfs_flagopt(opts, "ro", &mp->mnt_flag, MNT_RDONLY); g_topology_lock(); g_access(ump->um_cp, 0, -1, 0); g_topology_unlock(); } if (!error && (mp->mnt_flag & MNT_RELOAD)) error = ext2_reload(mp, td); if (error) return (error); devvp = ump->um_devvp; if (fs->e2fs_ronly && !vfs_flagopt(opts, "ro", NULL, 0)) { if (ext2_check_sb_compat(fs->e2fs, devvp->v_rdev, 0)) return (EPERM); /* * If upgrade to read-write by non-root, then verify * that user has necessary permissions on the device. */ vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); error = VOP_ACCESS(devvp, VREAD | VWRITE, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { VOP_UNLOCK(devvp, 0); return (error); } VOP_UNLOCK(devvp, 0); g_topology_lock(); error = g_access(ump->um_cp, 0, 1, 0); g_topology_unlock(); if (error) return (error); if ((fs->e2fs->e2fs_state & E2FS_ISCLEAN) == 0 || (fs->e2fs->e2fs_state & E2FS_ERRORS)) { if (mp->mnt_flag & MNT_FORCE) { printf( "WARNING: %s was not properly dismounted\n", fs->e2fs_fsmnt); } else { printf( "WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n", fs->e2fs_fsmnt); return (EPERM); } } fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN; (void)ext2_cgupdate(ump, MNT_WAIT); fs->e2fs_ronly = 0; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_RDONLY; MNT_IUNLOCK(mp); } if (vfs_flagopt(opts, "export", NULL, 0)) { /* Process export requests in vfs_mount.c. */ return (error); } } /* * Not an update, or updating the name: look up the name * and verify that it refers to a sensible disk device. */ if (fspec == NULL) return (EINVAL); NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); if ((error = namei(ndp)) != 0) return (error); NDFREE(ndp, NDF_ONLY_PNBUF); devvp = ndp->ni_vp; if (!vn_isdisk(devvp, &error)) { vput(devvp); return (error); } /* * If mount by non-root, then verify that user has necessary * permissions on the device. * * XXXRW: VOP_ACCESS() enough? */ accmode = VREAD; if ((mp->mnt_flag & MNT_RDONLY) == 0) accmode |= VWRITE; error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { vput(devvp); return (error); } if ((mp->mnt_flag & MNT_UPDATE) == 0) { error = ext2_mountfs(devvp, mp); } else { if (devvp != ump->um_devvp) { vput(devvp); return (EINVAL); /* needs translation */ } else vput(devvp); } if (error) { vrele(devvp); return (error); } ump = VFSTOEXT2(mp); fs = ump->um_e2fs; /* * Note that this strncpy() is ok because of a check at the start * of ext2_mount(). 
*/ strncpy(fs->e2fs_fsmnt, path, MAXMNTLEN); fs->e2fs_fsmnt[MAXMNTLEN - 1] = '\0'; vfs_mountedfrom(mp, fspec); return (0); } static int ext2_check_sb_compat(struct ext2fs *es, struct cdev *dev, int ronly) { uint32_t i, mask; if (es->e2fs_magic != E2FS_MAGIC) { printf("ext2fs: %s: wrong magic number %#x (expected %#x)\n", devtoname(dev), es->e2fs_magic, E2FS_MAGIC); return (1); } if (es->e2fs_rev > E2FS_REV0) { mask = es->e2fs_features_incompat & ~(EXT2F_INCOMPAT_SUPP | EXT4F_RO_INCOMPAT_SUPP); if (mask) { printf("WARNING: mount of %s denied due to " "unsupported optional features:\n", devtoname(dev)); for (i = 0; i < sizeof(incompat)/sizeof(struct ext2_feature); i++) if (mask & incompat[i].mask) printf("%s ", incompat[i].name); printf("\n"); return (1); } mask = es->e2fs_features_rocompat & ~EXT2F_ROCOMPAT_SUPP; if (!ronly && mask) { printf("WARNING: R/W mount of %s denied due to " "unsupported optional features:\n", devtoname(dev)); for (i = 0; i < sizeof(ro_compat)/sizeof(struct ext2_feature); i++) if (mask & ro_compat[i].mask) printf("%s ", ro_compat[i].name); printf("\n"); return (1); } } return (0); } /* * This computes the fields of the m_ext2fs structure from the * data in the ext2fs structure read in. */ static int compute_sb_data(struct vnode *devvp, struct ext2fs *es, struct m_ext2fs *fs) { int db_count, error; int i; int logic_sb_block = 1; /* XXX for now */ struct buf *bp; uint32_t e2fs_descpb; fs->e2fs_bshift = EXT2_MIN_BLOCK_LOG_SIZE + es->e2fs_log_bsize; fs->e2fs_bsize = 1U << fs->e2fs_bshift; fs->e2fs_fsbtodb = es->e2fs_log_bsize + 1; fs->e2fs_qbmask = fs->e2fs_bsize - 1; fs->e2fs_fsize = EXT2_MIN_FRAG_SIZE << es->e2fs_log_fsize; if (fs->e2fs_fsize) fs->e2fs_fpb = fs->e2fs_bsize / fs->e2fs_fsize; fs->e2fs_bpg = es->e2fs_bpg; fs->e2fs_fpg = es->e2fs_fpg; fs->e2fs_ipg = es->e2fs_ipg; if (es->e2fs_rev == E2FS_REV0) { fs->e2fs_isize = E2FS_REV0_INODE_SIZE; } else { fs->e2fs_isize = es->e2fs_inode_size; /* * Simple sanity check for superblock inode size value. */ if (EXT2_INODE_SIZE(fs) < E2FS_REV0_INODE_SIZE || EXT2_INODE_SIZE(fs) > fs->e2fs_bsize || (fs->e2fs_isize & (fs->e2fs_isize - 1)) != 0) { printf("ext2fs: invalid inode size %d\n", fs->e2fs_isize); return (EIO); } } /* Check for extra isize in big inodes. */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_EXTRA_ISIZE) && EXT2_INODE_SIZE(fs) < sizeof(struct ext2fs_dinode)) { printf("ext2fs: no space for extra inode timestamps\n"); return (EINVAL); } /* Check for group descriptor size */ if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_64BIT) && (es->e3fs_desc_size != sizeof(struct ext2_gd))) { printf("ext2fs: group descriptor size unsupported %d\n", es->e3fs_desc_size); return (EINVAL); } fs->e2fs_ipb = fs->e2fs_bsize / EXT2_INODE_SIZE(fs); fs->e2fs_itpg = fs->e2fs_ipg / fs->e2fs_ipb; /* s_resuid / s_resgid ? */ fs->e2fs_gcount = howmany(es->e2fs_bcount - es->e2fs_first_dblock, EXT2_BLOCKS_PER_GROUP(fs)); e2fs_descpb = fs->e2fs_bsize / sizeof(struct ext2_gd); db_count = howmany(fs->e2fs_gcount, e2fs_descpb); fs->e2fs_gdbcount = db_count; fs->e2fs_gd = malloc(db_count * fs->e2fs_bsize, M_EXT2MNT, M_WAITOK); fs->e2fs_contigdirs = malloc(fs->e2fs_gcount * sizeof(*fs->e2fs_contigdirs), M_EXT2MNT, M_WAITOK | M_ZERO); /* * Adjust logic_sb_block. * Godmar thinks: if the blocksize is greater than 1024, then * the superblock is logically part of block zero. 
*/ if (fs->e2fs_bsize > SBSIZE) logic_sb_block = 0; for (i = 0; i < db_count; i++) { error = bread(devvp, fsbtodb(fs, logic_sb_block + i + 1), fs->e2fs_bsize, NOCRED, &bp); if (error) { free(fs->e2fs_contigdirs, M_EXT2MNT); free(fs->e2fs_gd, M_EXT2MNT); brelse(bp); return (error); } e2fs_cgload((struct ext2_gd *)bp->b_data, &fs->e2fs_gd[ i * fs->e2fs_bsize / sizeof(struct ext2_gd)], fs->e2fs_bsize); brelse(bp); bp = NULL; } /* Verify cg csum */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) { error = ext2_gd_csum_verify(fs, devvp->v_rdev); if (error) return (error); } /* Initialization for the ext2 Orlov allocator variant. */ fs->e2fs_total_dir = 0; for (i = 0; i < fs->e2fs_gcount; i++) fs->e2fs_total_dir += fs->e2fs_gd[i].ext2bgd_ndirs; if (es->e2fs_rev == E2FS_REV0 || !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_LARGEFILE)) fs->e2fs_maxfilesize = 0x7fffffff; else { fs->e2fs_maxfilesize = 0xffffffffffff; if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_HUGE_FILE)) fs->e2fs_maxfilesize = 0x7fffffffffffffff; } if (es->e4fs_flags & E2FS_UNSIGNED_HASH) { fs->e2fs_uhash = 3; } else if ((es->e4fs_flags & E2FS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ es->e4fs_flags |= E2FS_UNSIGNED_HASH; fs->e2fs_uhash = 3; #else es->e4fs_flags |= E2FS_SIGNED_HASH; #endif } return (0); } /* * Reload all incore data for a filesystem (used after running fsck on * the root filesystem and finding things to fix). The filesystem must * be mounted read-only. * * Things to do to update the mount: * 1) invalidate all cached meta-data. * 2) re-read superblock from disk. * 3) invalidate all cluster summary information. * 4) invalidate all inactive vnodes. * 5) invalidate all cached file data. * 6) re-read inode data for all active vnodes. * XXX we are missing some steps, in particular # 3, this has to be reviewed. */ static int ext2_reload(struct mount *mp, struct thread *td) { struct vnode *vp, *mvp, *devvp; struct inode *ip; struct buf *bp; struct ext2fs *es; struct m_ext2fs *fs; struct csum *sump; int error, i; int32_t *lp; if ((mp->mnt_flag & MNT_RDONLY) == 0) return (EINVAL); /* * Step 1: invalidate all cached meta-data. */ devvp = VFSTOEXT2(mp)->um_devvp; vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); if (vinvalbuf(devvp, 0, 0, 0) != 0) panic("ext2_reload: dirty1"); VOP_UNLOCK(devvp, 0); /* * Step 2: re-read superblock from disk. * constants have been adjusted for ext2 */ if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0) return (error); es = (struct ext2fs *)bp->b_data; if (ext2_check_sb_compat(es, devvp->v_rdev, 0) != 0) { brelse(bp); return (EIO); /* XXX needs translation */ } fs = VFSTOEXT2(mp)->um_e2fs; bcopy(bp->b_data, fs->e2fs, sizeof(struct ext2fs)); if ((error = compute_sb_data(devvp, es, fs)) != 0) { brelse(bp); return (error); } #ifdef UNKLAR if (fs->fs_sbsize < SBSIZE) bp->b_flags |= B_INVAL; #endif brelse(bp); /* * Step 3: invalidate all cluster summary information. */ if (fs->e2fs_contigsumsize > 0) { lp = fs->e2fs_maxcluster; sump = fs->e2fs_clustersum; for (i = 0; i < fs->e2fs_gcount; i++, sump++) { *lp++ = fs->e2fs_contigsumsize; sump->cs_init = 0; bzero(sump->cs_sum, fs->e2fs_contigsumsize + 1); } } loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { /* * Step 4: invalidate all cached file data. */ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } if (vinvalbuf(vp, 0, 0, 0)) panic("ext2_reload: dirty2"); /* * Step 5: re-read inode data for all active vnodes. 
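Earlier in compute_sb_data() the maximum file size is picked from just the revision and two read-only-compatible feature bits: REV0 or no LARGEFILE caps files at 2^31-1 bytes, LARGEFILE alone allows 2^48-1, and HUGE_FILE raises the cap to 2^63-1. A compact restatement of that decision, with the feature tests reduced to booleans (the kernel uses its feature macros instead):

    #include <stdint.h>

    /* Mirror of the limit selection in compute_sb_data(); illustrative only. */
    static uint64_t
    max_file_size(int rev0, int largefile, int hugefile)
    {
        if (rev0 || !largefile)
            return (0x7fffffffULL);             /* 2^31 - 1 */
        if (hugefile)
            return (0x7fffffffffffffffULL);     /* 2^63 - 1 */
        return (0xffffffffffffULL);             /* 2^48 - 1 */
    }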
*/ ip = VTOI(vp); error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), (int)fs->e2fs_bsize, NOCRED, &bp); if (error) { VOP_UNLOCK(vp, 0); vrele(vp); MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); return (error); } ext2_ei2i((struct ext2fs_dinode *)((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)), ip); brelse(bp); VOP_UNLOCK(vp, 0); vrele(vp); } return (0); } /* * Common code for mount and mountroot. */ static int ext2_mountfs(struct vnode *devvp, struct mount *mp) { struct ext2mount *ump; struct buf *bp; struct m_ext2fs *fs; struct ext2fs *es; struct cdev *dev = devvp->v_rdev; struct g_consumer *cp; struct bufobj *bo; struct csum *sump; int error; int ronly; int i; u_long size; int32_t *lp; int32_t e2fs_maxcontig; ronly = vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0); /* XXX: use VOP_ACESS to check FS perms */ g_topology_lock(); error = g_vfs_open(devvp, &cp, "ext2fs", ronly ? 0 : 1); g_topology_unlock(); VOP_UNLOCK(devvp, 0); if (error) return (error); /* XXX: should we check for some sectorsize or 512 instead? */ if (((SBSIZE % cp->provider->sectorsize) != 0) || (SBSIZE < cp->provider->sectorsize)) { g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); return (EINVAL); } bo = &devvp->v_bufobj; bo->bo_private = cp; bo->bo_ops = g_vfs_bufops; if (devvp->v_rdev->si_iosize_max != 0) mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max; if (mp->mnt_iosize_max > MAXPHYS) mp->mnt_iosize_max = MAXPHYS; bp = NULL; ump = NULL; if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0) goto out; es = (struct ext2fs *)bp->b_data; if (ext2_check_sb_compat(es, dev, ronly) != 0) { error = EINVAL; /* XXX needs translation */ goto out; } if ((es->e2fs_state & E2FS_ISCLEAN) == 0 || (es->e2fs_state & E2FS_ERRORS)) { if (ronly || (mp->mnt_flag & MNT_FORCE)) { printf( "WARNING: Filesystem was not properly dismounted\n"); } else { printf( "WARNING: R/W mount denied. Filesystem is not clean - run fsck\n"); error = EPERM; goto out; } } ump = malloc(sizeof(*ump), M_EXT2MNT, M_WAITOK | M_ZERO); /* * I don't know whether this is the right strategy. Note that * we dynamically allocate both an m_ext2fs and an ext2fs * while Linux keeps the super block in a locked buffer. */ ump->um_e2fs = malloc(sizeof(struct m_ext2fs), M_EXT2MNT, M_WAITOK | M_ZERO); ump->um_e2fs->e2fs = malloc(sizeof(struct ext2fs), M_EXT2MNT, M_WAITOK); mtx_init(EXT2_MTX(ump), "EXT2FS", "EXT2FS Lock", MTX_DEF); bcopy(es, ump->um_e2fs->e2fs, (u_int)sizeof(struct ext2fs)); if ((error = compute_sb_data(devvp, ump->um_e2fs->e2fs, ump->um_e2fs))) goto out; /* * Calculate the maximum contiguous blocks and size of cluster summary * array. In FFS this is done by newfs; however, the superblock * in ext2fs doesn't have these variables, so we can calculate * them here. 
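The comment above introduces the calculation performed just below: the largest run of blocks one physical I/O can cover, clamped by the driver's own cap, sizes the per-group cluster summaries. With a 128 KiB MAXPHYS and 4 KiB blocks that is 32 entries before the cap; a parameterized sketch (the actual MAXPHYS and EXT2_MAXCONTIG values are configuration-dependent, so they are passed in here):

    #include <stdint.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* How many entries each group's cluster summary needs. */
    static int32_t
    contig_summary_size(uint32_t maxphys, uint32_t bsize, int32_t cap)
    {
        int32_t maxcontig = MAX(1, (int32_t)(maxphys / bsize));

        return (MIN(maxcontig, cap));   /* e.g. MIN(131072 / 4096, cap) */
    }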
*/ e2fs_maxcontig = MAX(1, MAXPHYS / ump->um_e2fs->e2fs_bsize); ump->um_e2fs->e2fs_contigsumsize = MIN(e2fs_maxcontig, EXT2_MAXCONTIG); if (ump->um_e2fs->e2fs_contigsumsize > 0) { size = ump->um_e2fs->e2fs_gcount * sizeof(int32_t); ump->um_e2fs->e2fs_maxcluster = malloc(size, M_EXT2MNT, M_WAITOK); size = ump->um_e2fs->e2fs_gcount * sizeof(struct csum); ump->um_e2fs->e2fs_clustersum = malloc(size, M_EXT2MNT, M_WAITOK); lp = ump->um_e2fs->e2fs_maxcluster; sump = ump->um_e2fs->e2fs_clustersum; for (i = 0; i < ump->um_e2fs->e2fs_gcount; i++, sump++) { *lp++ = ump->um_e2fs->e2fs_contigsumsize; sump->cs_init = 0; sump->cs_sum = malloc((ump->um_e2fs->e2fs_contigsumsize + 1) * sizeof(int32_t), M_EXT2MNT, M_WAITOK | M_ZERO); } } brelse(bp); bp = NULL; fs = ump->um_e2fs; fs->e2fs_ronly = ronly; /* ronly is set according to mnt_flags */ /* * If the fs is not mounted read-only, make sure the super block is * always written back on a sync(). */ fs->e2fs_wasvalid = fs->e2fs->e2fs_state & E2FS_ISCLEAN ? 1 : 0; if (ronly == 0) { fs->e2fs_fmod = 1; /* mark it modified */ fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN; /* set fs invalid */ } mp->mnt_data = ump; mp->mnt_stat.f_fsid.val[0] = dev2udev(dev); mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; mp->mnt_maxsymlinklen = EXT2_MAXSYMLINKLEN; MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; MNT_IUNLOCK(mp); ump->um_mountp = mp; ump->um_dev = dev; ump->um_devvp = devvp; ump->um_bo = &devvp->v_bufobj; ump->um_cp = cp; /* * Setting those two parameters allowed us to use * ufs_bmap w/o changse! */ ump->um_nindir = EXT2_ADDR_PER_BLOCK(fs); ump->um_bptrtodb = fs->e2fs->e2fs_log_bsize + 1; ump->um_seqinc = EXT2_FRAGS_PER_BLOCK(fs); if (ronly == 0) ext2_sbupdate(ump, MNT_WAIT); /* * Initialize filesystem stat information in mount struct. */ MNT_ILOCK(mp); mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | MNTK_USES_BCACHE; MNT_IUNLOCK(mp); return (0); out: if (bp) brelse(bp); if (cp != NULL) { g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); } if (ump) { mtx_destroy(EXT2_MTX(ump)); free(ump->um_e2fs->e2fs_gd, M_EXT2MNT); free(ump->um_e2fs->e2fs_contigdirs, M_EXT2MNT); free(ump->um_e2fs->e2fs, M_EXT2MNT); free(ump->um_e2fs, M_EXT2MNT); free(ump, M_EXT2MNT); mp->mnt_data = NULL; } return (error); } /* * Unmount system call. */ static int ext2_unmount(struct mount *mp, int mntflags) { struct ext2mount *ump; struct m_ext2fs *fs; struct csum *sump; int error, flags, i, ronly; flags = 0; if (mntflags & MNT_FORCE) { if (mp->mnt_flag & MNT_ROOTFS) return (EINVAL); flags |= FORCECLOSE; } if ((error = ext2_flushfiles(mp, flags, curthread)) != 0) return (error); ump = VFSTOEXT2(mp); fs = ump->um_e2fs; ronly = fs->e2fs_ronly; if (ronly == 0 && ext2_cgupdate(ump, MNT_WAIT) == 0) { if (fs->e2fs_wasvalid) fs->e2fs->e2fs_state |= E2FS_ISCLEAN; ext2_sbupdate(ump, MNT_WAIT); } g_topology_lock(); g_vfs_close(ump->um_cp); g_topology_unlock(); vrele(ump->um_devvp); sump = fs->e2fs_clustersum; for (i = 0; i < fs->e2fs_gcount; i++, sump++) free(sump->cs_sum, M_EXT2MNT); free(fs->e2fs_clustersum, M_EXT2MNT); free(fs->e2fs_maxcluster, M_EXT2MNT); free(fs->e2fs_gd, M_EXT2MNT); free(fs->e2fs_contigdirs, M_EXT2MNT); free(fs->e2fs, M_EXT2MNT); free(fs, M_EXT2MNT); free(ump, M_EXT2MNT); mp->mnt_data = NULL; MNT_ILOCK(mp); mp->mnt_flag &= ~MNT_LOCAL; MNT_IUNLOCK(mp); return (error); } /* * Flush out all the files in a filesystem. 
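um_nindir and um_bptrtodb, set above, are what let the driver reuse the UFS-style block-map code unchanged: with 4 KiB blocks and 32-bit block pointers an indirect block holds 1024 entries, so the classic 12-direct/single/double/triple-indirect scheme covers roughly 4 TiB of file data. A quick sketch of that capacity calculation (the 12 direct slots and 4-byte pointers are the standard ext2 layout, assumed rather than read from the code above):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t bsize = 4096;
        uint64_t nindir = bsize / sizeof(uint32_t); /* pointers per block: 1024 */
        uint64_t ndir = 12;                         /* EXT2_NDIR_BLOCKS         */
        uint64_t blocks = ndir + nindir + nindir * nindir +
            nindir * nindir * nindir;

        printf("addressable blocks: %ju (~%ju GiB of data)\n",
            (uintmax_t)blocks, (uintmax_t)(blocks * bsize >> 30));
        return (0);
    }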
*/ static int ext2_flushfiles(struct mount *mp, int flags, struct thread *td) { int error; error = vflush(mp, 0, flags, td); return (error); } /* * Get filesystem statistics. */ int ext2_statfs(struct mount *mp, struct statfs *sbp) { struct ext2mount *ump; struct m_ext2fs *fs; uint32_t overhead, overhead_per_group, ngdb; int i, ngroups; ump = VFSTOEXT2(mp); fs = ump->um_e2fs; if (fs->e2fs->e2fs_magic != E2FS_MAGIC) panic("ext2_statfs"); /* * Compute the overhead (FS structures) */ overhead_per_group = 1 /* block bitmap */ + 1 /* inode bitmap */ + fs->e2fs_itpg; overhead = fs->e2fs->e2fs_first_dblock + fs->e2fs_gcount * overhead_per_group; if (fs->e2fs->e2fs_rev > E2FS_REV0 && fs->e2fs->e2fs_features_rocompat & EXT2F_ROCOMPAT_SPARSESUPER) { for (i = 0, ngroups = 0; i < fs->e2fs_gcount; i++) { if (ext2_cg_has_sb(fs, i)) ngroups++; } } else { ngroups = fs->e2fs_gcount; } ngdb = fs->e2fs_gdbcount; if (fs->e2fs->e2fs_rev > E2FS_REV0 && fs->e2fs->e2fs_features_compat & EXT2F_COMPAT_RESIZE) ngdb += fs->e2fs->e2fs_reserved_ngdb; overhead += ngroups * (1 /* superblock */ + ngdb); sbp->f_bsize = EXT2_FRAG_SIZE(fs); sbp->f_iosize = EXT2_BLOCK_SIZE(fs); sbp->f_blocks = fs->e2fs->e2fs_bcount - overhead; sbp->f_bfree = fs->e2fs->e2fs_fbcount; sbp->f_bavail = sbp->f_bfree - fs->e2fs->e2fs_rbcount; sbp->f_files = fs->e2fs->e2fs_icount; sbp->f_ffree = fs->e2fs->e2fs_ficount; return (0); } /* * Go through the disk queues to initiate sandbagged IO; * go through the inodes to write those that have been modified; * initiate the writing of the super block if it has been modified. * * Note: we are always called with the filesystem marked `MPBUSY'. */ static int ext2_sync(struct mount *mp, int waitfor) { struct vnode *mvp, *vp; struct thread *td; struct inode *ip; struct ext2mount *ump = VFSTOEXT2(mp); struct m_ext2fs *fs; int error, allerror = 0; td = curthread; fs = ump->um_e2fs; if (fs->e2fs_fmod != 0 && fs->e2fs_ronly != 0) { /* XXX */ printf("fs = %s\n", fs->e2fs_fsmnt); panic("ext2_sync: rofs mod"); } /* * Write back each (modified) inode. */ loop: MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { if (vp->v_type == VNON) { VI_UNLOCK(vp); continue; } ip = VTOI(vp); if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && (vp->v_bufobj.bo_dirty.bv_cnt == 0 || waitfor == MNT_LAZY)) { VI_UNLOCK(vp); continue; } error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td); if (error) { if (error == ENOENT) { MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); goto loop; } continue; } if ((error = VOP_FSYNC(vp, waitfor, td)) != 0) allerror = error; VOP_UNLOCK(vp, 0); vrele(vp); } /* * Force stale filesystem control information to be flushed. */ if (waitfor != MNT_LAZY) { vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); if ((error = VOP_FSYNC(ump->um_devvp, waitfor, td)) != 0) allerror = error; VOP_UNLOCK(ump->um_devvp, 0); } /* * Write back modified superblock. */ if (fs->e2fs_fmod != 0) { fs->e2fs_fmod = 0; fs->e2fs->e2fs_wtime = time_second; if ((error = ext2_cgupdate(ump, waitfor)) != 0) allerror = error; } return (allerror); } /* * Look up an EXT2FS dinode number to find its incore vnode, otherwise read it * in from disk. If it is in core, wait for the lock bit to clear, then * return the inode locked. Detection and handling of mount points must be * done by the calling routine. 
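ext2_statfs() above subtracts the filesystem's own metadata from the raw block count: every group pays one block bitmap, one inode bitmap and its inode table, and every group carrying a superblock backup (all of them unless SPARSE_SUPER limits backups to groups 0, 1 and powers of 3, 5 and 7) pays one superblock plus the group-descriptor blocks. A simplified recomputation without the sparse-super group walk:

    #include <stdint.h>

    /* Simplified form of the overhead arithmetic in ext2_statfs(). */
    static uint64_t
    fs_overhead(uint64_t first_dblock, uint64_t gcount, uint64_t itpg,
        uint64_t groups_with_sb, uint64_t gdb_blocks)
    {
        uint64_t per_group = 1 /* block bitmap */ + 1 /* inode bitmap */ + itpg;

        return (first_dblock + gcount * per_group +
            groups_with_sb * (1 /* superblock */ + gdb_blocks));
    }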
*/ static int ext2_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp) { struct m_ext2fs *fs; struct inode *ip; struct ext2mount *ump; struct buf *bp; struct vnode *vp; struct thread *td; int i, error; int used_blocks; td = curthread; error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); ump = VFSTOEXT2(mp); ip = malloc(sizeof(struct inode), M_EXT2NODE, M_WAITOK | M_ZERO); /* Allocate a new vnode/inode. */ if ((error = getnewvnode("ext2fs", mp, &ext2_vnodeops, &vp)) != 0) { *vpp = NULL; free(ip, M_EXT2NODE); return (error); } vp->v_data = ip; ip->i_vnode = vp; ip->i_e2fs = fs = ump->um_e2fs; ip->i_ump = ump; ip->i_number = ino; lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); error = insmntque(vp, mp); if (error != 0) { free(ip, M_EXT2NODE); *vpp = NULL; return (error); } error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL); if (error || *vpp != NULL) return (error); /* Read in the disk contents for the inode, copy into the inode. */ if ((error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) { /* * The inode does not contain anything useful, so it would * be misleading to leave it on its hash chain. With mode * still zero, it will be unlinked and returned to the free * list by vput(). */ brelse(bp); vput(vp); *vpp = NULL; return (error); } /* convert ext2 inode to dinode */ ext2_ei2i((struct ext2fs_dinode *)((char *)bp->b_data + EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ino)), ip); ip->i_block_group = ino_to_cg(fs, ino); ip->i_next_alloc_block = 0; ip->i_next_alloc_goal = 0; /* * Now we want to make sure that block pointers for unused * blocks are zeroed out - ext2_balloc depends on this * although for regular files and directories only * * If IN_E4EXTENTS is enabled, unused blocks are not zeroed * out because we could corrupt the extent tree. */ if (!(ip->i_flag & IN_E4EXTENTS) && (S_ISDIR(ip->i_mode) || S_ISREG(ip->i_mode))) { used_blocks = howmany(ip->i_size, fs->e2fs_bsize); for (i = used_blocks; i < EXT2_NDIR_BLOCKS; i++) ip->i_db[i] = 0; } #ifdef EXT2FS_DEBUG ext2_print_inode(ip); + ext4_ext_print_extent_tree_status(ip); #endif bqrelse(bp); /* * Initialize the vnode from the inode, check for aliases. * Note that the underlying vnode may have changed. */ if ((error = ext2_vinit(mp, &ext2_fifoops, &vp)) != 0) { vput(vp); *vpp = NULL; return (error); } /* * Finish inode initialization. */ *vpp = vp; return (0); } /* * File handle to vnode * * Have to be really careful about stale file handles: * - check that the inode number is valid * - call ext2_vget() to get the locked inode * - check for an unallocated inode (i_mode == 0) * - check that the given client host has export rights and return * those rights via. exflagsp and credanonp */ static int ext2_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp) { struct inode *ip; struct ufid *ufhp; struct vnode *nvp; struct m_ext2fs *fs; int error; ufhp = (struct ufid *)fhp; fs = VFSTOEXT2(mp)->um_e2fs; if (ufhp->ufid_ino < EXT2_ROOTINO || ufhp->ufid_ino > fs->e2fs_gcount * fs->e2fs->e2fs_ipg) return (ESTALE); error = VFS_VGET(mp, ufhp->ufid_ino, LK_EXCLUSIVE, &nvp); if (error) { *vpp = NULLVP; return (error); } ip = VTOI(nvp); if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen || ip->i_nlink <= 0) { vput(nvp); *vpp = NULLVP; return (ESTALE); } *vpp = nvp; vnode_create_vobject(*vpp, 0, curthread); return (0); } /* * Write a superblock and associated information back to disk. 
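ext2_vget() and ext2_reload() above both find an inode on disk through the same arithmetic hidden in the ino_to_cg()/ino_to_fsba()/ino_to_fsbo() macros: the 1-based inode number selects a group, the group's inode table supplies a base block, and the remainder picks the slot inside that block. A rough userland sketch of the mapping; in the kernel the inode-table start comes from the group descriptor, here it is just a parameter:

    #include <stdint.h>

    struct ino_loc {
        uint32_t group;     /* block group holding the inode     */
        uint64_t fsblock;   /* filesystem block within the table */
        uint32_t offset;    /* byte offset of the inode in block */
    };

    /* Sketch of the ino_to_cg/ino_to_fsba/ino_to_fsbo arithmetic. */
    static struct ino_loc
    locate_inode(uint32_t ino, uint32_t ipg, uint32_t inode_size,
        uint32_t bsize, uint64_t itable_start)
    {
        struct ino_loc loc;
        uint32_t index = (ino - 1) % ipg;   /* slot within the group */
        uint32_t ipb = bsize / inode_size;  /* inodes per block      */

        loc.group = (ino - 1) / ipg;
        loc.fsblock = itable_start + index / ipb;
        loc.offset = (index % ipb) * inode_size;
        return (loc);
    }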
*/ static int ext2_sbupdate(struct ext2mount *mp, int waitfor) { struct m_ext2fs *fs = mp->um_e2fs; struct ext2fs *es = fs->e2fs; struct buf *bp; int error = 0; bp = getblk(mp->um_devvp, SBLOCK, SBSIZE, 0, 0, 0); bcopy((caddr_t)es, bp->b_data, (u_int)sizeof(struct ext2fs)); if (waitfor == MNT_WAIT) error = bwrite(bp); else bawrite(bp); /* * The buffers for group descriptors, inode bitmaps and block bitmaps * are not busy at this point and are (hopefully) written by the * usual sync mechanism. No need to write them here. */ return (error); } int ext2_cgupdate(struct ext2mount *mp, int waitfor) { struct m_ext2fs *fs = mp->um_e2fs; struct buf *bp; int i, error = 0, allerror = 0; allerror = ext2_sbupdate(mp, waitfor); /* Update gd csums */ if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) ext2_gd_csum_set(fs); for (i = 0; i < fs->e2fs_gdbcount; i++) { bp = getblk(mp->um_devvp, fsbtodb(fs, fs->e2fs->e2fs_first_dblock + 1 /* superblock */ + i), fs->e2fs_bsize, 0, 0, 0); e2fs_cgsave(&fs->e2fs_gd[ i * fs->e2fs_bsize / sizeof(struct ext2_gd)], (struct ext2_gd *)bp->b_data, fs->e2fs_bsize); if (waitfor == MNT_WAIT) error = bwrite(bp); else bawrite(bp); } if (!allerror && error) allerror = error; return (allerror); } /* * Return the root of a filesystem. */ static int ext2_root(struct mount *mp, int flags, struct vnode **vpp) { struct vnode *nvp; int error; error = VFS_VGET(mp, EXT2_ROOTINO, LK_EXCLUSIVE, &nvp); if (error) return (error); *vpp = nvp; return (0); } Index: head/sys/fs/ext2fs/ext2_vnops.c =================================================================== --- head/sys/fs/ext2fs/ext2_vnops.c (revision 324705) +++ head/sys/fs/ext2fs/ext2_vnops.c (revision 324706) @@ -1,2427 +1,2293 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science */ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)ufs_vnops.c 8.7 (Berkeley) 2/3/94 * @(#)ufs_vnops.c 8.27 (Berkeley) 5/27/95 * $FreeBSD$ */ #include "opt_suiddir.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_directio.h" #include #include #include #include #include #include #include #include #include #include static int ext2_makeinode(int mode, struct vnode *, struct vnode **, struct componentname *); static void ext2_itimes_locked(struct vnode *); -static int ext4_ext_read(struct vop_read_args *); -static int ext2_ind_read(struct vop_read_args *); static vop_access_t ext2_access; static int ext2_chmod(struct vnode *, int, struct ucred *, struct thread *); static int ext2_chown(struct vnode *, uid_t, gid_t, struct ucred *, struct thread *); static vop_close_t ext2_close; static vop_create_t ext2_create; static vop_fsync_t ext2_fsync; static vop_getattr_t ext2_getattr; static vop_ioctl_t ext2_ioctl; static vop_link_t ext2_link; static vop_mkdir_t ext2_mkdir; static vop_mknod_t ext2_mknod; static vop_open_t ext2_open; static vop_pathconf_t ext2_pathconf; static vop_print_t ext2_print; static vop_read_t ext2_read; static vop_readlink_t ext2_readlink; static vop_remove_t ext2_remove; static vop_rename_t ext2_rename; static vop_rmdir_t ext2_rmdir; static vop_setattr_t ext2_setattr; static vop_strategy_t ext2_strategy; static vop_symlink_t ext2_symlink; static vop_write_t ext2_write; static vop_deleteextattr_t ext2_deleteextattr; static vop_getextattr_t ext2_getextattr; static vop_listextattr_t ext2_listextattr; static vop_setextattr_t ext2_setextattr; static vop_vptofh_t ext2_vptofh; static vop_close_t ext2fifo_close; static vop_kqfilter_t ext2fifo_kqfilter; /* Global vfs data structures for ext2. 
*/ struct vop_vector ext2_vnodeops = { .vop_default = &default_vnodeops, .vop_access = ext2_access, .vop_bmap = ext2_bmap, .vop_cachedlookup = ext2_lookup, .vop_close = ext2_close, .vop_create = ext2_create, .vop_fsync = ext2_fsync, .vop_getpages = vnode_pager_local_getpages, .vop_getpages_async = vnode_pager_local_getpages_async, .vop_getattr = ext2_getattr, .vop_inactive = ext2_inactive, .vop_ioctl = ext2_ioctl, .vop_link = ext2_link, .vop_lookup = vfs_cache_lookup, .vop_mkdir = ext2_mkdir, .vop_mknod = ext2_mknod, .vop_open = ext2_open, .vop_pathconf = ext2_pathconf, .vop_poll = vop_stdpoll, .vop_print = ext2_print, .vop_read = ext2_read, .vop_readdir = ext2_readdir, .vop_readlink = ext2_readlink, .vop_reallocblks = ext2_reallocblks, .vop_reclaim = ext2_reclaim, .vop_remove = ext2_remove, .vop_rename = ext2_rename, .vop_rmdir = ext2_rmdir, .vop_setattr = ext2_setattr, .vop_strategy = ext2_strategy, .vop_symlink = ext2_symlink, .vop_write = ext2_write, .vop_deleteextattr = ext2_deleteextattr, .vop_getextattr = ext2_getextattr, .vop_listextattr = ext2_listextattr, .vop_setextattr = ext2_setextattr, #ifdef UFS_ACL .vop_getacl = ext2_getacl, .vop_setacl = ext2_setacl, .vop_aclcheck = ext2_aclcheck, #endif /* UFS_ACL */ .vop_vptofh = ext2_vptofh, }; struct vop_vector ext2_fifoops = { .vop_default = &fifo_specops, .vop_access = ext2_access, .vop_close = ext2fifo_close, .vop_fsync = ext2_fsync, .vop_getattr = ext2_getattr, .vop_inactive = ext2_inactive, .vop_kqfilter = ext2fifo_kqfilter, .vop_print = ext2_print, .vop_read = VOP_PANIC, .vop_reclaim = ext2_reclaim, .vop_setattr = ext2_setattr, .vop_write = VOP_PANIC, .vop_vptofh = ext2_vptofh, }; /* * A virgin directory (no blushing please). * Note that the type and namlen fields are reversed relative to ext2. * Also, we don't use `struct odirtemplate', since it would just cause * endianness problems. */ static struct dirtemplate mastertemplate = { 0, 12, 1, EXT2_FT_DIR, ".", 0, DIRBLKSIZ - 12, 2, EXT2_FT_DIR, ".." }; static struct dirtemplate omastertemplate = { 0, 12, 1, EXT2_FT_UNKNOWN, ".", 0, DIRBLKSIZ - 12, 2, EXT2_FT_UNKNOWN, ".." }; static void ext2_itimes_locked(struct vnode *vp) { struct inode *ip; struct timespec ts; ASSERT_VI_LOCKED(vp, __func__); ip = VTOI(vp); if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) == 0) return; if ((vp->v_type == VBLK || vp->v_type == VCHR)) ip->i_flag |= IN_LAZYMOD; else ip->i_flag |= IN_MODIFIED; if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { vfs_timestamp(&ts); if (ip->i_flag & IN_ACCESS) { ip->i_atime = ts.tv_sec; ip->i_atimensec = ts.tv_nsec; } if (ip->i_flag & IN_UPDATE) { ip->i_mtime = ts.tv_sec; ip->i_mtimensec = ts.tv_nsec; ip->i_modrev++; } if (ip->i_flag & IN_CHANGE) { ip->i_ctime = ts.tv_sec; ip->i_ctimensec = ts.tv_nsec; } } ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE); } void ext2_itimes(struct vnode *vp) { VI_LOCK(vp); ext2_itimes_locked(vp); VI_UNLOCK(vp); } /* * Create a regular file */ static int ext2_create(struct vop_create_args *ap) { int error; error = ext2_makeinode(MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode), ap->a_dvp, ap->a_vpp, ap->a_cnp); if (error != 0) return (error); if ((ap->a_cnp->cn_flags & MAKEENTRY) != 0) cache_enter(ap->a_dvp, *ap->a_vpp, ap->a_cnp); return (0); } static int ext2_open(struct vop_open_args *ap) { if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR) return (EOPNOTSUPP); /* * Files marked append-only must be opened for appending. 
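The mastertemplate above is exactly what a freshly made directory's first block looks like once ext2_mkdir() fills in the inode numbers: a 12-byte "." entry followed by a ".." entry whose record length is stretched to cover the rest of the block. A sketch of that on-disk layout (the entry header follows the ext2 wire format with the FTYPE feature; the struct and helper are illustrative, not the kernel's):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative ext2 directory-entry header (FTYPE variant). */
    struct dirent_hdr {
        uint32_t inode;
        uint16_t reclen;
        uint8_t  namelen;
        uint8_t  filetype;      /* EXT2_FT_DIR == 2 */
    };

    /* Lay out "." and ".." the way the mkdir template does. */
    static void
    init_dir_block(char *block, uint32_t bsize, uint32_t self, uint32_t parent)
    {
        struct dirent_hdr dot = { self, 12, 1, 2 };
        struct dirent_hdr dotdot = { parent, (uint16_t)(bsize - 12), 2, 2 };

        memset(block, 0, bsize);
        memcpy(block, &dot, sizeof(dot));
        memcpy(block + sizeof(dot), ".", 1);
        memcpy(block + 12, &dotdot, sizeof(dotdot));
        memcpy(block + 12 + sizeof(dotdot), "..", 2);
    }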
*/ if ((VTOI(ap->a_vp)->i_flags & APPEND) && (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE) return (EPERM); vnode_create_vobject(ap->a_vp, VTOI(ap->a_vp)->i_size, ap->a_td); return (0); } /* * Close called. * * Update the times on the inode. */ static int ext2_close(struct vop_close_args *ap) { struct vnode *vp = ap->a_vp; VI_LOCK(vp); if (vp->v_usecount > 1) ext2_itimes_locked(vp); VI_UNLOCK(vp); return (0); } static int ext2_access(struct vop_access_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); accmode_t accmode = ap->a_accmode; int error; if (vp->v_type == VBLK || vp->v_type == VCHR) return (EOPNOTSUPP); /* * Disallow write attempts on read-only file systems; * unless the file is a socket, fifo, or a block or * character device resident on the file system. */ if (accmode & VWRITE) { switch (vp->v_type) { case VDIR: case VLNK: case VREG: if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: break; } } /* If immutable bit set, nobody gets to write it. */ if ((accmode & VWRITE) && (ip->i_flags & (SF_IMMUTABLE | SF_SNAPSHOT))) return (EPERM); error = vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid, ap->a_accmode, ap->a_cred, NULL); return (error); } static int ext2_getattr(struct vop_getattr_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); struct vattr *vap = ap->a_vap; ext2_itimes(vp); /* * Copy from inode table */ vap->va_fsid = dev2udev(ip->i_devvp->v_rdev); vap->va_fileid = ip->i_number; vap->va_mode = ip->i_mode & ~IFMT; vap->va_nlink = ip->i_nlink; vap->va_uid = ip->i_uid; vap->va_gid = ip->i_gid; vap->va_rdev = ip->i_rdev; vap->va_size = ip->i_size; vap->va_atime.tv_sec = ip->i_atime; vap->va_atime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_atimensec : 0; vap->va_mtime.tv_sec = ip->i_mtime; vap->va_mtime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_mtimensec : 0; vap->va_ctime.tv_sec = ip->i_ctime; vap->va_ctime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_ctimensec : 0; if E2DI_HAS_XTIME(ip) { vap->va_birthtime.tv_sec = ip->i_birthtime; vap->va_birthtime.tv_nsec = ip->i_birthnsec; } vap->va_flags = ip->i_flags; vap->va_gen = ip->i_gen; vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize; vap->va_bytes = dbtob((u_quad_t)ip->i_blocks); vap->va_type = IFTOVT(ip->i_mode); vap->va_filerev = ip->i_modrev; return (0); } /* * Set attribute vnode op. called from several syscalls */ static int ext2_setattr(struct vop_setattr_args *ap) { struct vattr *vap = ap->a_vap; struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); struct ucred *cred = ap->a_cred; struct thread *td = curthread; int error; /* * Check for unsettable attributes. */ if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) || (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) || (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) || ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) { return (EINVAL); } if (vap->va_flags != VNOVAL) { /* Disallow flags not supported by ext2fs. */ if (vap->va_flags & ~(SF_APPEND | SF_IMMUTABLE | UF_NODUMP)) return (EOPNOTSUPP); if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); /* * Callers may only modify the file flags on objects they * have VADMIN rights for. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td))) return (error); /* * Unprivileged processes and privileged processes in * jail() are not permitted to unset system flags, or * modify flags if any system flags are set. * Privileged non-jail processes may not modify system flags * if securelevel > 0 and any existing system flags are set. 
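ext2_access() above denies writes for two separate reasons: the mount is read-only and the object is a regular file, directory, or symlink (devices and fifos living on the volume stay writable), or the inode carries an immutable flag. A boiled-down version of that pair of checks, with the vnode type reduced to an enum of its own (EROFS and EPERM are the same errnos the kernel returns):

    #include <errno.h>
    #include <stdbool.h>

    enum obj_type { OBJ_REG, OBJ_DIR, OBJ_LNK, OBJ_DEV, OBJ_FIFO, OBJ_SOCK };

    /* Simplified model of the write-denial checks in ext2_access(). */
    static int
    write_denied(enum obj_type type, bool ro_mount, bool immutable)
    {
        bool plain = (type == OBJ_REG || type == OBJ_DIR || type == OBJ_LNK);

        if (ro_mount && plain)
            return (EROFS);
        if (immutable)
            return (EPERM);
        return (0);
    }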
*/ if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0)) { if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND)) { error = securelevel_gt(cred, 0); if (error) return (error); } } else { if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND) || ((vap->va_flags ^ ip->i_flags) & SF_SETTABLE)) return (EPERM); } ip->i_flags = vap->va_flags; ip->i_flag |= IN_CHANGE; if (ip->i_flags & (IMMUTABLE | APPEND)) return (0); } if (ip->i_flags & (IMMUTABLE | APPEND)) return (EPERM); /* * Go through the fields and update iff not VNOVAL. */ if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); if ((error = ext2_chown(vp, vap->va_uid, vap->va_gid, cred, td)) != 0) return (error); } if (vap->va_size != VNOVAL) { /* * Disallow write attempts on read-only file systems; * unless the file is a socket, fifo, or a block or * character device resident on the file system. */ switch (vp->v_type) { case VDIR: return (EISDIR); case VLNK: case VREG: if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); break; default: break; } if ((error = ext2_truncate(vp, vap->va_size, 0, cred, td)) != 0) return (error); } if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); /* * From utimes(2): * If times is NULL, ... The caller must be the owner of * the file, have permission to write the file, or be the * super-user. * If times is non-NULL, ... The caller must be the owner of * the file or be the super-user. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td)) && ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || (error = VOP_ACCESS(vp, VWRITE, cred, td)))) return (error); ip->i_flag |= IN_CHANGE | IN_MODIFIED; if (vap->va_atime.tv_sec != VNOVAL) { ip->i_flag &= ~IN_ACCESS; ip->i_atime = vap->va_atime.tv_sec; ip->i_atimensec = vap->va_atime.tv_nsec; } if (vap->va_mtime.tv_sec != VNOVAL) { ip->i_flag &= ~IN_UPDATE; ip->i_mtime = vap->va_mtime.tv_sec; ip->i_mtimensec = vap->va_mtime.tv_nsec; } ip->i_birthtime = vap->va_birthtime.tv_sec; ip->i_birthnsec = vap->va_birthtime.tv_nsec; error = ext2_update(vp, 0); if (error) return (error); } error = 0; if (vap->va_mode != (mode_t)VNOVAL) { if (vp->v_mount->mnt_flag & MNT_RDONLY) return (EROFS); error = ext2_chmod(vp, (int)vap->va_mode, cred, td); } return (error); } /* * Change the mode on a file. * Inode must be locked before calling. */ static int ext2_chmod(struct vnode *vp, int mode, struct ucred *cred, struct thread *td) { struct inode *ip = VTOI(vp); int error; /* * To modify the permissions on a file, must possess VADMIN * for that file. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td))) return (error); /* * Privileged processes may set the sticky bit on non-directories, * as well as set the setgid bit on a file with a group that the * process is not a member of. */ if (vp->v_type != VDIR && (mode & S_ISTXT)) { error = priv_check_cred(cred, PRIV_VFS_STICKYFILE, 0); if (error) return (EFTYPE); } if (!groupmember(ip->i_gid, cred) && (mode & ISGID)) { error = priv_check_cred(cred, PRIV_VFS_SETGID, 0); if (error) return (error); } ip->i_mode &= ~ALLPERMS; ip->i_mode |= (mode & ALLPERMS); ip->i_flag |= IN_CHANGE; return (0); } /* * Perform chown operation on inode ip; * inode must be locked prior to call. 
*/ static int ext2_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred, struct thread *td) { struct inode *ip = VTOI(vp); uid_t ouid; gid_t ogid; int error = 0; if (uid == (uid_t)VNOVAL) uid = ip->i_uid; if (gid == (gid_t)VNOVAL) gid = ip->i_gid; /* * To modify the ownership of a file, must possess VADMIN * for that file. */ if ((error = VOP_ACCESS(vp, VADMIN, cred, td))) return (error); /* * To change the owner of a file, or change the group of a file * to a group of which we are not a member, the caller must * have privilege. */ if (uid != ip->i_uid || (gid != ip->i_gid && !groupmember(gid, cred))) { error = priv_check_cred(cred, PRIV_VFS_CHOWN, 0); if (error) return (error); } ogid = ip->i_gid; ouid = ip->i_uid; ip->i_gid = gid; ip->i_uid = uid; ip->i_flag |= IN_CHANGE; if ((ip->i_mode & (ISUID | ISGID)) && (ouid != uid || ogid != gid)) { if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID, 0) != 0) ip->i_mode &= ~(ISUID | ISGID); } return (0); } /* * Synch an open file. */ /* ARGSUSED */ static int ext2_fsync(struct vop_fsync_args *ap) { /* * Flush all dirty buffers associated with a vnode. */ vop_stdfsync(ap); return (ext2_update(ap->a_vp, ap->a_waitfor == MNT_WAIT)); } /* * Mknod vnode call */ /* ARGSUSED */ static int ext2_mknod(struct vop_mknod_args *ap) { struct vattr *vap = ap->a_vap; struct vnode **vpp = ap->a_vpp; struct inode *ip; ino_t ino; int error; error = ext2_makeinode(MAKEIMODE(vap->va_type, vap->va_mode), ap->a_dvp, vpp, ap->a_cnp); if (error) return (error); ip = VTOI(*vpp); ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; if (vap->va_rdev != VNOVAL) { /* * Want to be able to use this to make badblock * inodes, so don't truncate the dev number. */ - ip->i_rdev = vap->va_rdev; + if (!(ip->i_flag & IN_E4EXTENTS)) + ip->i_rdev = vap->va_rdev; } /* * Remove inode, then reload it through VFS_VGET so it is * checked to see if it is an alias of an existing entry in * the inode cache. XXX I don't believe this is necessary now. */ (*vpp)->v_type = VNON; ino = ip->i_number; /* Save this before vgone() invalidates ip. 
*/ vgone(*vpp); vput(*vpp); error = VFS_VGET(ap->a_dvp->v_mount, ino, LK_EXCLUSIVE, vpp); if (error) { *vpp = NULL; return (error); } return (0); } static int ext2_remove(struct vop_remove_args *ap) { struct inode *ip; struct vnode *vp = ap->a_vp; struct vnode *dvp = ap->a_dvp; int error; ip = VTOI(vp); if ((ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) || (VTOI(dvp)->i_flags & APPEND)) { error = EPERM; goto out; } error = ext2_dirremove(dvp, ap->a_cnp); if (error == 0) { ip->i_nlink--; ip->i_flag |= IN_CHANGE; } out: return (error); } static unsigned short ext2_max_nlink(struct inode *ip) { struct m_ext2fs *fs; fs = ip->i_e2fs; if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_DIR_NLINK)) return (EXT4_LINK_MAX); else return (EXT2_LINK_MAX); } /* * link vnode call */ static int ext2_link(struct vop_link_args *ap) { struct vnode *vp = ap->a_vp; struct vnode *tdvp = ap->a_tdvp; struct componentname *cnp = ap->a_cnp; struct inode *ip; int error; #ifdef INVARIANTS if ((cnp->cn_flags & HASBUF) == 0) panic("ext2_link: no name"); #endif ip = VTOI(vp); if ((nlink_t)ip->i_nlink >= ext2_max_nlink(ip)) { error = EMLINK; goto out; } if (ip->i_flags & (IMMUTABLE | APPEND)) { error = EPERM; goto out; } ip->i_nlink++; ip->i_flag |= IN_CHANGE; error = ext2_update(vp, !DOINGASYNC(vp)); if (!error) error = ext2_direnter(ip, tdvp, cnp); if (error) { ip->i_nlink--; ip->i_flag |= IN_CHANGE; } out: return (error); } static int ext2_inc_nlink(struct inode *ip) { ip->i_nlink++; if (ext2_htree_has_idx(ip) && ip->i_nlink > 1) { if (ip->i_nlink >= ext2_max_nlink(ip) || ip->i_nlink == 2) ip->i_nlink = 1; } else if (ip->i_nlink > ext2_max_nlink(ip)) { ip->i_nlink--; return (EMLINK); } return (0); } static void ext2_dec_nlink(struct inode *ip) { if (!S_ISDIR(ip->i_mode) || ip->i_nlink > 2) ip->i_nlink--; } /* * Rename system call. * rename("foo", "bar"); * is essentially * unlink("bar"); * link("foo", "bar"); * unlink("foo"); * but ``atomically''. Can't do full commit without saving state in the * inode on disk which isn't feasible at this time. Best we can do is * always guarantee the target exists. * * Basic algorithm is: * * 1) Bump link count on source while we're linking it to the * target. This also ensure the inode won't be deleted out * from underneath us while we work (it may be truncated by * a concurrent `trunc' or `open' for creation). * 2) Link source to destination. If destination already exists, * delete it first. * 3) Unlink source reference to inode if still around. If a * directory was moved and the parent of the destination * is different from the source, patch the ".." entry in the * directory. */ static int ext2_rename(struct vop_rename_args *ap) { struct vnode *tvp = ap->a_tvp; struct vnode *tdvp = ap->a_tdvp; struct vnode *fvp = ap->a_fvp; struct vnode *fdvp = ap->a_fdvp; struct componentname *tcnp = ap->a_tcnp; struct componentname *fcnp = ap->a_fcnp; struct inode *ip, *xp, *dp; struct dirtemplate dirbuf; int doingdirectory = 0, oldparent = 0, newparent = 0; int error = 0; u_char namlen; #ifdef INVARIANTS if ((tcnp->cn_flags & HASBUF) == 0 || (fcnp->cn_flags & HASBUF) == 0) panic("ext2_rename: no name"); #endif /* * Check for cross-device rename. 
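ext2_max_nlink() and ext2_inc_nlink() above implement the ext4 DIR_NLINK convention: when an htree-indexed directory's link count can no longer be represented, it is pinned at 1, which on disk means "link count unknown"; without that escape hatch the increment simply fails with EMLINK. A condensed model of the increment (the 'indexed' flag stands for ext2_htree_has_idx(), 'maxlink' for ext2_max_nlink()):

    #include <errno.h>
    #include <stdint.h>

    /* Condensed model of ext2_inc_nlink(). */
    static int
    inc_nlink(uint32_t *nlink, uint32_t maxlink, int indexed)
    {
        (*nlink)++;
        if (indexed && *nlink > 1) {
            /* Overflowed, or was already pinned at "unknown" (1 -> 2). */
            if (*nlink >= maxlink || *nlink == 2)
                *nlink = 1;
        } else if (*nlink > maxlink) {
            (*nlink)--;
            return (EMLINK);
        }
        return (0);
    }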
*/ if ((fvp->v_mount != tdvp->v_mount) || (tvp && (fvp->v_mount != tvp->v_mount))) { error = EXDEV; abortit: if (tdvp == tvp) vrele(tdvp); else vput(tdvp); if (tvp) vput(tvp); vrele(fdvp); vrele(fvp); return (error); } if (tvp && ((VTOI(tvp)->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) || (VTOI(tdvp)->i_flags & APPEND))) { error = EPERM; goto abortit; } /* * Renaming a file to itself has no effect. The upper layers should * not call us in that case. Temporarily just warn if they do. */ if (fvp == tvp) { printf("ext2_rename: fvp == tvp (can't happen)\n"); error = 0; goto abortit; } if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0) goto abortit; dp = VTOI(fdvp); ip = VTOI(fvp); if (ip->i_nlink >= ext2_max_nlink(ip) && !ext2_htree_has_idx(ip)) { VOP_UNLOCK(fvp, 0); error = EMLINK; goto abortit; } if ((ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) || (dp->i_flags & APPEND)) { VOP_UNLOCK(fvp, 0); error = EPERM; goto abortit; } if ((ip->i_mode & IFMT) == IFDIR) { /* * Avoid ".", "..", and aliases of "." for obvious reasons. */ if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') || dp == ip || (fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT || (ip->i_flag & IN_RENAME)) { VOP_UNLOCK(fvp, 0); error = EINVAL; goto abortit; } ip->i_flag |= IN_RENAME; oldparent = dp->i_number; doingdirectory++; } vrele(fdvp); /* * When the target exists, both the directory * and target vnodes are returned locked. */ dp = VTOI(tdvp); xp = NULL; if (tvp) xp = VTOI(tvp); /* * 1) Bump link count while we're moving stuff * around. If we crash somewhere before * completing our work, the link count * may be wrong, but correctable. */ ext2_inc_nlink(ip); ip->i_flag |= IN_CHANGE; if ((error = ext2_update(fvp, !DOINGASYNC(fvp))) != 0) { VOP_UNLOCK(fvp, 0); goto bad; } /* * If ".." must be changed (ie the directory gets a new * parent) then the source directory must not be in the * directory hierarchy above the target, as this would * orphan everything below the source directory. Also * the user must have write permission in the source so * as to be able to change "..". We must repeat the call * to namei, as the parent directory is unlocked by the * call to checkpath(). */ error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_thread); VOP_UNLOCK(fvp, 0); if (oldparent != dp->i_number) newparent = dp->i_number; if (doingdirectory && newparent) { if (error) /* write access check above */ goto bad; if (xp != NULL) vput(tvp); error = ext2_checkpath(ip, dp, tcnp->cn_cred); if (error) goto out; VREF(tdvp); error = relookup(tdvp, &tvp, tcnp); if (error) goto out; vrele(tdvp); dp = VTOI(tdvp); xp = NULL; if (tvp) xp = VTOI(tvp); } /* * 2) If target doesn't exist, link the target * to the source and unlink the source. * Otherwise, rewrite the target directory * entry to reference the source inode and * expunge the original entry's existence. */ if (xp == NULL) { if (dp->i_devvp != ip->i_devvp) panic("ext2_rename: EXDEV"); /* * Account for ".." in new directory. * When source and destination have the same * parent we don't fool with the link count. 
*/ if (doingdirectory && newparent) { error = ext2_inc_nlink(dp); if (error) goto bad; dp->i_flag |= IN_CHANGE; error = ext2_update(tdvp, !DOINGASYNC(tdvp)); if (error) goto bad; } error = ext2_direnter(ip, tdvp, tcnp); if (error) { if (doingdirectory && newparent) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; (void)ext2_update(tdvp, 1); } goto bad; } vput(tdvp); } else { if (xp->i_devvp != dp->i_devvp || xp->i_devvp != ip->i_devvp) panic("ext2_rename: EXDEV"); /* * Short circuit rename(foo, foo). */ if (xp->i_number == ip->i_number) panic("ext2_rename: same file"); /* * If the parent directory is "sticky", then the user must * own the parent directory, or the destination of the rename, * otherwise the destination may not be changed (except by * root). This implements append-only directories. */ if ((dp->i_mode & S_ISTXT) && tcnp->cn_cred->cr_uid != 0 && tcnp->cn_cred->cr_uid != dp->i_uid && xp->i_uid != tcnp->cn_cred->cr_uid) { error = EPERM; goto bad; } /* * Target must be empty if a directory and have no links * to it. Also, ensure source and target are compatible * (both directories, or both not directories). */ if ((xp->i_mode & IFMT) == IFDIR) { if (!ext2_dirempty(xp, dp->i_number, tcnp->cn_cred)) { error = ENOTEMPTY; goto bad; } if (!doingdirectory) { error = ENOTDIR; goto bad; } cache_purge(tdvp); } else if (doingdirectory) { error = EISDIR; goto bad; } error = ext2_dirrewrite(dp, ip, tcnp); if (error) goto bad; /* * If the target directory is in the same * directory as the source directory, * decrement the link count on the parent * of the target directory. */ if (doingdirectory && !newparent) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; } vput(tdvp); /* * Adjust the link count of the target to * reflect the dirrewrite above. If this is * a directory it is empty and there are * no links to it, so we can squash the inode and * any space associated with it. We disallowed * renaming over top of a directory with links to * it above, as the remaining link would point to * a directory without "." or ".." entries. */ ext2_dec_nlink(xp); if (doingdirectory) { if (--xp->i_nlink != 0) panic("ext2_rename: linked directory"); error = ext2_truncate(tvp, (off_t)0, IO_SYNC, tcnp->cn_cred, tcnp->cn_thread); } xp->i_flag |= IN_CHANGE; vput(tvp); xp = NULL; } /* * 3) Unlink the source. */ fcnp->cn_flags &= ~MODMASK; fcnp->cn_flags |= LOCKPARENT | LOCKLEAF; VREF(fdvp); error = relookup(fdvp, &fvp, fcnp); if (error == 0) vrele(fdvp); if (fvp != NULL) { xp = VTOI(fvp); dp = VTOI(fdvp); } else { /* * From name has disappeared. IN_RENAME is not sufficient * to protect against directory races due to timing windows, * so we can't panic here. */ vrele(ap->a_fvp); return (0); } /* * Ensure that the directory entry still exists and has not * changed while the new name has been entered. If the source is * a file then the entry may have been unlinked or renamed. In * either case there is no further work to be done. If the source * is a directory then it cannot have been rmdir'ed; its link * count of three would cause a rmdir to fail with ENOTEMPTY. * The IN_RENAME flag ensures that it cannot be moved by another * rename. */ if (xp != ip) { /* * From name resolves to a different inode. IN_RENAME is * not sufficient protection against timing window races * so we can't panic here. */ } else { /* * If the source is a directory with a * new parent, the link count of the old * parent directory must be decremented * and ".." set to point to the new parent. 
*/ if (doingdirectory && newparent) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; error = vn_rdwr(UIO_READ, fvp, (caddr_t)&dirbuf, sizeof(struct dirtemplate), (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_NOMACCHECK, tcnp->cn_cred, NOCRED, NULL, NULL); if (error == 0) { /* Like ufs little-endian: */ namlen = dirbuf.dotdot_type; if (namlen != 2 || dirbuf.dotdot_name[0] != '.' || dirbuf.dotdot_name[1] != '.') { ext2_dirbad(xp, (doff_t)12, "rename: mangled dir"); } else { dirbuf.dotdot_ino = newparent; (void)vn_rdwr(UIO_WRITE, fvp, (caddr_t)&dirbuf, sizeof(struct dirtemplate), (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_SYNC | IO_NOMACCHECK, tcnp->cn_cred, NOCRED, NULL, NULL); cache_purge(fdvp); } } } error = ext2_dirremove(fdvp, fcnp); if (!error) { ext2_dec_nlink(xp); xp->i_flag |= IN_CHANGE; } xp->i_flag &= ~IN_RENAME; } if (dp) vput(fdvp); if (xp) vput(fvp); vrele(ap->a_fvp); return (error); bad: if (xp) vput(ITOV(xp)); vput(ITOV(dp)); out: if (doingdirectory) ip->i_flag &= ~IN_RENAME; if (vn_lock(fvp, LK_EXCLUSIVE) == 0) { ext2_dec_nlink(ip); ip->i_flag |= IN_CHANGE; ip->i_flag &= ~IN_RENAME; vput(fvp); } else vrele(fvp); return (error); } #ifdef UFS_ACL static int ext2_do_posix1e_acl_inheritance_dir(struct vnode *dvp, struct vnode *tvp, mode_t dmode, struct ucred *cred, struct thread *td) { int error; struct inode *ip = VTOI(tvp); struct acl *dacl, *acl; acl = acl_alloc(M_WAITOK); dacl = acl_alloc(M_WAITOK); /* * Retrieve default ACL from parent, if any. */ error = VOP_GETACL(dvp, ACL_TYPE_DEFAULT, acl, cred, td); switch (error) { case 0: /* * Retrieved a default ACL, so merge mode and ACL if * necessary. If the ACL is empty, fall through to * the "not defined or available" case. */ if (acl->acl_cnt != 0) { dmode = acl_posix1e_newfilemode(dmode, acl); ip->i_mode = dmode; *dacl = *acl; ext2_sync_acl_from_inode(ip, acl); break; } /* FALLTHROUGH */ case EOPNOTSUPP: /* * Just use the mode as-is. */ ip->i_mode = dmode; error = 0; goto out; default: goto out; } error = VOP_SETACL(tvp, ACL_TYPE_ACCESS, acl, cred, td); if (error == 0) error = VOP_SETACL(tvp, ACL_TYPE_DEFAULT, dacl, cred, td); switch (error) { case 0: break; case EOPNOTSUPP: /* * XXX: This should not happen, as EOPNOTSUPP above * was supposed to free acl. */ #ifdef DEBUG printf("ext2_mkdir: VOP_GETACL() but no VOP_SETACL()\n"); #endif /* DEBUG */ break; default: goto out; } out: acl_free(acl); acl_free(dacl); return (error); } static int ext2_do_posix1e_acl_inheritance_file(struct vnode *dvp, struct vnode *tvp, mode_t mode, struct ucred *cred, struct thread *td) { int error; struct inode *ip = VTOI(tvp); struct acl *acl; acl = acl_alloc(M_WAITOK); /* * Retrieve default ACL for parent, if any. */ error = VOP_GETACL(dvp, ACL_TYPE_DEFAULT, acl, cred, td); switch (error) { case 0: /* * Retrieved a default ACL, so merge mode and ACL if * necessary. */ if (acl->acl_cnt != 0) { /* * Two possible ways for default ACL to not * be present. First, the EA can be * undefined, or second, the default ACL can * be blank. If it's blank, fall through to * the it's not defined case. */ mode = acl_posix1e_newfilemode(mode, acl); ip->i_mode = mode; ext2_sync_acl_from_inode(ip, acl); break; } /* FALLTHROUGH */ case EOPNOTSUPP: /* * Just use the mode as-is. */ ip->i_mode = mode; error = 0; goto out; default: goto out; } error = VOP_SETACL(tvp, ACL_TYPE_ACCESS, acl, cred, td); switch (error) { case 0: break; case EOPNOTSUPP: /* * XXX: This should not happen, as EOPNOTSUPP above was * supposed to free acl. 
*/ printf("ufs_do_posix1e_acl_inheritance_file: VOP_GETACL() " "but no VOP_SETACL()\n"); /* panic("ufs_do_posix1e_acl_inheritance_file: VOP_GETACL() " "but no VOP_SETACL()"); */ break; default: goto out; } out: acl_free(acl); return (error); } #endif /* UFS_ACL */ /* * Mkdir system call */ static int ext2_mkdir(struct vop_mkdir_args *ap) { struct vnode *dvp = ap->a_dvp; struct vattr *vap = ap->a_vap; struct componentname *cnp = ap->a_cnp; struct inode *ip, *dp; struct vnode *tvp; struct dirtemplate dirtemplate, *dtp; int error, dmode; #ifdef INVARIANTS if ((cnp->cn_flags & HASBUF) == 0) panic("ext2_mkdir: no name"); #endif dp = VTOI(dvp); if ((nlink_t)dp->i_nlink >= ext2_max_nlink(dp) && !ext2_htree_has_idx(dp)) { error = EMLINK; goto out; } dmode = vap->va_mode & 0777; dmode |= IFDIR; /* * Must simulate part of ext2_makeinode here to acquire the inode, * but not have it entered in the parent directory. The entry is * made later after writing "." and ".." entries. */ error = ext2_valloc(dvp, dmode, cnp->cn_cred, &tvp); if (error) goto out; ip = VTOI(tvp); ip->i_gid = dp->i_gid; #ifdef SUIDDIR { /* * if we are hacking owners here, (only do this where told to) * and we are not giving it TOO root, (would subvert quotas) * then go ahead and give it to the other user. * The new directory also inherits the SUID bit. * If user's UID and dir UID are the same, * 'give it away' so that the SUID is still forced on. */ if ((dvp->v_mount->mnt_flag & MNT_SUIDDIR) && (dp->i_mode & ISUID) && dp->i_uid) { dmode |= ISUID; ip->i_uid = dp->i_uid; } else { ip->i_uid = cnp->cn_cred->cr_uid; } } #else ip->i_uid = cnp->cn_cred->cr_uid; #endif ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; ip->i_mode = dmode; tvp->v_type = VDIR; /* Rest init'd in getnewvnode(). */ ip->i_nlink = 2; if (cnp->cn_flags & ISWHITEOUT) ip->i_flags |= UF_OPAQUE; error = ext2_update(tvp, 1); /* * Bump link count in parent directory * to reflect work done below. Should * be done before reference is created * so reparation is possible if we crash. */ ext2_inc_nlink(dp); dp->i_flag |= IN_CHANGE; error = ext2_update(dvp, !DOINGASYNC(dvp)); if (error) goto bad; /* Initialize directory with "." and ".." from static template. */ if (EXT2_HAS_INCOMPAT_FEATURE(ip->i_e2fs, EXT2F_INCOMPAT_FTYPE)) dtp = &mastertemplate; else dtp = &omastertemplate; dirtemplate = *dtp; dirtemplate.dot_ino = ip->i_number; dirtemplate.dotdot_ino = dp->i_number; /* * note that in ext2 DIRBLKSIZ == blocksize, not DEV_BSIZE so let's * just redefine it - for this function only */ #undef DIRBLKSIZ #define DIRBLKSIZ VTOI(dvp)->i_e2fs->e2fs_bsize dirtemplate.dotdot_reclen = DIRBLKSIZ - 12; error = vn_rdwr(UIO_WRITE, tvp, (caddr_t)&dirtemplate, sizeof(dirtemplate), (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_SYNC | IO_NOMACCHECK, cnp->cn_cred, NOCRED, NULL, NULL); if (error) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; goto bad; } if (DIRBLKSIZ > VFSTOEXT2(dvp->v_mount)->um_mountp->mnt_stat.f_bsize) /* XXX should grow with balloc() */ panic("ext2_mkdir: blksize"); else { ip->i_size = DIRBLKSIZ; ip->i_flag |= IN_CHANGE; } #ifdef UFS_ACL if (dvp->v_mount->mnt_flag & MNT_ACLS) { error = ext2_do_posix1e_acl_inheritance_dir(dvp, tvp, dmode, cnp->cn_cred, cnp->cn_thread); if (error) goto bad; } #endif /* UFS_ACL */ /* Directory set up, now install its entry in the parent directory. 
*/ error = ext2_direnter(ip, dvp, cnp); if (error) { ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; } bad: /* * No need to do an explicit VOP_TRUNCATE here, vrele will do this * for us because we set the link count to 0. */ if (error) { ip->i_nlink = 0; ip->i_flag |= IN_CHANGE; vput(tvp); } else *ap->a_vpp = tvp; out: return (error); #undef DIRBLKSIZ #define DIRBLKSIZ DEV_BSIZE } /* * Rmdir system call. */ static int ext2_rmdir(struct vop_rmdir_args *ap) { struct vnode *vp = ap->a_vp; struct vnode *dvp = ap->a_dvp; struct componentname *cnp = ap->a_cnp; struct inode *ip, *dp; int error; ip = VTOI(vp); dp = VTOI(dvp); /* * Verify the directory is empty (and valid). * (Rmdir ".." won't be valid since * ".." will contain a reference to * the current directory and thus be * non-empty.) */ if (!ext2_dirempty(ip, dp->i_number, cnp->cn_cred)) { error = ENOTEMPTY; goto out; } if ((dp->i_flags & APPEND) || (ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND))) { error = EPERM; goto out; } /* * Delete reference to directory before purging * inode. If we crash in between, the directory * will be reattached to lost+found, */ error = ext2_dirremove(dvp, cnp); if (error) goto out; ext2_dec_nlink(dp); dp->i_flag |= IN_CHANGE; cache_purge(dvp); VOP_UNLOCK(dvp, 0); /* * Truncate inode. The only stuff left * in the directory is "." and "..". */ ip->i_nlink = 0; error = ext2_truncate(vp, (off_t)0, IO_SYNC, cnp->cn_cred, cnp->cn_thread); cache_purge(ITOV(ip)); if (vn_lock(dvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { VOP_UNLOCK(vp, 0); vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); } out: return (error); } /* * symlink -- make a symbolic link */ static int ext2_symlink(struct vop_symlink_args *ap) { struct vnode *vp, **vpp = ap->a_vpp; struct inode *ip; int len, error; error = ext2_makeinode(IFLNK | ap->a_vap->va_mode, ap->a_dvp, vpp, ap->a_cnp); if (error) return (error); vp = *vpp; len = strlen(ap->a_target); if (len < vp->v_mount->mnt_maxsymlinklen) { ip = VTOI(vp); bcopy(ap->a_target, (char *)ip->i_shortlink, len); ip->i_size = len; ip->i_flag |= IN_CHANGE | IN_UPDATE; } else error = vn_rdwr(UIO_WRITE, vp, ap->a_target, len, (off_t)0, UIO_SYSSPACE, IO_NODELOCKED | IO_NOMACCHECK, ap->a_cnp->cn_cred, NOCRED, NULL, NULL); if (error) vput(vp); return (error); } /* * Return target name of a symbolic link */ static int ext2_readlink(struct vop_readlink_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); int isize; isize = ip->i_size; if (isize < vp->v_mount->mnt_maxsymlinklen) { uiomove((char *)ip->i_shortlink, isize, ap->a_uio); return (0); } return (VOP_READ(vp, ap->a_uio, 0, ap->a_cred)); } /* * Calculate the logical to physical mapping if not done already, * then call the device strategy routine. * * In order to be able to swap to a file, the ext2_bmaparray() operation may not * deadlock on memory. See ext2_bmap() for details. 
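ext2_symlink() and ext2_readlink() above split symbolic links by length: a target shorter than mnt_maxsymlinklen is stored directly in the inode's block-pointer area (a "fast" symlink), anything longer goes through ordinary file data. A small sketch of that decision; the 60-byte inline capacity is the customary ext2 value of EXT2_MAXSYMLINKLEN and is stated here as an assumption:

    #include <stdbool.h>
    #include <string.h>

    #define INLINE_LINK_MAX 60      /* bytes available in the i_db[]/i_ib[] area */

    /*
     * Returns true when the target fits inline; the caller writes longer
     * targets into a regular data block instead.
     */
    static bool
    store_symlink_inline(char *shortlink_area, const char *target)
    {
        size_t len = strlen(target);

        if (len >= INLINE_LINK_MAX)
            return (false);
        memcpy(shortlink_area, target, len);
        return (true);
    }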
*/ static int ext2_strategy(struct vop_strategy_args *ap) { struct buf *bp = ap->a_bp; struct vnode *vp = ap->a_vp; struct bufobj *bo; daddr_t blkno; int error; if (vp->v_type == VBLK || vp->v_type == VCHR) panic("ext2_strategy: spec"); if (bp->b_blkno == bp->b_lblkno) { - error = ext2_bmaparray(vp, bp->b_lblkno, &blkno, NULL, NULL); + + if (VTOI(ap->a_vp)->i_flag & IN_E4EXTENTS) + error = ext4_bmapext(vp, bp->b_lblkno, &blkno, NULL, NULL); + else + error = ext2_bmaparray(vp, bp->b_lblkno, &blkno, NULL, NULL); + bp->b_blkno = blkno; if (error) { bp->b_error = error; bp->b_ioflags |= BIO_ERROR; bufdone(bp); return (0); } if ((long)bp->b_blkno == -1) vfs_bio_clrbuf(bp); } if ((long)bp->b_blkno == -1) { bufdone(bp); return (0); } bp->b_iooffset = dbtob(bp->b_blkno); bo = VFSTOEXT2(vp->v_mount)->um_bo; BO_STRATEGY(bo, bp); return (0); } /* * Print out the contents of an inode. */ static int ext2_print(struct vop_print_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); vn_printf(ip->i_devvp, "\tino %ju", (uintmax_t)ip->i_number); if (vp->v_type == VFIFO) fifo_printinfo(vp); printf("\n"); return (0); } /* * Close wrapper for fifos. * * Update the times on the inode then do device close. */ static int ext2fifo_close(struct vop_close_args *ap) { struct vnode *vp = ap->a_vp; VI_LOCK(vp); if (vp->v_usecount > 1) ext2_itimes_locked(vp); VI_UNLOCK(vp); return (fifo_specops.vop_close(ap)); } /* * Kqfilter wrapper for fifos. * * Fall through to ext2 kqfilter routines if needed */ static int ext2fifo_kqfilter(struct vop_kqfilter_args *ap) { int error; error = fifo_specops.vop_kqfilter(ap); if (error) error = vfs_kqfilter(ap); return (error); } /* * Return POSIX pathconf information applicable to ext2 filesystems. */ static int ext2_pathconf(struct vop_pathconf_args *ap) { int error = 0; switch (ap->a_name) { case _PC_LINK_MAX: if (ext2_htree_has_idx(VTOI(ap->a_vp))) *ap->a_retval = INT_MAX; else *ap->a_retval = ext2_max_nlink(VTOI(ap->a_vp)); break; case _PC_NO_TRUNC: *ap->a_retval = 1; break; #ifdef UFS_ACL case _PC_ACL_EXTENDED: if (ap->a_vp->v_mount->mnt_flag & MNT_ACLS) *ap->a_retval = 1; else *ap->a_retval = 0; break; case _PC_ACL_PATH_MAX: if (ap->a_vp->v_mount->mnt_flag & MNT_ACLS) *ap->a_retval = ACL_MAX_ENTRIES; else *ap->a_retval = 3; break; #endif /* UFS_ACL */ case _PC_MIN_HOLE_SIZE: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize; break; case _PC_PRIO_IO: *ap->a_retval = 0; break; case _PC_SYNC_IO: *ap->a_retval = 0; break; case _PC_ALLOC_SIZE_MIN: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_bsize; break; case _PC_FILESIZEBITS: *ap->a_retval = 64; break; case _PC_REC_INCR_XFER_SIZE: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize; break; case _PC_REC_MAX_XFER_SIZE: *ap->a_retval = -1; /* means ``unlimited'' */ break; case _PC_REC_MIN_XFER_SIZE: *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize; break; case _PC_REC_XFER_ALIGN: *ap->a_retval = PAGE_SIZE; break; case _PC_SYMLINK_MAX: *ap->a_retval = MAXPATHLEN; break; default: error = vop_stdpathconf(ap); break; } return (error); } /* * Vnode operation to remove a named attribute. 
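The change to ext2_strategy() above picks the block-mapping routine by inode flavour: extent-mapped inodes (IN_E4EXTENTS) go through ext4_bmapext(), traditional inodes through ext2_bmaparray(), and a physical block of -1 marks a hole, in which case the buffer is simply cleared and completed. A minimal restatement of that dispatch, with function pointers standing in for the two kernel routines:

    #include <stdint.h>

    typedef int (*bmap_fn)(int64_t lblk, int64_t *pblk);

    /* Sketch of the mapper dispatch added to ext2_strategy(). */
    static int
    map_logical_block(int use_extents, bmap_fn ext4_map, bmap_fn ext2_map,
        int64_t lblk, int64_t *pblk, int *is_hole)
    {
        int error = (use_extents ? ext4_map : ext2_map)(lblk, pblk);

        *is_hole = (error == 0 && *pblk == -1);
        return (error);
    }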
*/ static int ext2_deleteextattr(struct vop_deleteextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VWRITE); if (error) return (error); error = ENOATTR; if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_delete(ip, ap->a_attrnamespace, ap->a_name); if (error != ENOATTR) return (error); } if (ip->i_facl) error = ext2_extattr_block_delete(ip, ap->a_attrnamespace, ap->a_name); return (error); } /* * Vnode operation to retrieve a named extended attribute. */ static int ext2_getextattr(struct vop_getextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VREAD); if (error) return (error); if (ap->a_size != NULL) *ap->a_size = 0; error = ENOATTR; if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_get(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio, ap->a_size); if (error != ENOATTR) return (error); } if (ip->i_facl) error = ext2_extattr_block_get(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio, ap->a_size); return (error); } /* * Vnode operation to retrieve extended attributes on a vnode. */ static int ext2_listextattr(struct vop_listextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VREAD); if (error) return (error); if (ap->a_size != NULL) *ap->a_size = 0; if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_list(ip, ap->a_attrnamespace, ap->a_uio, ap->a_size); if (error) return (error); } if (ip->i_facl) error = ext2_extattr_block_list(ip, ap->a_attrnamespace, ap->a_uio, ap->a_size); return (error); } /* * Vnode operation to set a named attribute. 
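The extended-attribute handlers above try the in-inode EA area first (only when the inode is larger than the rev 0 size) and then fall back to the external block referenced by i_facl. From userland the same attributes are reached through FreeBSD's extattr system calls; a small sketch, with a hypothetical path and attribute name, assuming the filesystem carries the EXT2F_COMPAT_EXT_ATTR feature:

#include <sys/types.h>
#include <sys/extattr.h>

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *path = "/mnt/ext2/somefile";	/* hypothetical */
	const char *val = "blue";
	char buf[64];
	ssize_t n;

	/*
	 * Stored in the inode's extra space when it fits, otherwise in
	 * the EA block that i_facl points at.
	 */
	if (extattr_set_file(path, EXTATTR_NAMESPACE_USER, "color",
	    val, strlen(val)) == -1) {
		perror("extattr_set_file");
		return (1);
	}
	n = extattr_get_file(path, EXTATTR_NAMESPACE_USER, "color",
	    buf, sizeof(buf));
	if (n == -1) {
		perror("extattr_get_file");
		return (1);
	}
	printf("user.color = %.*s\n", (int)n, buf);
	return (0);
}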
*/ static int ext2_setextattr(struct vop_setextattr_args *ap) { struct inode *ip; struct m_ext2fs *fs; int error; ip = VTOI(ap->a_vp); fs = ip->i_e2fs; if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR)) return (EOPNOTSUPP); if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) return (EOPNOTSUPP); error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred, ap->a_td, VWRITE); if (error) return (error); error = ext2_extattr_valid_attrname(ap->a_attrnamespace, ap->a_name); if (error) return (error); if (EXT2_INODE_SIZE(fs) != E2FS_REV0_INODE_SIZE) { error = ext2_extattr_inode_set(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio); if (error != ENOSPC) return (error); } error = ext2_extattr_block_set(ip, ap->a_attrnamespace, ap->a_name, ap->a_uio); return (error); } /* * Vnode pointer to File handle */ /* ARGSUSED */ static int ext2_vptofh(struct vop_vptofh_args *ap) { struct inode *ip; struct ufid *ufhp; ip = VTOI(ap->a_vp); ufhp = (struct ufid *)ap->a_fhp; ufhp->ufid_len = sizeof(struct ufid); ufhp->ufid_ino = ip->i_number; ufhp->ufid_gen = ip->i_gen; return (0); } /* * Initialize the vnode associated with a new inode, handle aliased * vnodes. */ int ext2_vinit(struct mount *mntp, struct vop_vector *fifoops, struct vnode **vpp) { struct inode *ip; struct vnode *vp; vp = *vpp; ip = VTOI(vp); vp->v_type = IFTOVT(ip->i_mode); if (vp->v_type == VFIFO) vp->v_op = fifoops; if (ip->i_number == EXT2_ROOTINO) vp->v_vflag |= VV_ROOT; ip->i_modrev = init_va_filerev(); *vpp = vp; return (0); } /* * Allocate a new inode. */ static int ext2_makeinode(int mode, struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) { struct inode *ip, *pdir; struct vnode *tvp; int error; pdir = VTOI(dvp); #ifdef INVARIANTS if ((cnp->cn_flags & HASBUF) == 0) panic("ext2_makeinode: no name"); #endif *vpp = NULL; if ((mode & IFMT) == 0) mode |= IFREG; error = ext2_valloc(dvp, mode, cnp->cn_cred, &tvp); if (error) { return (error); } ip = VTOI(tvp); ip->i_gid = pdir->i_gid; #ifdef SUIDDIR { /* * if we are * not the owner of the directory, * and we are hacking owners here, (only do this where told to) * and we are not giving it TOO root, (would subvert quotas) * then go ahead and give it to the other user. * Note that this drops off the execute bits for security. */ if ((dvp->v_mount->mnt_flag & MNT_SUIDDIR) && (pdir->i_mode & ISUID) && (pdir->i_uid != cnp->cn_cred->cr_uid) && pdir->i_uid) { ip->i_uid = pdir->i_uid; mode &= ~07111; } else { ip->i_uid = cnp->cn_cred->cr_uid; } } #else ip->i_uid = cnp->cn_cred->cr_uid; #endif ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; ip->i_mode = mode; tvp->v_type = IFTOVT(mode); /* Rest init'd in getnewvnode(). */ ip->i_nlink = 1; if ((ip->i_mode & ISGID) && !groupmember(ip->i_gid, cnp->cn_cred)) { if (priv_check_cred(cnp->cn_cred, PRIV_VFS_RETAINSUGID, 0)) ip->i_mode &= ~ISGID; } if (cnp->cn_flags & ISWHITEOUT) ip->i_flags |= UF_OPAQUE; /* * Make sure inode goes to disk before directory entry. */ error = ext2_update(tvp, !DOINGASYNC(tvp)); if (error) goto bad; #ifdef UFS_ACL if (dvp->v_mount->mnt_flag & MNT_ACLS) { error = ext2_do_posix1e_acl_inheritance_file(dvp, tvp, mode, cnp->cn_cred, cnp->cn_thread); if (error) goto bad; } #endif /* UFS_ACL */ error = ext2_direnter(ip, dvp, cnp); if (error) goto bad; *vpp = tvp; return (0); bad: /* * Write error occurred trying to update the inode * or the directory so must deallocate the inode. */ ip->i_nlink = 0; ip->i_flag |= IN_CHANGE; vput(tvp); return (error); } /* * Vnode op for reading. 
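Before the read path itself, a standalone model of the arithmetic each loop iteration performs: the current uio offset is split into a logical block number, an offset within that block, and a transfer size clamped by the block, the remaining request, and the bytes left in the file. All names below are illustrative, not the kernel's:

#include <sys/param.h>	/* MIN() */

#include <stdint.h>
#include <stdio.h>

static void
read_step(uint64_t offset, uint64_t resid, uint64_t fsize, uint32_t bsize)
{
	uint64_t bytesinfile, lbn, blkoffset, xfersize;

	bytesinfile = fsize - offset;	/* caller ensures offset < fsize */
	lbn = offset / bsize;		/* lblkno() */
	blkoffset = offset % bsize;	/* blkoff() */
	xfersize = bsize - blkoffset;	/* rest of this block */
	xfersize = MIN(xfersize, resid);
	xfersize = MIN(xfersize, bytesinfile);
	printf("lbn=%ju blkoffset=%ju xfersize=%ju\n",
	    (uintmax_t)lbn, (uintmax_t)blkoffset, (uintmax_t)xfersize);
}

int
main(void)
{
	/* 4 KiB blocks: read 10000 bytes at offset 6000 of a 9000-byte file. */
	read_step(6000, 10000, 9000, 4096);
	return (0);
}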
*/ static int ext2_read(struct vop_read_args *ap) { struct vnode *vp; struct inode *ip; - int error; - - vp = ap->a_vp; - ip = VTOI(vp); - - /* EXT4_EXT_LOCK(ip); */ - if (ip->i_flag & IN_E4EXTENTS) - error = ext4_ext_read(ap); - else - error = ext2_ind_read(ap); - /* EXT4_EXT_UNLOCK(ip); */ - return (error); -} - -/* - * Vnode op for reading. - */ -static int -ext2_ind_read(struct vop_read_args *ap) -{ - struct vnode *vp; - struct inode *ip; struct uio *uio; struct m_ext2fs *fs; struct buf *bp; daddr_t lbn, nextlbn; off_t bytesinfile; long size, xfersize, blkoffset; int error, orig_resid, seqcount; int ioflag; vp = ap->a_vp; uio = ap->a_uio; ioflag = ap->a_ioflag; seqcount = ap->a_ioflag >> IO_SEQSHIFT; ip = VTOI(vp); #ifdef INVARIANTS if (uio->uio_rw != UIO_READ) panic("%s: mode", "ext2_read"); if (vp->v_type == VLNK) { if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen) panic("%s: short symlink", "ext2_read"); } else if (vp->v_type != VREG && vp->v_type != VDIR) panic("%s: type %d", "ext2_read", vp->v_type); #endif orig_resid = uio->uio_resid; KASSERT(orig_resid >= 0, ("ext2_read: uio->uio_resid < 0")); if (orig_resid == 0) return (0); KASSERT(uio->uio_offset >= 0, ("ext2_read: uio->uio_offset < 0")); fs = ip->i_e2fs; if (uio->uio_offset < ip->i_size && uio->uio_offset >= fs->e2fs_maxfilesize) return (EOVERFLOW); for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) { if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0) break; lbn = lblkno(fs, uio->uio_offset); nextlbn = lbn + 1; size = blksize(fs, ip, lbn); blkoffset = blkoff(fs, uio->uio_offset); xfersize = fs->e2fs_fsize - blkoffset; if (uio->uio_resid < xfersize) xfersize = uio->uio_resid; if (bytesinfile < xfersize) xfersize = bytesinfile; if (lblktosize(fs, nextlbn) >= ip->i_size) error = bread(vp, lbn, size, NOCRED, &bp); else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { error = cluster_read(vp, ip->i_size, lbn, size, NOCRED, blkoffset + uio->uio_resid, seqcount, 0, &bp); } else if (seqcount > 1) { u_int nextsize = blksize(fs, ip, nextlbn); error = breadn(vp, lbn, size, &nextlbn, &nextsize, 1, NOCRED, &bp); } else error = bread(vp, lbn, size, NOCRED, &bp); if (error) { brelse(bp); bp = NULL; break; } /* * We should only get non-zero b_resid when an I/O error * has occurred, which should cause us to break above. * However, if the short read did not cause an error, * then we want to ensure that we do not uiomove bad * or uninitialized data. */ size -= bp->b_resid; if (size < xfersize) { if (size == 0) break; xfersize = size; } error = uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio); if (error) break; vfs_bio_brelse(bp, ioflag); } /* * This can only happen in the case of an error because the loop * above resets bp to NULL on each iteration and on normal * completion has not set a new value into it. 
so it must have come * from a 'break' statement */ if (bp != NULL) vfs_bio_brelse(bp, ioflag); if ((error == 0 || uio->uio_resid != orig_resid) && (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) ip->i_flag |= IN_ACCESS; return (error); } static int ext2_ioctl(struct vop_ioctl_args *ap) { switch (ap->a_command) { case FIOSEEKDATA: case FIOSEEKHOLE: return (vn_bmap_seekhole(ap->a_vp, ap->a_command, (off_t *)ap->a_data, ap->a_cred)); default: return (ENOTTY); } -} - -/* - * this function handles ext4 extents block mapping - */ -static int -ext4_ext_read(struct vop_read_args *ap) -{ - static unsigned char zeroes[EXT2_MAX_BLOCK_SIZE]; - struct vnode *vp; - struct inode *ip; - struct uio *uio; - struct m_ext2fs *fs; - struct buf *bp; - struct ext4_extent nex, *ep; - struct ext4_extent_path path; - daddr_t lbn, newblk; - off_t bytesinfile; - int cache_type; - ssize_t orig_resid; - int error; - long size, xfersize, blkoffset; - - vp = ap->a_vp; - ip = VTOI(vp); - uio = ap->a_uio; - memset(&path, 0, sizeof(path)); - - orig_resid = uio->uio_resid; - KASSERT(orig_resid >= 0, ("%s: uio->uio_resid < 0", __func__)); - if (orig_resid == 0) - return (0); - KASSERT(uio->uio_offset >= 0, ("%s: uio->uio_offset < 0", __func__)); - fs = ip->i_e2fs; - if (uio->uio_offset < ip->i_size && uio->uio_offset >= fs->e2fs_maxfilesize) - return (EOVERFLOW); - - while (uio->uio_resid > 0) { - if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0) - break; - lbn = lblkno(fs, uio->uio_offset); - size = blksize(fs, ip, lbn); - blkoffset = blkoff(fs, uio->uio_offset); - - xfersize = fs->e2fs_fsize - blkoffset; - xfersize = MIN(xfersize, uio->uio_resid); - xfersize = MIN(xfersize, bytesinfile); - - /* get block from ext4 extent cache */ - cache_type = ext4_ext_in_cache(ip, lbn, &nex); - switch (cache_type) { - case EXT4_EXT_CACHE_NO: - ext4_ext_find_extent(fs, ip, lbn, &path); - if (path.ep_is_sparse) - ep = &path.ep_sparse_ext; - else - ep = path.ep_ext; - if (ep == NULL) - return (EIO); - - ext4_ext_put_cache(ip, ep, - path.ep_is_sparse ? EXT4_EXT_CACHE_GAP : EXT4_EXT_CACHE_IN); - - newblk = lbn - ep->e_blk + (ep->e_start_lo | - (daddr_t)ep->e_start_hi << 32); - - if (path.ep_bp != NULL) { - brelse(path.ep_bp); - path.ep_bp = NULL; - } - break; - - case EXT4_EXT_CACHE_GAP: - /* block has not been allocated yet */ - break; - - case EXT4_EXT_CACHE_IN: - newblk = lbn - nex.e_blk + (nex.e_start_lo | - (daddr_t)nex.e_start_hi << 32); - break; - - default: - panic("%s: invalid cache type", __func__); - } - - if (cache_type == EXT4_EXT_CACHE_GAP || - (cache_type == EXT4_EXT_CACHE_NO && path.ep_is_sparse)) { - if (xfersize > sizeof(zeroes)) - xfersize = sizeof(zeroes); - error = uiomove(zeroes, xfersize, uio); - if (error) - return (error); - } else { - error = bread(ip->i_devvp, fsbtodb(fs, newblk), size, - NOCRED, &bp); - if (error) { - brelse(bp); - return (error); - } - - size -= bp->b_resid; - if (size < xfersize) { - if (size == 0) { - bqrelse(bp); - break; - } - xfersize = size; - } - error = uiomove(bp->b_data + blkoffset, xfersize, uio); - bqrelse(bp); - if (error) - return (error); - } - } - - return (0); } /* * Vnode op for writing. 
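With ext4_ext_read() deleted above, extent-mapped files now go through the same ext2_read() loop, and the extent lookup happens underneath it (see the ext4_bmapext() dispatch added to ext2_strategy() earlier in this diff). Separately, the FIOSEEKDATA/FIOSEEKHOLE cases in ext2_ioctl() are what service lseek(2) with SEEK_DATA/SEEK_HOLE here, via vn_bmap_seekhole(). A userland sketch; the file name is illustrative:

#include <sys/types.h>

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "sparse.dat";	/* illustrative */
	off_t data, hole;
	int fd;

	if ((fd = open(path, O_RDONLY)) == -1) {
		perror("open");
		return (1);
	}
	/*
	 * These whence values are backed by the FIOSEEKDATA/FIOSEEKHOLE
	 * path that ext2_ioctl() forwards to vn_bmap_seekhole().
	 */
	data = lseek(fd, 0, SEEK_DATA);
	if (data == -1)
		perror("lseek(SEEK_DATA)");
	else {
		hole = lseek(fd, data, SEEK_HOLE);
		printf("first data at %jd, next hole at %jd\n",
		    (intmax_t)data, (intmax_t)hole);
	}
	close(fd);
	return (0);
}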
*/ static int ext2_write(struct vop_write_args *ap) { struct vnode *vp; struct uio *uio; struct inode *ip; struct m_ext2fs *fs; struct buf *bp; daddr_t lbn; off_t osize; int blkoffset, error, flags, ioflag, resid, size, seqcount, xfersize; ioflag = ap->a_ioflag; uio = ap->a_uio; vp = ap->a_vp; seqcount = ioflag >> IO_SEQSHIFT; ip = VTOI(vp); #ifdef INVARIANTS if (uio->uio_rw != UIO_WRITE) panic("%s: mode", "ext2_write"); #endif switch (vp->v_type) { case VREG: if (ioflag & IO_APPEND) uio->uio_offset = ip->i_size; if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) return (EPERM); /* FALLTHROUGH */ case VLNK: break; case VDIR: /* XXX differs from ffs -- this is called from ext2_mkdir(). */ if ((ioflag & IO_SYNC) == 0) panic("ext2_write: nonsync dir write"); break; default: panic("ext2_write: type %p %d (%jd,%jd)", (void *)vp, vp->v_type, (intmax_t)uio->uio_offset, (intmax_t)uio->uio_resid); } KASSERT(uio->uio_resid >= 0, ("ext2_write: uio->uio_resid < 0")); KASSERT(uio->uio_offset >= 0, ("ext2_write: uio->uio_offset < 0")); fs = ip->i_e2fs; if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->e2fs_maxfilesize) return (EFBIG); /* * Maybe this should be above the vnode op call, but so long as * file servers have no limits, I don't think it matters. */ if (vn_rlimit_fsize(vp, uio, uio->uio_td)) return (EFBIG); resid = uio->uio_resid; osize = ip->i_size; if (seqcount > BA_SEQMAX) flags = BA_SEQMAX << BA_SEQSHIFT; else flags = seqcount << BA_SEQSHIFT; if ((ioflag & IO_SYNC) && !DOINGASYNC(vp)) flags |= IO_SYNC; for (error = 0; uio->uio_resid > 0;) { lbn = lblkno(fs, uio->uio_offset); blkoffset = blkoff(fs, uio->uio_offset); xfersize = fs->e2fs_fsize - blkoffset; if (uio->uio_resid < xfersize) xfersize = uio->uio_resid; if (uio->uio_offset + xfersize > ip->i_size) vnode_pager_setsize(vp, uio->uio_offset + xfersize); /* * We must perform a read-before-write if the transfer size * does not cover the entire buffer. */ if (fs->e2fs_bsize > xfersize) flags |= BA_CLRBUF; else flags &= ~BA_CLRBUF; error = ext2_balloc(ip, lbn, blkoffset + xfersize, ap->a_cred, &bp, flags); if (error != 0) break; if ((ioflag & (IO_SYNC | IO_INVAL)) == (IO_SYNC | IO_INVAL)) bp->b_flags |= B_NOCACHE; if (uio->uio_offset + xfersize > ip->i_size) ip->i_size = uio->uio_offset + xfersize; size = blksize(fs, ip, lbn) - bp->b_resid; if (size < xfersize) xfersize = size; error = uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio); /* * If the buffer is not already filled and we encounter an * error while trying to fill it, we have to clear out any * garbage data from the pages instantiated for the buffer. * If we do not, a failed uiomove() during a write can leave * the prior contents of the pages exposed to a userland mmap. * * Note that we need only clear buffers with a transfer size * equal to the block size because buffers with a shorter * transfer size were cleared above by the call to ext2_balloc() * with the BA_CLRBUF flag set. * * If the source region for uiomove identically mmaps the * buffer, uiomove() performed the NOP copy, and the buffer * content remains valid because the page fault handler * validated the pages. */ if (error != 0 && (bp->b_flags & B_CACHE) == 0 && fs->e2fs_bsize == xfersize) vfs_bio_clrbuf(bp); vfs_bio_set_flags(bp, ioflag); /* * If IO_SYNC each buffer is written synchronously. Otherwise * if we have a severe page deficiency write the buffer * asynchronously. 
Otherwise try to cluster, and if that * doesn't do it then either do an async write (if O_DIRECT), * or a delayed write (if not). */ if (ioflag & IO_SYNC) { (void)bwrite(bp); } else if (vm_page_count_severe() || buf_dirty_count_severe() || (ioflag & IO_ASYNC)) { bp->b_flags |= B_CLUSTEROK; bawrite(bp); } else if (xfersize + blkoffset == fs->e2fs_fsize) { if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) { bp->b_flags |= B_CLUSTEROK; cluster_write(vp, bp, ip->i_size, seqcount, 0); } else { bawrite(bp); } } else if (ioflag & IO_DIRECT) { bp->b_flags |= B_CLUSTEROK; bawrite(bp); } else { bp->b_flags |= B_CLUSTEROK; bdwrite(bp); } if (error || xfersize == 0) break; } /* * If we successfully wrote any data, and we are not the superuser * we clear the setuid and setgid bits as a precaution against * tampering. */ if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ap->a_cred) { if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) ip->i_mode &= ~(ISUID | ISGID); } if (error) { if (ioflag & IO_UNIT) { (void)ext2_truncate(vp, osize, ioflag & IO_SYNC, ap->a_cred, uio->uio_td); uio->uio_offset -= resid - uio->uio_resid; uio->uio_resid = resid; } } if (uio->uio_resid != resid) { ip->i_flag |= IN_CHANGE | IN_UPDATE; if (ioflag & IO_SYNC) error = ext2_update(vp, 1); } return (error); } Index: head/sys/fs/ext2fs/ext2fs.h =================================================================== --- head/sys/fs/ext2fs/ext2fs.h (revision 324705) +++ head/sys/fs/ext2fs/ext2fs.h (revision 324706) @@ -1,408 +1,407 @@ /*- * modified for EXT2FS support in Lites 1.1 * * Aug 1995, Godmar Back (gback@cs.utah.edu) * University of Utah, Department of Computer Science * * $FreeBSD$ */ /*- * Copyright (c) 2009 Aditya Sarawgi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * */ #ifndef _FS_EXT2FS_EXT2FS_H_ #define _FS_EXT2FS_EXT2FS_H_ #include /* * Super block for an ext2fs file system. 
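For orientation before the structure itself: the superblock conventionally sits 1024 bytes into the volume, is stored little-endian, and carries the magic E2FS_MAGIC (0xEF53). The sketch below reads a few fields straight off an image; the byte offsets are derived from the field order of struct ext2fs as listed below (e2fs_fbcount at 12, e2fs_log_bsize at 24, e2fs_magic at 56), and a little-endian host is assumed:

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define EXT2_SB_OFFSET	1024	/* superblock location on the volume */
#define E2FS_MAGIC	0xEF53

int
main(int argc, char **argv)
{
	uint8_t sb[1024];
	uint32_t log_bsize, fbcount;
	uint16_t magic;
	int fd;

	if (argc != 2)
		errx(1, "usage: %s <image-or-device>", argv[0]);
	if ((fd = open(argv[1], O_RDONLY)) == -1)
		err(1, "open");
	if (pread(fd, sb, sizeof(sb), EXT2_SB_OFFSET) != (ssize_t)sizeof(sb))
		err(1, "pread");

	memcpy(&magic, sb + 56, sizeof(magic));		/* e2fs_magic */
	memcpy(&log_bsize, sb + 24, sizeof(log_bsize));	/* e2fs_log_bsize */
	memcpy(&fbcount, sb + 12, sizeof(fbcount));	/* e2fs_fbcount */

	if (magic != E2FS_MAGIC)
		errx(1, "not an ext2/3/4 volume (magic 0x%04x)", magic);
	/* block size = 1024 * 2^e2fs_log_bsize, per the field comment below */
	printf("block size %u, free blocks %u\n", 1024U << log_bsize, fbcount);
	close(fd);
	return (0);
}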
*/ struct ext2fs { uint32_t e2fs_icount; /* Inode count */ uint32_t e2fs_bcount; /* blocks count */ uint32_t e2fs_rbcount; /* reserved blocks count */ uint32_t e2fs_fbcount; /* free blocks count */ uint32_t e2fs_ficount; /* free inodes count */ uint32_t e2fs_first_dblock; /* first data block */ uint32_t e2fs_log_bsize; /* block size = 1024*(2^e2fs_log_bsize) */ uint32_t e2fs_log_fsize; /* fragment size */ uint32_t e2fs_bpg; /* blocks per group */ uint32_t e2fs_fpg; /* frags per group */ uint32_t e2fs_ipg; /* inodes per group */ uint32_t e2fs_mtime; /* mount time */ uint32_t e2fs_wtime; /* write time */ uint16_t e2fs_mnt_count; /* mount count */ uint16_t e2fs_max_mnt_count; /* max mount count */ uint16_t e2fs_magic; /* magic number */ uint16_t e2fs_state; /* file system state */ uint16_t e2fs_beh; /* behavior on errors */ uint16_t e2fs_minrev; /* minor revision level */ uint32_t e2fs_lastfsck; /* time of last fsck */ uint32_t e2fs_fsckintv; /* max time between fscks */ uint32_t e2fs_creator; /* creator OS */ uint32_t e2fs_rev; /* revision level */ uint16_t e2fs_ruid; /* default uid for reserved blocks */ uint16_t e2fs_rgid; /* default gid for reserved blocks */ /* EXT2_DYNAMIC_REV superblocks */ uint32_t e2fs_first_ino; /* first non-reserved inode */ uint16_t e2fs_inode_size; /* size of inode structure */ uint16_t e2fs_block_group_nr; /* block grp number of this sblk*/ uint32_t e2fs_features_compat; /* compatible feature set */ uint32_t e2fs_features_incompat; /* incompatible feature set */ uint32_t e2fs_features_rocompat; /* RO-compatible feature set */ uint8_t e2fs_uuid[16]; /* 128-bit uuid for volume */ char e2fs_vname[16]; /* volume name */ char e2fs_fsmnt[64]; /* name mounted on */ uint32_t e2fs_algo; /* For compression */ uint8_t e2fs_prealloc; /* # of blocks for old prealloc */ uint8_t e2fs_dir_prealloc; /* # of blocks for old prealloc dirs */ uint16_t e2fs_reserved_ngdb; /* # of reserved gd blocks for resize */ char e3fs_journal_uuid[16]; /* uuid of journal superblock */ uint32_t e3fs_journal_inum; /* inode number of journal file */ uint32_t e3fs_journal_dev; /* device number of journal file */ uint32_t e3fs_last_orphan; /* start of list of inodes to delete */ uint32_t e3fs_hash_seed[4]; /* HTREE hash seed */ char e3fs_def_hash_version;/* Default hash version to use */ char e3fs_jnl_backup_type; uint16_t e3fs_desc_size; /* size of group descriptor */ uint32_t e3fs_default_mount_opts; uint32_t e3fs_first_meta_bg; /* First metablock block group */ uint32_t e3fs_mkfs_time; /* when the fs was created */ uint32_t e3fs_jnl_blks[17]; /* backup of the journal inode */ uint32_t e4fs_bcount_hi; /* high bits of blocks count */ uint32_t e4fs_rbcount_hi; /* high bits of reserved blocks count */ uint32_t e4fs_fbcount_hi; /* high bits of free blocks count */ uint16_t e4fs_min_extra_isize; /* all inodes have some bytes */ uint16_t e4fs_want_extra_isize;/* inodes must reserve some bytes */ uint32_t e4fs_flags; /* miscellaneous flags */ uint16_t e4fs_raid_stride; /* RAID stride */ uint16_t e4fs_mmpintv; /* seconds to wait in MMP checking */ uint64_t e4fs_mmpblk; /* block for multi-mount protection */ uint32_t e4fs_raid_stripe_wid; /* blocks on data disks (N * stride) */ uint8_t e4fs_log_gpf; /* FLEX_BG group size */ uint8_t e4fs_chksum_type; /* metadata checksum algorithm used */ uint8_t e4fs_encrypt; /* versioning level for encryption */ uint8_t e4fs_reserved_pad; uint64_t e4fs_kbytes_written; /* number of lifetime kilobytes */ uint32_t e4fs_snapinum; /* inode number of active snapshot */ uint32_t 
e4fs_snapid; /* sequential ID of active snapshot */ uint64_t e4fs_snaprbcount; /* reserved blocks for active snapshot */ uint32_t e4fs_snaplist; /* inode number for on-disk snapshot */ uint32_t e4fs_errcount; /* number of file system errors */ uint32_t e4fs_first_errtime; /* first time an error happened */ uint32_t e4fs_first_errino; /* inode involved in first error */ uint64_t e4fs_first_errblk; /* block involved of first error */ uint8_t e4fs_first_errfunc[32];/* function where error happened */ uint32_t e4fs_first_errline; /* line number where error happened */ uint32_t e4fs_last_errtime; /* most recent time of an error */ uint32_t e4fs_last_errino; /* inode involved in last error */ uint32_t e4fs_last_errline; /* line number where error happened */ uint64_t e4fs_last_errblk; /* block involved of last error */ uint8_t e4fs_last_errfunc[32]; /* function where error happened */ uint8_t e4fs_mount_opts[64]; uint32_t e4fs_usrquota_inum; /* inode for tracking user quota */ uint32_t e4fs_grpquota_inum; /* inode for tracking group quota */ uint32_t e4fs_overhead_clusters;/* overhead blocks/clusters */ uint32_t e4fs_backup_bgs[2]; /* groups with sparse_super2 SBs */ uint8_t e4fs_encrypt_algos[4];/* encryption algorithms in use */ uint8_t e4fs_encrypt_pw_salt[16];/* salt used for string2key */ uint32_t e4fs_lpf_ino; /* location of the lost+found inode */ uint32_t e4fs_proj_quota_inum; /* inode for tracking project quota */ uint32_t e4fs_chksum_seed; /* checksum seed */ uint32_t e4fs_reserved[98]; /* padding to the end of the block */ uint32_t e4fs_sbchksum; /* superblock checksum */ }; /* * The path name on which the file system is mounted is maintained * in fs_fsmnt. MAXMNTLEN defines the amount of space allocated in * the super block for this name. */ #define MAXMNTLEN 512 /* * In-Memory Superblock */ struct m_ext2fs { struct ext2fs * e2fs; char e2fs_fsmnt[MAXMNTLEN];/* name mounted on */ char e2fs_ronly; /* mounted read-only flag */ char e2fs_fmod; /* super block modified flag */ uint32_t e2fs_bsize; /* Block size */ uint32_t e2fs_bshift; /* calc of logical block no */ uint32_t e2fs_bpg; /* Number of blocks per group */ int64_t e2fs_qbmask; /* = s_blocksize -1 */ uint32_t e2fs_fsbtodb; /* Shift to get disk block */ uint32_t e2fs_ipg; /* Number of inodes per group */ uint32_t e2fs_ipb; /* Number of inodes per block */ uint32_t e2fs_itpg; /* Number of inode table per group */ uint32_t e2fs_fsize; /* Size of fragments per block */ uint32_t e2fs_fpb; /* Number of fragments per block */ uint32_t e2fs_fpg; /* Number of fragments per group */ uint32_t e2fs_gdbcount; /* Number of group descriptors */ uint32_t e2fs_gcount; /* Number of groups */ uint32_t e2fs_isize; /* Size of inode */ uint32_t e2fs_total_dir; /* Total number of directories */ uint8_t *e2fs_contigdirs; /* (u) # of contig. 
allocated dirs */ char e2fs_wasvalid; /* valid at mount time */ off_t e2fs_maxfilesize; struct ext2_gd *e2fs_gd; /* Group Descriptors */ int32_t e2fs_contigsumsize; /* size of cluster summary array */ int32_t *e2fs_maxcluster; /* max cluster in each cyl group */ struct csum *e2fs_clustersum; /* cluster summary in each cyl group */ int32_t e2fs_uhash; /* 3 if hash should be signed, 0 if not */ }; /* cluster summary information */ struct csum { int8_t cs_init; /* cluster summary has been initialized */ int32_t *cs_sum; /* cluster summary array */ }; /* * The second extended file system magic number */ #define E2FS_MAGIC 0xEF53 /* * Revision levels */ #define E2FS_REV0 0 /* The good old (original) format */ #define E2FS_REV1 1 /* V2 format w/ dynamic inode sizes */ #define E2FS_REV0_INODE_SIZE 128 /* * compatible/incompatible features */ #define EXT2F_COMPAT_PREALLOC 0x0001 #define EXT2F_COMPAT_IMAGIC_INODES 0x0002 #define EXT2F_COMPAT_HASJOURNAL 0x0004 #define EXT2F_COMPAT_EXT_ATTR 0x0008 #define EXT2F_COMPAT_RESIZE 0x0010 #define EXT2F_COMPAT_DIRHASHINDEX 0x0020 #define EXT2F_COMPAT_LAZY_BG 0x0040 #define EXT2F_COMPAT_EXCLUDE_BITMAP 0x0100 #define EXT2F_COMPAT_SPARSESUPER2 0x0200 #define EXT2F_ROCOMPAT_SPARSESUPER 0x0001 #define EXT2F_ROCOMPAT_LARGEFILE 0x0002 #define EXT2F_ROCOMPAT_BTREE_DIR 0x0004 #define EXT2F_ROCOMPAT_HUGE_FILE 0x0008 #define EXT2F_ROCOMPAT_GDT_CSUM 0x0010 #define EXT2F_ROCOMPAT_DIR_NLINK 0x0020 #define EXT2F_ROCOMPAT_EXTRA_ISIZE 0x0040 #define EXT2F_ROCOMPAT_HAS_SNAPSHOT 0x0080 #define EXT2F_ROCOMPAT_QUOTA 0x0100 #define EXT2F_ROCOMPAT_BIGALLOC 0x0200 #define EXT2F_ROCOMPAT_METADATA_CKSUM 0x0400 #define EXT2F_ROCOMPAT_REPLICA 0x0800 #define EXT2F_ROCOMPAT_READONLY 0x1000 #define EXT2F_ROCOMPAT_PROJECT 0x2000 #define EXT2F_INCOMPAT_COMP 0x0001 #define EXT2F_INCOMPAT_FTYPE 0x0002 #define EXT2F_INCOMPAT_RECOVER 0x0004 #define EXT2F_INCOMPAT_JOURNAL_DEV 0x0008 #define EXT2F_INCOMPAT_META_BG 0x0010 #define EXT2F_INCOMPAT_EXTENTS 0x0040 #define EXT2F_INCOMPAT_64BIT 0x0080 #define EXT2F_INCOMPAT_MMP 0x0100 #define EXT2F_INCOMPAT_FLEX_BG 0x0200 #define EXT2F_INCOMPAT_EA_INODE 0x0400 #define EXT2F_INCOMPAT_DIRDATA 0x1000 #define EXT2F_INCOMPAT_CSUM_SEED 0x2000 #define EXT2F_INCOMPAT_LARGEDIR 0x4000 #define EXT2F_INCOMPAT_INLINE_DATA 0x8000 #define EXT2F_INCOMPAT_ENCRYPT 0x10000 struct ext2_feature { int mask; const char *name; }; static const struct ext2_feature compat[] = { { EXT2F_COMPAT_PREALLOC, "dir_prealloc" }, { EXT2F_COMPAT_IMAGIC_INODES, "imagic_inodes" }, { EXT2F_COMPAT_HASJOURNAL, "has_journal" }, { EXT2F_COMPAT_EXT_ATTR, "ext_attr" }, { EXT2F_COMPAT_RESIZE, "resize_inode" }, { EXT2F_COMPAT_DIRHASHINDEX, "dir_index" }, { EXT2F_COMPAT_EXCLUDE_BITMAP, "snapshot_bitmap" }, { EXT2F_COMPAT_SPARSESUPER2, "sparse_super2" } }; static const struct ext2_feature ro_compat[] = { { EXT2F_ROCOMPAT_SPARSESUPER, "sparse_super" }, { EXT2F_ROCOMPAT_LARGEFILE, "large_file" }, { EXT2F_ROCOMPAT_BTREE_DIR, "btree_dir" }, { EXT2F_ROCOMPAT_HUGE_FILE, "huge_file" }, { EXT2F_ROCOMPAT_GDT_CSUM, "uninit_groups" }, { EXT2F_ROCOMPAT_DIR_NLINK, "dir_nlink" }, { EXT2F_ROCOMPAT_EXTRA_ISIZE, "extra_isize" }, { EXT2F_ROCOMPAT_HAS_SNAPSHOT, "snapshot" }, { EXT2F_ROCOMPAT_QUOTA, "quota" }, { EXT2F_ROCOMPAT_BIGALLOC, "bigalloc" }, { EXT2F_ROCOMPAT_METADATA_CKSUM, "metadata_csum" }, { EXT2F_ROCOMPAT_REPLICA, "replica" }, { EXT2F_ROCOMPAT_READONLY, "ro" }, { EXT2F_ROCOMPAT_PROJECT, "project" } }; static const struct ext2_feature incompat[] = { { EXT2F_INCOMPAT_COMP, "compression" }, { EXT2F_INCOMPAT_FTYPE, 
"filetype" }, { EXT2F_INCOMPAT_RECOVER, "needs_recovery" }, { EXT2F_INCOMPAT_JOURNAL_DEV, "journal_dev" }, { EXT2F_INCOMPAT_META_BG, "meta_bg" }, { EXT2F_INCOMPAT_EXTENTS, "extents" }, { EXT2F_INCOMPAT_64BIT, "64bit" }, { EXT2F_INCOMPAT_MMP, "mmp" }, { EXT2F_INCOMPAT_FLEX_BG, "flex_bg" }, { EXT2F_INCOMPAT_EA_INODE, "ea_inode" }, { EXT2F_INCOMPAT_DIRDATA, "dirdata" }, { EXT2F_INCOMPAT_CSUM_SEED, "metadata_csum_seed" }, { EXT2F_INCOMPAT_LARGEDIR, "large_dir" }, { EXT2F_INCOMPAT_INLINE_DATA, "inline_data" }, { EXT2F_INCOMPAT_ENCRYPT, "encrypt" } }; /* * Features supported in this implementation * * We support the following REV1 features: * - EXT2F_ROCOMPAT_SPARSESUPER * - EXT2F_ROCOMPAT_LARGEFILE * - EXT2F_ROCOMPAT_EXTRA_ISIZE * - EXT2F_INCOMPAT_FTYPE * * We partially (read-only) support the following EXT4 features: * - EXT2F_ROCOMPAT_HUGE_FILE * - EXT2F_INCOMPAT_EXTENTS * * We do not support these EXT4 features but they are irrelevant * for read-only support: * - EXT2F_INCOMPAT_RECOVER * - EXT2F_INCOMPAT_FLEX_BG * - EXT2F_INCOMPAT_META_BG */ #define EXT2F_COMPAT_SUPP EXT2F_COMPAT_DIRHASHINDEX #define EXT2F_ROCOMPAT_SUPP (EXT2F_ROCOMPAT_SPARSESUPER | \ EXT2F_ROCOMPAT_LARGEFILE | \ EXT2F_ROCOMPAT_GDT_CSUM | \ EXT2F_ROCOMPAT_DIR_NLINK | \ EXT2F_ROCOMPAT_HUGE_FILE | \ EXT2F_ROCOMPAT_EXTRA_ISIZE) #define EXT2F_INCOMPAT_SUPP EXT2F_INCOMPAT_FTYPE #define EXT4F_RO_INCOMPAT_SUPP (EXT2F_INCOMPAT_EXTENTS | \ EXT2F_INCOMPAT_RECOVER | \ EXT2F_INCOMPAT_FLEX_BG | \ EXT2F_INCOMPAT_META_BG ) /* Assume that user mode programs are passing in an ext2fs superblock, not * a kernel struct super_block. This will allow us to call the feature-test * macros from user land. */ #define EXT2_SB(sb) (sb) /* * Feature set definitions */ #define EXT2_HAS_COMPAT_FEATURE(sb,mask) \ ( EXT2_SB(sb)->e2fs->e2fs_features_compat & htole32(mask) ) #define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask) \ ( EXT2_SB(sb)->e2fs->e2fs_features_rocompat & htole32(mask) ) #define EXT2_HAS_INCOMPAT_FEATURE(sb,mask) \ ( EXT2_SB(sb)->e2fs->e2fs_features_incompat & htole32(mask) ) /* * File clean flags */ #define E2FS_ISCLEAN 0x0001 /* Unmounted cleanly */ #define E2FS_ERRORS 0x0002 /* Errors detected */ /* * Filesystem miscellaneous flags */ #define E2FS_SIGNED_HASH 0x0001 #define E2FS_UNSIGNED_HASH 0x0002 #define EXT2_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ #define EXT2_BG_BLOCK_UNINIT 0x0002 /* Block bitmap not in use */ #define EXT2_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */ /* ext2 file system block group descriptor */ struct ext2_gd { uint32_t ext2bgd_b_bitmap; /* blocks bitmap block */ uint32_t ext2bgd_i_bitmap; /* inodes bitmap block */ uint32_t ext2bgd_i_tables; /* inodes table block */ uint16_t ext2bgd_nbfree; /* number of free blocks */ uint16_t ext2bgd_nifree; /* number of free inodes */ uint16_t ext2bgd_ndirs; /* number of directories */ uint16_t ext4bgd_flags; /* block group flags */ uint32_t ext4bgd_x_bitmap; /* snapshot exclusion bitmap loc. */ uint16_t ext4bgd_b_bmap_csum; /* block bitmap checksum */ uint16_t ext4bgd_i_bmap_csum; /* inode bitmap checksum */ uint16_t ext4bgd_i_unused; /* unused inode count */ uint16_t ext4bgd_csum; /* group descriptor checksum */ }; /* EXT2FS metadata is stored in little-endian byte order. These macros * help reading it. 
*/ #define e2fs_cgload(old, new, size) memcpy((new), (old), (size)); #define e2fs_cgsave(old, new, size) memcpy((new), (old), (size)); /* * Macro-instructions used to manage several block sizes */ -#define EXT2_MAX_BLOCK_SIZE 4096 #define EXT2_MIN_BLOCK_LOG_SIZE 10 #define EXT2_BLOCK_SIZE(s) ((s)->e2fs_bsize) #define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof(uint32_t)) #define EXT2_INODE_SIZE(s) (EXT2_SB(s)->e2fs_isize) /* * Macro-instructions used to manage fragments */ #define EXT2_MIN_FRAG_SIZE 1024 #define EXT2_MAX_FRAG_SIZE 4096 #define EXT2_MIN_FRAG_LOG_SIZE 10 #define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->e2fs_fsize) #define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->e2fs_fpb) /* * Macro-instructions used to manage group descriptors */ #define EXT2_BLOCKS_PER_GROUP(s) (EXT2_SB(s)->e2fs_bpg) #endif /* !_FS_EXT2FS_EXT2FS_H_ */ Index: head/sys/fs/ext2fs/inode.h =================================================================== --- head/sys/fs/ext2fs/inode.h (revision 324705) +++ head/sys/fs/ext2fs/inode.h (revision 324706) @@ -1,189 +1,194 @@ /*- * Copyright (c) 1982, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)inode.h 8.9 (Berkeley) 5/14/95 * $FreeBSD$ */ #ifndef _FS_EXT2FS_INODE_H_ #define _FS_EXT2FS_INODE_H_ #include #include #include #include #include /* * This must agree with the definition in . */ #define doff_t int32_t #define EXT2_NDADDR 12 /* Direct addresses in inode. */ #define EXT2_NIADDR 3 /* Indirect addresses in inode. */ /* * The size of physical and logical block numbers in EXT2FS. */ typedef uint32_t e2fs_daddr_t; typedef int64_t e2fs_lbn_t; typedef int64_t e4fs_daddr_t; /* * The inode is used to describe each active (or recently active) file in the * EXT2FS filesystem. It is composed of two types of information. 
The first * part is the information that is needed only while the file is active (such * as the identity of the file and linkage to speed its lookup). The second * part is the permanent meta-data associated with the file which is read in * from the permanent dinode from long term storage when the file becomes * active, and is put back when the file is no longer being used. */ struct inode { struct vnode *i_vnode;/* Vnode associated with this inode. */ struct ext2mount *i_ump; uint32_t i_flag; /* flags, see below */ ino_t i_number; /* The identity of the inode. */ struct m_ext2fs *i_e2fs; /* EXT2FS */ u_quad_t i_modrev; /* Revision level for NFS lease. */ /* * Side effects; used during directory lookup. */ int32_t i_count; /* Size of free slot in directory. */ doff_t i_endoff; /* End of useful stuff in directory. */ doff_t i_diroff; /* Offset in dir, where we found last entry. */ doff_t i_offset; /* Offset of free space in directory. */ uint32_t i_block_group; uint32_t i_next_alloc_block; uint32_t i_next_alloc_goal; /* Fields from struct dinode in UFS. */ uint16_t i_mode; /* IFMT, permissions; see below. */ int32_t i_nlink; /* File link count. */ uint32_t i_uid; /* File owner. */ uint32_t i_gid; /* File group. */ uint64_t i_size; /* File byte count. */ uint64_t i_blocks; /* Blocks actually held. */ int32_t i_atime; /* Last access time. */ int32_t i_mtime; /* Last modified time. */ int32_t i_ctime; /* Last inode change time. */ int32_t i_birthtime; /* Inode creation time. */ int32_t i_mtimensec; /* Last modified time. */ int32_t i_atimensec; /* Last access time. */ int32_t i_ctimensec; /* Last inode change time. */ int32_t i_birthnsec; /* Inode creation time. */ uint32_t i_gen; /* Generation number. */ uint64_t i_facl; /* EA block number. */ uint32_t i_flags; /* Status flags (chflags). */ - uint32_t i_db[EXT2_NDADDR]; /* Direct disk blocks. */ - uint32_t i_ib[EXT2_NIADDR]; /* Indirect disk blocks. */ + union { + struct { + uint32_t i_db[EXT2_NDADDR]; /* Direct disk blocks. */ + uint32_t i_ib[EXT2_NIADDR]; /* Indirect disk blocks. */ + }; + uint32_t i_data[EXT2_NDADDR + EXT2_NIADDR]; + }; struct ext4_extent_cache i_ext_cache; /* cache for ext4 extent */ }; /* * The di_db fields may be overlaid with other information for * file types that do not have associated disk storage. Block * and character devices overlay the first data block with their * dev_t value. Short symbolic links place their path in the * di_db area. */ #define i_shortlink i_db #define i_rdev i_db[0] /* File permissions. */ #define IEXEC 0000100 /* Executable. */ #define IWRITE 0000200 /* Writeable. */ #define IREAD 0000400 /* Readable. */ #define ISVTX 0001000 /* Sticky bit. */ #define ISGID 0002000 /* Set-gid. */ #define ISUID 0004000 /* Set-uid. */ /* File types. */ #define IFMT 0170000 /* Mask of file type. */ #define IFIFO 0010000 /* Named pipe (fifo). */ #define IFCHR 0020000 /* Character device. */ #define IFDIR 0040000 /* Directory file. */ #define IFBLK 0060000 /* Block device. */ #define IFREG 0100000 /* Regular file. */ #define IFLNK 0120000 /* Symbolic link. */ #define IFSOCK 0140000 /* UNIX domain socket. */ #define IFWHT 0160000 /* Whiteout. */ /* These flags are kept in i_flag. */ #define IN_ACCESS 0x0001 /* Access time update request. */ #define IN_CHANGE 0x0002 /* Inode change time update request. */ #define IN_UPDATE 0x0004 /* Modification time update request. */ #define IN_MODIFIED 0x0008 /* Inode has been modified. */ #define IN_RENAME 0x0010 /* Inode is being renamed. 
*/ #define IN_HASHED 0x0020 /* Inode is on hash list */ #define IN_LAZYMOD 0x0040 /* Modified, but don't write yet. */ #define IN_SPACECOUNTED 0x0080 /* Blocks to be freed in free count. */ #define IN_LAZYACCESS 0x0100 /* Process IN_ACCESS after the suspension finished */ /* * These are translation flags for some attributes that Ext4 * passes as inode flags but that we cannot pass directly. */ #define IN_E3INDEX 0x010000 #define IN_E4EXTENTS 0x020000 #define i_devvp i_ump->um_devvp #ifdef _KERNEL /* * Structure used to pass around logical block paths generated by * ext2_getlbns and used by truncate and bmap code. */ struct indir { e2fs_lbn_t in_lbn; /* Logical block number. */ int in_off; /* Offset in buffer. */ }; /* Convert between inode pointers and vnode pointers. */ #define VTOI(vp) ((struct inode *)(vp)->v_data) #define ITOV(ip) ((ip)->i_vnode) /* This overlays the fid structure (see mount.h). */ struct ufid { uint16_t ufid_len; /* Length of structure. */ uint16_t ufid_pad; /* Force 32-bit alignment. */ ino_t ufid_ino; /* File number (ino). */ uint32_t ufid_gen; /* Generation number. */ }; #endif /* _KERNEL */ #endif /* !_FS_EXT2FS_INODE_H_ */
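Finally, the inode.h change above wraps the direct and indirect block pointers in an anonymous union so the same fifteen 32-bit slots can also be addressed as i_data[], a flat view of the on-disk block-pointer area, which is how extent-aware code can treat it as a single blob. A standalone copy (C11 anonymous members, as in the diff) just to show the aliasing:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EXT2_NDADDR	12	/* Direct addresses in inode. */
#define EXT2_NIADDR	3	/* Indirect addresses in inode. */

/* Standalone copy of the new block-pointer union from struct inode. */
struct blkptrs {
	union {
		struct {
			uint32_t i_db[EXT2_NDADDR];
			uint32_t i_ib[EXT2_NIADDR];
		};
		uint32_t i_data[EXT2_NDADDR + EXT2_NIADDR];
	};
};

int
main(void)
{
	struct blkptrs b;

	/* 15 slots of 4 bytes: the 60-byte block-pointer area. */
	assert(sizeof(b) == (EXT2_NDADDR + EXT2_NIADDR) * sizeof(uint32_t));

	b.i_db[0] = 100;	/* first direct block */
	b.i_ib[0] = 200;	/* single indirect block */
	printf("i_data[0]=%u i_data[12]=%u\n", b.i_data[0], b.i_data[12]);
	return (0);
}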