diff --git a/cddl/lib/libzpool/Makefile b/cddl/lib/libzpool/Makefile
index 5dbeb9cb1721..217159611e97 100644
--- a/cddl/lib/libzpool/Makefile
+++ b/cddl/lib/libzpool/Makefile
@@ -1,338 +1,337 @@
# $FreeBSD$
ZFSTOP= ${SRCTOP}/sys/contrib/openzfs
# ZFS_COMMON_SRCS
.PATH: ${ZFSTOP}/module/zfs
.PATH: ${ZFSTOP}/module/zcommon
.PATH: ${ZFSTOP}/module/unicode
# LUA_SRCS
.PATH: ${ZFSTOP}/module/lua
# ZSTD_SRCS
.PATH: ${ZFSTOP}/module/zstd
.PATH: ${ZFSTOP}/module/zstd/lib/common
.PATH: ${ZFSTOP}/module/zstd/lib/compress
.PATH: ${ZFSTOP}/module/zstd/lib/decompress
.PATH: ${ZFSTOP}/module/os/linux/zfs
.PATH: ${ZFSTOP}/lib/libzpool
.if exists(${SRCTOP}/sys/cddl/contrib/opensolaris/common/atomic/${MACHINE_ARCH}/opensolaris_atomic.S)
.PATH: ${SRCTOP}/sys/cddl/contrib/opensolaris/common/atomic/${MACHINE_ARCH}
ATOMIC_SRCS= opensolaris_atomic.S
ACFLAGS+= -Wa,--noexecstack
.else
.PATH: ${SRCTOP}/sys/cddl/compat/opensolaris/kern
ATOMIC_SRCS= opensolaris_atomic.c
.endif
.if ${MACHINE_ARCH} == "powerpc" || ${MACHINE_ARCH} == "powerpcspe"
# Don't waste GOT entries on small data.
PICFLAG= -fPIC
.endif
PACKAGE= zfs
LIB= zpool
USER_C = \
kernel.c \
taskq.c \
util.c
KERNEL_C = \
zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zfs_zstd.c \
zpool_prop.c \
zprop_common.c \
abd.c \
abd_os.c \
aggsum.c \
arc.c \
arc_os.c \
blake3_zfs.c \
blkptr.c \
bplist.c \
bpobj.c \
bptree.c \
bqueue.c \
btree.c \
brt.c \
cityhash.c \
dbuf.c \
dbuf_stats.c \
ddt.c \
ddt_zap.c \
dmu.c \
dmu_diff.c \
dmu_object.c \
dmu_objset.c \
dmu_recv.c \
dmu_redact.c \
dmu_send.c \
dmu_traverse.c \
dmu_tx.c \
dmu_zfetch.c \
dnode.c \
dnode_sync.c \
dsl_bookmark.c \
dsl_dataset.c \
dsl_deadlist.c \
dsl_deleg.c \
dsl_dir.c \
dsl_crypt.c \
dsl_pool.c \
dsl_prop.c \
dsl_scan.c \
dsl_synctask.c \
dsl_destroy.c \
dsl_userhold.c \
edonr_zfs.c \
entropy_common.c \
error_private.c \
fm.c \
fse_compress.c \
fse_decompress.c \
gzip.c \
hist.c \
hkdf.c \
huf_compress.c \
huf_decompress.c \
lzjb.c \
lz4.c \
lz4_zfs.c \
metaslab.c \
mmp.c \
multilist.c \
objlist.c \
pathname.c \
pool.c \
range_tree.c \
refcount.c \
rrwlock.c \
sa.c \
sha2_zfs.c \
skein_zfs.c \
spa.c \
spa_checkpoint.c \
spa_config.c \
spa_errlog.c \
spa_history.c \
spa_log_spacemap.c \
spa_misc.c \
spa_stats.c \
space_map.c \
space_reftree.c \
txg.c \
trace.c \
uberblock.c \
unique.c \
vdev.c \
- vdev_cache.c \
vdev_draid.c \
vdev_draid_rand.c \
vdev_file.c \
vdev_indirect_births.c \
vdev_indirect.c \
vdev_indirect_mapping.c \
vdev_initialize.c \
vdev_label.c \
vdev_mirror.c \
vdev_missing.c \
vdev_queue.c \
vdev_raidz.c \
vdev_raidz_math_aarch64_neon.c \
vdev_raidz_math_aarch64_neonx2.c \
vdev_raidz_math_avx2.c \
vdev_raidz_math_avx512bw.c \
vdev_raidz_math_avx512f.c \
vdev_raidz_math.c \
vdev_raidz_math_scalar.c \
vdev_rebuild.c \
vdev_removal.c \
vdev_root.c \
vdev_trim.c \
xxhash.c \
zap.c \
zap_leaf.c \
zap_micro.c \
zcp.c \
zcp_get.c \
zcp_global.c \
zcp_iter.c \
zcp_set.c \
zcp_synctask.c \
zfeature.c \
zfs_byteswap.c \
zfs_chksum.c \
zfs_debug.c \
zfs_fm.c \
zfs_fuid.c \
zfs_sa.c \
zfs_znode.c \
zfs_racct.c \
zfs_ratelimit.c \
zfs_rlock.c \
zil.c \
zio.c \
zio_checksum.c \
zio_compress.c \
zio_crypt.c \
zio_inject.c \
zle.c \
zrlock.c \
zstd_common.c \
zstd_compress.c \
zstd_compress_literals.c \
zstd_compress_sequences.c \
zstd_compress_superblock.c \
zstd_ddict.c \
zstd_decompress.c \
zstd_decompress_block.c \
zstd_double_fast.c \
zstd_fast.c \
zstd_lazy.c \
zstd_ldm.c \
zstd_opt.c \
zthr.c
ARCH_C =
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
ARCH_C += vdev_raidz_math_sse2.c \
vdev_raidz_math_ssse3.c \
zfs_fletcher_intel.c \
zfs_fletcher_sse.c
CFLAGS += -DHAVE_SSE2 -DHAVE_SSE3
.endif
.if ${MACHINE_ARCH} == "amd64"
ARCH_C += zfs_fletcher_avx512.c
CFLAGS+= -DHAVE_AVX2 -DHAVE_AVX -D__x86_64 -DHAVE_AVX512F \
-DHAVE_AVX512BW
.endif
.if ${MACHINE_CPUARCH} == "aarch64"
ARCH_C += zfs_fletcher_aarch64_neon.c
.endif
LUA_C = \
lapi.c \
lauxlib.c \
lbaselib.c \
lcode.c \
lcompat.c \
lcorolib.c \
lctype.c \
ldebug.c \
ldo.c \
lfunc.c \
lgc.c \
llex.c \
lmem.c \
lobject.c \
lopcodes.c \
lparser.c \
lstate.c \
lstring.c \
lstrlib.c \
ltable.c \
ltablib.c \
ltm.c \
lvm.c \
lzio.c
UNICODE_C = u8_textprep.c uconv.c
SRCS= ${USER_C} ${KERNEL_C} ${LUA_C} ${UNICODE_C} ${ARCH_C}
WARNS?= 2
CFLAGS+= \
-DIN_BASE \
-I${ZFSTOP}/include \
-I${ZFSTOP}/lib/libspl/include \
-I${ZFSTOP}/lib/libspl/include/os/freebsd \
-I${SRCTOP}/sys \
-I${ZFSTOP}/include/os/freebsd/zfs \
-I${SRCTOP}/cddl/compat/opensolaris/include \
-I${ZFSTOP}/module/icp/include \
-include ${ZFSTOP}/include/os/freebsd/spl/sys/ccompile.h \
-DHAVE_ISSETUGID \
-include ${SRCTOP}/sys/modules/zfs/zfs_config.h \
-I${SRCTOP}/sys/modules/zfs \
-I${ZFSTOP}/include/os/freebsd/zfs \
-DLIB_ZPOOL_BUILD -DZFS_DEBUG \
# XXX: pthread doesn't have mutex_owned() equivalent, so we need to look
# into libthr private structures. That's sooo evil, but it's only for
# ZFS debugging tools needs.
CFLAGS+= -DWANTS_MUTEX_OWNED
CFLAGS+= -I${SRCTOP}/lib/libpthread/thread
CFLAGS+= -I${SRCTOP}/lib/libpthread/sys
CFLAGS+= -I${SRCTOP}/lib/libthr/arch/${MACHINE_CPUARCH}/include
CFLAGS.gcc+= -fms-extensions
LIBADD= md pthread z spl icp nvpair avl umem
# atomic.S doesn't like profiling.
MK_PROFILE= no
CSTD= c99
# Since there are many asserts in this library, it makes no sense to compile
# it without debugging.
CFLAGS+= -g -DDEBUG=1
# Pointer values are used as debugging "tags" to mark reference count
# ownerships and in some cases the tag reference is dropped after an
# object is freed.
CFLAGS.dbuf.c= ${NO_WUSE_AFTER_FREE}
CFLAGS.entropy_common.c= -fno-tree-vectorize
CFLAGS.entropy_common.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.error_private.c= -fno-tree-vectorize
CFLAGS.error_private.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.fse_compress.c= -fno-tree-vectorize
CFLAGS.fse_compress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.fse_decompress.c= -fno-tree-vectorize
CFLAGS.fse_decompress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.hist.c= -fno-tree-vectorize
CFLAGS.hist.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_compress.c= -fno-tree-vectorize
CFLAGS.huf_compress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_decompress.c= -fno-tree-vectorize
CFLAGS.huf_decompress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.pool.c= -fno-tree-vectorize
CFLAGS.pool.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.xxhash.c= -fno-tree-vectorize
CFLAGS.xxhash.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress.c= -fno-tree-vectorize
CFLAGS.zstd_compress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_literals.c= -fno-tree-vectorize
CFLAGS.zstd_compress_literals.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_sequences.c= -fno-tree-vectorize
CFLAGS.zstd_compress_sequences.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_superblock.c= -fno-tree-vectorize
CFLAGS.zstd_compress_superblock.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_double_fast.c= -fno-tree-vectorize
CFLAGS.zstd_double_fast.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_fast.c= -fno-tree-vectorize
CFLAGS.zstd_fast.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_lazy.c= -fno-tree-vectorize
CFLAGS.zstd_lazy.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_ldm.c= -fno-tree-vectorize
CFLAGS.zstd_ldm.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_opt.c= -fno-tree-vectorize
CFLAGS.zstd_opt.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_ddict.c= -fno-tree-vectorize
CFLAGS.zstd_ddict.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress.c= -fno-tree-vectorize
CFLAGS.zstd_decompress.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress_block.c= -fno-tree-vectorize
CFLAGS.zstd_decompress_block.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
.include <bsd.lib.mk>
diff --git a/sys/conf/files b/sys/conf/files
index a5f55b49451c..726c4c7c8b2c 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,5232 +1,5231 @@
# $FreeBSD$
#
# The long compile-with and dependency lines are required because of
# limitations in config: backslash-newline doesn't work in strings, and
# dependency lines other than the first are silently ignored.
#
acpi_quirks.h optional acpi \
dependency "$S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \
compile-with "${AWK} -f $S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \
no-obj no-implicit-rule before-depend \
clean "acpi_quirks.h"
bhnd_nvram_map.h optional bhnd \
dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -h" \
no-obj no-implicit-rule before-depend \
clean "bhnd_nvram_map.h"
bhnd_nvram_map_data.h optional bhnd \
dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -d" \
no-obj no-implicit-rule before-depend \
clean "bhnd_nvram_map_data.h"
fdt_static_dtb.h optional fdt fdt_dtb_static \
compile-with "sh -c 'MACHINE=${MACHINE} $S/tools/fdt/make_dtbh.sh ${FDT_DTS_FILE} ${.CURDIR}'" \
dependency "${FDT_DTS_FILE:T:R}.dtb" \
no-obj no-implicit-rule before-depend \
clean "fdt_static_dtb.h"
feeder_eq_gen.h optional sound \
dependency "$S/tools/sound/feeder_eq_mkfilter.awk" \
compile-with "${AWK} -f $S/tools/sound/feeder_eq_mkfilter.awk -- ${FEEDER_EQ_PRESETS} > feeder_eq_gen.h" \
no-obj no-implicit-rule before-depend \
clean "feeder_eq_gen.h"
feeder_rate_gen.h optional sound \
dependency "$S/tools/sound/feeder_rate_mkfilter.awk" \
compile-with "${AWK} -f $S/tools/sound/feeder_rate_mkfilter.awk -- ${FEEDER_RATE_PRESETS} > feeder_rate_gen.h" \
no-obj no-implicit-rule before-depend \
clean "feeder_rate_gen.h"
font.h optional sc_dflt_font \
compile-with "uudecode < ${SRCTOP}/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < ${SRCTOP}/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < ${SRCTOP}/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \
no-obj no-implicit-rule before-depend \
clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8"
snd_fxdiv_gen.h optional sound \
dependency "$S/tools/sound/snd_fxdiv_gen.awk" \
compile-with "${AWK} -f $S/tools/sound/snd_fxdiv_gen.awk -- > snd_fxdiv_gen.h" \
no-obj no-implicit-rule before-depend \
clean "snd_fxdiv_gen.h"
miidevs.h optional miibus | mii \
dependency "$S/tools/miidevs2h.awk $S/dev/mii/miidevs" \
compile-with "${AWK} -f $S/tools/miidevs2h.awk $S/dev/mii/miidevs" \
no-obj no-implicit-rule before-depend \
clean "miidevs.h"
kbdmuxmap.h optional kbdmux_dflt_keymap \
compile-with "${KEYMAP} -L ${KBDMUX_DFLT_KEYMAP} | ${KEYMAP_FIX} > ${.TARGET}" \
no-obj no-implicit-rule before-depend \
clean "kbdmuxmap.h"
teken_state.h optional sc | vt \
dependency "$S/teken/gensequences $S/teken/sequences" \
compile-with "${AWK} -f $S/teken/gensequences $S/teken/sequences > teken_state.h" \
no-obj no-implicit-rule before-depend \
clean "teken_state.h"
ukbdmap.h optional ukbd_dflt_keymap \
compile-with "${KEYMAP} -L ${UKBD_DFLT_KEYMAP} | ${KEYMAP_FIX} > ${.TARGET}" \
no-obj no-implicit-rule before-depend \
clean "ukbdmap.h"
usbdevs.h optional usb | hid \
dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \
compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -h" \
no-obj no-implicit-rule before-depend \
clean "usbdevs.h"
usbdevs_data.h optional usb \
dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \
compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -d" \
no-obj no-implicit-rule before-depend \
clean "usbdevs_data.h"
sdiodevs.h optional mmccam \
dependency "$S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs" \
compile-with "${AWK} -f $S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs -h" \
no-obj no-implicit-rule before-depend \
clean "sdiodevs.h"
sdiodevs_data.h optional mmccam \
dependency "$S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs" \
compile-with "${AWK} -f $S/tools/sdiodevs2h.awk $S/dev/sdio/sdiodevs -d" \
no-obj no-implicit-rule before-depend \
clean "sdiodevs_data.h"
cam/cam.c optional scbus
cam/cam_compat.c optional scbus
cam/cam_iosched.c optional scbus
cam/cam_periph.c optional scbus
cam/cam_queue.c optional scbus
cam/cam_sim.c optional scbus
cam/cam_xpt.c optional scbus
cam/ata/ata_all.c optional scbus
cam/ata/ata_xpt.c optional scbus
cam/ata/ata_pmp.c optional scbus
cam/nvme/nvme_all.c optional scbus
cam/nvme/nvme_da.c optional nda | da
cam/nvme/nvme_xpt.c optional scbus
cam/scsi/scsi_xpt.c optional scbus
cam/scsi/scsi_all.c optional scbus
cam/scsi/scsi_cd.c optional cd
cam/scsi/scsi_ch.c optional ch
cam/ata/ata_da.c optional ada | da
cam/ctl/ctl.c optional ctl
cam/ctl/ctl_backend.c optional ctl
cam/ctl/ctl_backend_block.c optional ctl
cam/ctl/ctl_backend_ramdisk.c optional ctl
cam/ctl/ctl_cmd_table.c optional ctl
cam/ctl/ctl_frontend.c optional ctl
cam/ctl/ctl_frontend_cam_sim.c optional ctl
cam/ctl/ctl_frontend_ioctl.c optional ctl
cam/ctl/ctl_frontend_iscsi.c optional ctl cfiscsi
cam/ctl/ctl_ha.c optional ctl
cam/ctl/ctl_scsi_all.c optional ctl
cam/ctl/ctl_tpc.c optional ctl
cam/ctl/ctl_tpc_local.c optional ctl
cam/ctl/ctl_error.c optional ctl
cam/ctl/ctl_util.c optional ctl
cam/ctl/scsi_ctl.c optional ctl
cam/mmc/mmc_xpt.c optional scbus mmccam
cam/mmc/mmc_sim.c optional scbus mmccam
cam/mmc/mmc_sim_if.m optional scbus mmccam
cam/mmc/mmc_da.c optional scbus mmccam da
cam/scsi/scsi_da.c optional da
cam/scsi/scsi_pass.c optional pass
cam/scsi/scsi_pt.c optional pt
cam/scsi/scsi_sa.c optional sa
cam/scsi/scsi_enc.c optional ses
cam/scsi/scsi_enc_ses.c optional ses
cam/scsi/scsi_enc_safte.c optional ses
cam/scsi/scsi_sg.c optional sg
cam/scsi/scsi_targ_bh.c optional targbh
cam/scsi/scsi_target.c optional targ
cam/scsi/smp_all.c optional scbus
# shared between zfs and dtrace
cddl/compat/opensolaris/kern/opensolaris.c optional dtrace compile-with "${CDDL_C}"
cddl/compat/opensolaris/kern/opensolaris_proc.c optional zfs | dtrace compile-with "${CDDL_C}"
contrib/openzfs/module/os/freebsd/spl/spl_misc.c optional zfs | dtrace compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_cmn_err.c optional zfs | dtrace compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_taskq.c optional zfs | dtrace compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_kmem.c optional zfs | dtrace compile-with "${ZFS_C}"
#zfs solaris portability layer
contrib/openzfs/module/os/freebsd/spl/acl_common.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/callb.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/list.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_acl.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_dtrace.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_kstat.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_policy.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_procfs_list.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_string.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_sunddi.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_uio.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_vfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_vm.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_zlib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/spl/spl_zone.c optional zfs compile-with "${ZFS_C}"
# zfs specific
#zfs avl
contrib/openzfs/module/avl/avl.c optional zfs compile-with "${ZFS_C}"
# zfs lua support
contrib/openzfs/module/lua/lapi.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lauxlib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lbaselib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lcode.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lcompat.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lcorolib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lctype.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ldebug.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ldo.c optional zfs compile-with "${ZFS_C} ${NO_WINFINITE_RECURSION}"
contrib/openzfs/module/lua/lfunc.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lgc.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/llex.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lmem.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lobject.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lopcodes.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lparser.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lstate.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lstring.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lstrlib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ltable.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ltablib.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/ltm.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lvm.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/lua/lzio.c optional zfs compile-with "${ZFS_C}"
# zfs nvpair support
contrib/openzfs/module/nvpair/fnvpair.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/nvpair/nvpair.c optional zfs compile-with "${ZFS_RPC_C} ${NO_WSTRINGOP_OVERREAD}"
contrib/openzfs/module/nvpair/nvpair_alloc_fixed.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/nvpair/nvpair_alloc_spl.c optional zfs compile-with "${ZFS_C}"
#zfs platform compatibility code
contrib/openzfs/module/os/freebsd/zfs/abd_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/arc_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/crypto_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/dmu_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/event_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/hkdf.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/kmod_core.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/spa_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c optional zfs compile-with "${ZFS_C} -include $S/modules/zfs/zfs_config.h"
contrib/openzfs/module/os/freebsd/zfs/vdev_file.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/vdev_label_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_debug.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_ioctl_compat.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_ioctl_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_racct.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/os/freebsd/zfs/zvol_os.c optional zfs compile-with "${ZFS_C}"
#zfs unicode support
contrib/openzfs/module/unicode/uconv.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/unicode/u8_textprep.c optional zfs compile-with "${ZFS_C}"
#zfs checksums / zcommon
contrib/openzfs/module/zcommon/cityhash.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfeature_common.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_comutil.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_deleg.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_fletcher.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_fletcher_superscalar.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_fletcher_superscalar4.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_namecheck.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_prop.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zpool_prop.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zprop_common.c optional zfs compile-with "${ZFS_C}"
# zfs edon-r hash support
contrib/openzfs/module/icp/algs/edonr/edonr.c optional zfs compile-with "${ZFS_C}"
# zfs blake3 hash support
contrib/openzfs/module/icp/algs/blake3/blake3.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/icp/algs/blake3/blake3_generic.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/icp/algs/blake3/blake3_impl.c optional zfs compile-with "${ZFS_C}"
# zfs sha2 hash support
contrib/openzfs/module/icp/algs/sha2/sha2_generic.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/icp/algs/sha2/sha256_impl.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/icp/algs/sha2/sha512_impl.c optional zfs compile-with "${ZFS_C}"
#zfs core common code
contrib/openzfs/module/zfs/abd.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/aggsum.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/arc.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/blake3_zfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/blkptr.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/bplist.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/bpobj.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/bptree.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/brt.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/btree.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/bqueue.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dbuf.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dbuf_stats.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dataset_kstats.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/ddt.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/ddt_zap.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_diff.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_object.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_objset.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_recv.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_redact.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_send.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_traverse.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_tx.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dmu_zfetch.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dnode.c optional zfs compile-with "${ZFS_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" \
warning "kernel contains CDDL licensed ZFS filesystem"
contrib/openzfs/module/zfs/dnode_sync.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_bookmark.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_crypt.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_dataset.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_deadlist.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_deleg.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_destroy.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_dir.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_pool.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_prop.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_scan.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_synctask.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/dsl_userhold.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/edonr_zfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/fm.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/gzip.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/lzjb.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/lz4.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/lz4_zfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/metaslab.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/mmp.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/multilist.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/objlist.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/pathname.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/range_tree.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/refcount.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/rrwlock.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/sa.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/sha2_zfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/skein_zfs.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_checkpoint.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_config.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_errlog.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_history.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_log_spacemap.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_misc.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/spa_stats.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/space_map.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/space_reftree.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/txg.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/uberblock.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/unique.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev.c optional zfs compile-with "${ZFS_C}"
-contrib/openzfs/module/zfs/vdev_cache.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_draid.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_draid_rand.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_indirect.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_indirect_births.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_indirect_mapping.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_initialize.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_label.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_mirror.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_missing.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_queue.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz_math.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz_math_scalar.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_rebuild.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_removal.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_root.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_trim.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zap.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zap_leaf.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zap_micro.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_get.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_global.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_iter.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_set.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zcp_synctask.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfeature.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_byteswap.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_chksum.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_fm.c optional zfs compile-with "${ZFS_C} ${NO_WUNUSED_BUT_SET_VARIABLE}"
contrib/openzfs/module/zfs/zfs_fuid.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_impl.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_ioctl.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_log.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_onexit.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_quota.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_ratelimit.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_replay.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_rlock.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_sa.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zfs_vnops.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zstd/zfs_zstd.c optional zfs zstdio compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zil.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zio.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zio_checksum.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zio_compress.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zio_inject.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zle.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zrlock.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zthr.c optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/zvol.c optional zfs compile-with "${ZFS_C}"
# dtrace specific
cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c optional dtrace compile-with "${DTRACE_C}" \
warning "kernel contains CDDL licensed DTRACE"
cddl/contrib/opensolaris/uts/common/dtrace/dtrace_xoroshiro128_plus.c optional dtrace compile-with "${DTRACE_C}"
cddl/dev/dtmalloc/dtmalloc.c optional dtmalloc | dtraceall compile-with "${CDDL_C}"
cddl/dev/profile/profile.c optional dtrace_profile | dtraceall compile-with "${CDDL_C}"
cddl/dev/sdt/sdt.c optional dtrace_sdt | dtraceall compile-with "${CDDL_C}"
cddl/dev/fbt/fbt.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}"
cddl/dev/systrace/systrace.c optional dtrace_systrace | dtraceall compile-with "${CDDL_C}"
cddl/dev/prototype.c optional dtrace_prototype | dtraceall compile-with "${CDDL_C}"
fs/nfsclient/nfs_clkdtrace.c optional dtnfscl nfscl | dtraceall nfscl compile-with "${CDDL_C}"
compat/freebsd32/freebsd32_abort2.c optional compat_freebsd32
compat/freebsd32/freebsd32_capability.c optional compat_freebsd32
compat/freebsd32/freebsd32_ioctl.c optional compat_freebsd32
compat/freebsd32/freebsd32_misc.c optional compat_freebsd32
compat/freebsd32/freebsd32_syscalls.c optional compat_freebsd32
compat/freebsd32/freebsd32_sysent.c optional compat_freebsd32
contrib/ck/src/ck_array.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_centralized.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_combining.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_dissemination.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_mcs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_barrier_tournament.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_epoch.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_hp.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_hs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_ht.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/ck/src/ck_rhs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include"
contrib/dev/acpica/common/ahids.c optional acpi acpi_debug
contrib/dev/acpica/common/ahuuids.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbcmds.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbconvert.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbdisply.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbexec.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbhistry.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbinput.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbmethod.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbnames.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbobject.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbstats.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbtest.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbutils.c optional acpi acpi_debug
contrib/dev/acpica/components/debugger/dbxface.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmbuffer.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmcstyle.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmdeferred.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmnames.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmopcode.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmresrc.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmresrcl.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmresrcl2.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmresrcs.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmutils.c optional acpi acpi_debug
contrib/dev/acpica/components/disassembler/dmwalk.c optional acpi acpi_debug
contrib/dev/acpica/components/dispatcher/dsargs.c optional acpi
contrib/dev/acpica/components/dispatcher/dscontrol.c optional acpi
contrib/dev/acpica/components/dispatcher/dsdebug.c optional acpi
contrib/dev/acpica/components/dispatcher/dsfield.c optional acpi
contrib/dev/acpica/components/dispatcher/dsinit.c optional acpi
contrib/dev/acpica/components/dispatcher/dsmethod.c optional acpi
contrib/dev/acpica/components/dispatcher/dsmthdat.c optional acpi
contrib/dev/acpica/components/dispatcher/dsobject.c optional acpi
contrib/dev/acpica/components/dispatcher/dsopcode.c optional acpi
contrib/dev/acpica/components/dispatcher/dspkginit.c optional acpi
contrib/dev/acpica/components/dispatcher/dsutils.c optional acpi
contrib/dev/acpica/components/dispatcher/dswexec.c optional acpi
contrib/dev/acpica/components/dispatcher/dswload.c optional acpi
contrib/dev/acpica/components/dispatcher/dswload2.c optional acpi
contrib/dev/acpica/components/dispatcher/dswscope.c optional acpi
contrib/dev/acpica/components/dispatcher/dswstate.c optional acpi
contrib/dev/acpica/components/events/evevent.c optional acpi
contrib/dev/acpica/components/events/evglock.c optional acpi
contrib/dev/acpica/components/events/evgpe.c optional acpi
contrib/dev/acpica/components/events/evgpeblk.c optional acpi
contrib/dev/acpica/components/events/evgpeinit.c optional acpi
contrib/dev/acpica/components/events/evgpeutil.c optional acpi
contrib/dev/acpica/components/events/evhandler.c optional acpi
contrib/dev/acpica/components/events/evmisc.c optional acpi
contrib/dev/acpica/components/events/evregion.c optional acpi
contrib/dev/acpica/components/events/evrgnini.c optional acpi
contrib/dev/acpica/components/events/evsci.c optional acpi
contrib/dev/acpica/components/events/evxface.c optional acpi
contrib/dev/acpica/components/events/evxfevnt.c optional acpi
contrib/dev/acpica/components/events/evxfgpe.c optional acpi
contrib/dev/acpica/components/events/evxfregn.c optional acpi
contrib/dev/acpica/components/executer/exconcat.c optional acpi
contrib/dev/acpica/components/executer/exconfig.c optional acpi
contrib/dev/acpica/components/executer/exconvrt.c optional acpi
contrib/dev/acpica/components/executer/excreate.c optional acpi
contrib/dev/acpica/components/executer/exdebug.c optional acpi
contrib/dev/acpica/components/executer/exdump.c optional acpi
contrib/dev/acpica/components/executer/exfield.c optional acpi
contrib/dev/acpica/components/executer/exfldio.c optional acpi
contrib/dev/acpica/components/executer/exmisc.c optional acpi
contrib/dev/acpica/components/executer/exmutex.c optional acpi
contrib/dev/acpica/components/executer/exnames.c optional acpi
contrib/dev/acpica/components/executer/exoparg1.c optional acpi
contrib/dev/acpica/components/executer/exoparg2.c optional acpi
contrib/dev/acpica/components/executer/exoparg3.c optional acpi
contrib/dev/acpica/components/executer/exoparg6.c optional acpi
contrib/dev/acpica/components/executer/exprep.c optional acpi
contrib/dev/acpica/components/executer/exregion.c optional acpi
contrib/dev/acpica/components/executer/exresnte.c optional acpi
contrib/dev/acpica/components/executer/exresolv.c optional acpi
contrib/dev/acpica/components/executer/exresop.c optional acpi
contrib/dev/acpica/components/executer/exserial.c optional acpi
contrib/dev/acpica/components/executer/exstore.c optional acpi
contrib/dev/acpica/components/executer/exstoren.c optional acpi
contrib/dev/acpica/components/executer/exstorob.c optional acpi
contrib/dev/acpica/components/executer/exsystem.c optional acpi
contrib/dev/acpica/components/executer/extrace.c optional acpi
contrib/dev/acpica/components/executer/exutils.c optional acpi
contrib/dev/acpica/components/hardware/hwacpi.c optional acpi
contrib/dev/acpica/components/hardware/hwesleep.c optional acpi
contrib/dev/acpica/components/hardware/hwgpe.c optional acpi
contrib/dev/acpica/components/hardware/hwpci.c optional acpi
contrib/dev/acpica/components/hardware/hwregs.c optional acpi
contrib/dev/acpica/components/hardware/hwsleep.c optional acpi
contrib/dev/acpica/components/hardware/hwtimer.c optional acpi
contrib/dev/acpica/components/hardware/hwvalid.c optional acpi
contrib/dev/acpica/components/hardware/hwxface.c optional acpi
contrib/dev/acpica/components/hardware/hwxfsleep.c optional acpi
contrib/dev/acpica/components/namespace/nsaccess.c optional acpi \
compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}"
contrib/dev/acpica/components/namespace/nsalloc.c optional acpi
contrib/dev/acpica/components/namespace/nsarguments.c optional acpi
contrib/dev/acpica/components/namespace/nsconvert.c optional acpi
contrib/dev/acpica/components/namespace/nsdump.c optional acpi
contrib/dev/acpica/components/namespace/nseval.c optional acpi
contrib/dev/acpica/components/namespace/nsinit.c optional acpi
contrib/dev/acpica/components/namespace/nsload.c optional acpi
contrib/dev/acpica/components/namespace/nsnames.c optional acpi
contrib/dev/acpica/components/namespace/nsobject.c optional acpi
contrib/dev/acpica/components/namespace/nsparse.c optional acpi
contrib/dev/acpica/components/namespace/nspredef.c optional acpi
contrib/dev/acpica/components/namespace/nsprepkg.c optional acpi
contrib/dev/acpica/components/namespace/nsrepair.c optional acpi
contrib/dev/acpica/components/namespace/nsrepair2.c optional acpi
contrib/dev/acpica/components/namespace/nssearch.c optional acpi
contrib/dev/acpica/components/namespace/nsutils.c optional acpi
contrib/dev/acpica/components/namespace/nswalk.c optional acpi
contrib/dev/acpica/components/namespace/nsxfeval.c optional acpi
contrib/dev/acpica/components/namespace/nsxfname.c optional acpi
contrib/dev/acpica/components/namespace/nsxfobj.c optional acpi
contrib/dev/acpica/components/parser/psargs.c optional acpi
contrib/dev/acpica/components/parser/psloop.c optional acpi
contrib/dev/acpica/components/parser/psobject.c optional acpi
contrib/dev/acpica/components/parser/psopcode.c optional acpi
contrib/dev/acpica/components/parser/psopinfo.c optional acpi
contrib/dev/acpica/components/parser/psparse.c optional acpi
contrib/dev/acpica/components/parser/psscope.c optional acpi
contrib/dev/acpica/components/parser/pstree.c optional acpi
contrib/dev/acpica/components/parser/psutils.c optional acpi
contrib/dev/acpica/components/parser/pswalk.c optional acpi
contrib/dev/acpica/components/parser/psxface.c optional acpi
contrib/dev/acpica/components/resources/rsaddr.c optional acpi
contrib/dev/acpica/components/resources/rscalc.c optional acpi
contrib/dev/acpica/components/resources/rscreate.c optional acpi
contrib/dev/acpica/components/resources/rsdump.c optional acpi acpi_debug
contrib/dev/acpica/components/resources/rsdumpinfo.c optional acpi
contrib/dev/acpica/components/resources/rsinfo.c optional acpi
contrib/dev/acpica/components/resources/rsio.c optional acpi
contrib/dev/acpica/components/resources/rsirq.c optional acpi
contrib/dev/acpica/components/resources/rslist.c optional acpi
contrib/dev/acpica/components/resources/rsmemory.c optional acpi
contrib/dev/acpica/components/resources/rsmisc.c optional acpi
contrib/dev/acpica/components/resources/rsserial.c optional acpi
contrib/dev/acpica/components/resources/rsutils.c optional acpi
contrib/dev/acpica/components/resources/rsxface.c optional acpi
contrib/dev/acpica/components/tables/tbdata.c optional acpi
contrib/dev/acpica/components/tables/tbfadt.c optional acpi
contrib/dev/acpica/components/tables/tbfind.c optional acpi
contrib/dev/acpica/components/tables/tbinstal.c optional acpi
contrib/dev/acpica/components/tables/tbprint.c optional acpi
contrib/dev/acpica/components/tables/tbutils.c optional acpi
contrib/dev/acpica/components/tables/tbxface.c optional acpi
contrib/dev/acpica/components/tables/tbxfload.c optional acpi
contrib/dev/acpica/components/tables/tbxfroot.c optional acpi
contrib/dev/acpica/components/utilities/utaddress.c optional acpi
contrib/dev/acpica/components/utilities/utalloc.c optional acpi
contrib/dev/acpica/components/utilities/utascii.c optional acpi
contrib/dev/acpica/components/utilities/utbuffer.c optional acpi
contrib/dev/acpica/components/utilities/utcache.c optional acpi
contrib/dev/acpica/components/utilities/utcksum.c optional acpi
contrib/dev/acpica/components/utilities/utcopy.c optional acpi
contrib/dev/acpica/components/utilities/utdebug.c optional acpi
contrib/dev/acpica/components/utilities/utdecode.c optional acpi
contrib/dev/acpica/components/utilities/utdelete.c optional acpi
contrib/dev/acpica/components/utilities/uterror.c optional acpi
contrib/dev/acpica/components/utilities/uteval.c optional acpi
contrib/dev/acpica/components/utilities/utexcep.c optional acpi
contrib/dev/acpica/components/utilities/utglobal.c optional acpi
contrib/dev/acpica/components/utilities/uthex.c optional acpi
contrib/dev/acpica/components/utilities/utids.c optional acpi
contrib/dev/acpica/components/utilities/utinit.c optional acpi
contrib/dev/acpica/components/utilities/utlock.c optional acpi
contrib/dev/acpica/components/utilities/utmath.c optional acpi
contrib/dev/acpica/components/utilities/utmisc.c optional acpi
contrib/dev/acpica/components/utilities/utmutex.c optional acpi
contrib/dev/acpica/components/utilities/utnonansi.c optional acpi
contrib/dev/acpica/components/utilities/utobject.c optional acpi
contrib/dev/acpica/components/utilities/utosi.c optional acpi
contrib/dev/acpica/components/utilities/utownerid.c optional acpi
contrib/dev/acpica/components/utilities/utpredef.c optional acpi
contrib/dev/acpica/components/utilities/utresdecode.c optional acpi acpi_debug
contrib/dev/acpica/components/utilities/utresrc.c optional acpi
contrib/dev/acpica/components/utilities/utstate.c optional acpi
contrib/dev/acpica/components/utilities/utstring.c optional acpi
contrib/dev/acpica/components/utilities/utstrsuppt.c optional acpi
contrib/dev/acpica/components/utilities/utstrtoul64.c optional acpi
contrib/dev/acpica/components/utilities/utuuid.c optional acpi acpi_debug
contrib/dev/acpica/components/utilities/utxface.c optional acpi
contrib/dev/acpica/components/utilities/utxferror.c optional acpi
contrib/dev/acpica/components/utilities/utxfinit.c optional acpi
contrib/dev/acpica/os_specific/service_layers/osgendbg.c optional acpi acpi_debug
netpfil/ipfilter/netinet/fil.c optional ipfilter inet \
compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_auth.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_fil_freebsd.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_frag.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_log.c optional ipfilter inet \
compile-with "${NORMAL_C} -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_nat.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_proxy.c optional ipfilter inet \
compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_state.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_lookup.c optional ipfilter inet \
compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -Wno-error -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_pool.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_htable.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter ${NO_WTAUTOLOGICAL_POINTER_COMPARE}"
netpfil/ipfilter/netinet/ip_sync.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/mlfk_ipl.c optional ipfilter inet \
compile-with "${NORMAL_C} -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_nat6.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_rules.c optional ipfilter inet \
compile-with "${NORMAL_C} -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_scan.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/ip_dstlist.c optional ipfilter inet \
compile-with "${NORMAL_C} -Wno-unused -I$S/netpfil/ipfilter"
netpfil/ipfilter/netinet/radix_ipf.c optional ipfilter inet \
compile-with "${NORMAL_C} -I$S/netpfil/ipfilter"
contrib/libfdt/fdt.c optional fdt
contrib/libfdt/fdt_ro.c optional fdt
contrib/libfdt/fdt_rw.c optional fdt
contrib/libfdt/fdt_strerror.c optional fdt
contrib/libfdt/fdt_sw.c optional fdt
contrib/libfdt/fdt_wip.c optional fdt
contrib/libnv/cnvlist.c standard
contrib/libnv/dnvlist.c standard
contrib/libnv/nvlist.c standard
contrib/libnv/bsd_nvpair.c standard
# xz
dev/xz/xz_mod.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_crc32.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_crc64.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_dec_bcj.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_dec_lzma2.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
contrib/xz-embedded/linux/lib/xz/xz_dec_stream.c optional xz \
compile-with "${NORMAL_C} -DXZ_USE_CRC64 -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/"
# Zstd
contrib/zstd/lib/freebsd/zstd_kmalloc.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/zstd_common.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/fse_decompress.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/entropy_common.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/error_private.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/common/xxhash.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_compress.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_compress_literals.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_compress_sequences.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_compress_superblock.c optional zstdio compile-with "${ZSTD_C} ${NO_WUNUSED_BUT_SET_VARIABLE}"
contrib/zstd/lib/compress/fse_compress.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/hist.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/huf_compress.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_double_fast.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_fast.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_lazy.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_ldm.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/compress/zstd_opt.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/decompress/zstd_ddict.c optional zstdio compile-with ${ZSTD_C}
contrib/zstd/lib/decompress/zstd_decompress.c optional zstdio compile-with ${ZSTD_C}
# See comment in sys/conf/kern.pre.mk
contrib/zstd/lib/decompress/zstd_decompress_block.c optional zstdio \
compile-with "${ZSTD_C} ${ZSTD_DECOMPRESS_BLOCK_FLAGS}"
contrib/zstd/lib/decompress/huf_decompress.c optional zstdio compile-with "${ZSTD_C} ${NO_WBITWISE_INSTEAD_OF_LOGICAL}"
# Blake 2
contrib/libb2/blake2b-ref.c optional crypto | !random_loadable random_fenestrasx \
compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual -DSUFFIX=_ref -Wno-unused-function"
contrib/libb2/blake2s-ref.c optional crypto \
compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual -DSUFFIX=_ref -Wno-unused-function"
crypto/blake2/blake2-sw.c optional crypto \
compile-with "${NORMAL_C} -I$S/crypto/blake2 -Wno-cast-qual"
crypto/camellia/camellia.c optional crypto
crypto/camellia/camellia-api.c optional crypto
crypto/chacha20/chacha.c standard
crypto/chacha20/chacha-sw.c optional crypto
crypto/chacha20_poly1305.c optional crypto
crypto/curve25519.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
crypto/des/des_ecb.c optional netsmb
crypto/des/des_setkey.c optional netsmb
crypto/openssl/ossl.c optional ossl
crypto/openssl/ossl_aes.c optional ossl
crypto/openssl/ossl_chacha20.c optional ossl
crypto/openssl/ossl_poly1305.c optional ossl
crypto/openssl/ossl_sha1.c optional ossl
crypto/openssl/ossl_sha256.c optional ossl
crypto/openssl/ossl_sha512.c optional ossl
crypto/rc4/rc4.c optional netgraph_mppc_encryption
crypto/rijndael/rijndael-alg-fst.c optional crypto | ekcd | geom_bde | \
!random_loadable | wlan_ccmp
crypto/rijndael/rijndael-api-fst.c optional ekcd | geom_bde | !random_loadable
crypto/rijndael/rijndael-api.c optional crypto | wlan_ccmp
crypto/sha1.c optional carp | crypto | ether | \
netgraph_mppc_encryption | sctp
crypto/sha2/sha256c.c optional crypto | ekcd | geom_bde | \
!random_loadable | sctp | zfs
crypto/sha2/sha512c.c optional crypto | geom_bde | zfs
crypto/skein/skein.c optional crypto | zfs
crypto/skein/skein_block.c optional crypto | zfs
crypto/siphash/siphash.c optional inet | inet6 | wg
crypto/siphash/siphash_test.c optional inet | inet6 | wg
ddb/db_access.c optional ddb
ddb/db_break.c optional ddb
ddb/db_capture.c optional ddb
ddb/db_command.c optional ddb
ddb/db_examine.c optional ddb
ddb/db_expr.c optional ddb
ddb/db_input.c optional ddb
ddb/db_lex.c optional ddb
ddb/db_main.c optional ddb
ddb/db_output.c optional ddb
ddb/db_print.c optional ddb
ddb/db_ps.c optional ddb
ddb/db_run.c optional ddb
ddb/db_script.c optional ddb
ddb/db_sym.c optional ddb
ddb/db_thread.c optional ddb
ddb/db_textdump.c optional ddb
ddb/db_variables.c optional ddb
ddb/db_watch.c optional ddb
ddb/db_write_cmd.c optional ddb
dev/aac/aac.c optional aac
dev/aac/aac_cam.c optional aacp aac
dev/aac/aac_debug.c optional aac
dev/aac/aac_disk.c optional aac
dev/aac/aac_pci.c optional aac pci
dev/aacraid/aacraid.c optional aacraid
dev/aacraid/aacraid_cam.c optional aacraid scbus
dev/aacraid/aacraid_debug.c optional aacraid
dev/aacraid/aacraid_pci.c optional aacraid pci
dev/acpi_support/acpi_wmi.c optional acpi_wmi acpi
dev/acpi_support/acpi_asus.c optional acpi_asus acpi
dev/acpi_support/acpi_asus_wmi.c optional acpi_asus_wmi acpi
dev/acpi_support/acpi_fujitsu.c optional acpi_fujitsu acpi
dev/acpi_support/acpi_hp.c optional acpi_hp acpi
dev/acpi_support/acpi_ibm.c optional acpi_ibm acpi
dev/acpi_support/acpi_panasonic.c optional acpi_panasonic acpi
dev/acpi_support/acpi_sony.c optional acpi_sony acpi
dev/acpi_support/acpi_toshiba.c optional acpi_toshiba acpi
dev/acpi_support/atk0110.c optional aibs acpi
dev/acpica/Osd/OsdDebug.c optional acpi
dev/acpica/Osd/OsdHardware.c optional acpi
dev/acpica/Osd/OsdInterrupt.c optional acpi
dev/acpica/Osd/OsdMemory.c optional acpi
dev/acpica/Osd/OsdSchedule.c optional acpi
dev/acpica/Osd/OsdStream.c optional acpi
dev/acpica/Osd/OsdSynch.c optional acpi
dev/acpica/Osd/OsdTable.c optional acpi
dev/acpica/acpi.c optional acpi
dev/acpica/acpi_acad.c optional acpi
dev/acpica/acpi_apei.c optional acpi
dev/acpica/acpi_battery.c optional acpi
dev/acpica/acpi_button.c optional acpi
dev/acpica/acpi_cmbat.c optional acpi
dev/acpica/acpi_cpu.c optional acpi
dev/acpica/acpi_ec.c optional acpi
dev/acpica/acpi_ged.c optional acpi_ged acpi
dev/acpica/acpi_isab.c optional acpi isa
dev/acpica/acpi_lid.c optional acpi
dev/acpica/acpi_package.c optional acpi
dev/acpica/acpi_perf.c optional acpi
dev/acpica/acpi_powerres.c optional acpi
dev/acpica/acpi_quirk.c optional acpi
dev/acpica/acpi_resource.c optional acpi
dev/acpica/acpi_container.c optional acpi
dev/acpica/acpi_smbat.c optional acpi
dev/acpica/acpi_thermal.c optional acpi
dev/acpica/acpi_throttle.c optional acpi
dev/acpica/acpi_video.c optional acpi_video acpi
dev/acpica/acpi_dock.c optional acpi_dock acpi
dev/adlink/adlink.c optional adlink
dev/ae/if_ae.c optional ae pci
dev/age/if_age.c optional age pci
dev/agp/agp.c optional agp pci
dev/agp/agp_if.m optional agp pci
dev/ahci/ahci.c optional ahci
dev/ahci/ahciem.c optional ahci
dev/ahci/ahci_pci.c optional ahci pci
dev/aic7xxx/ahc_isa.c optional ahc isa
dev/aic7xxx/ahc_pci.c optional ahc pci \
compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}"
dev/aic7xxx/ahd_pci.c optional ahd pci \
compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}"
dev/aic7xxx/aic7770.c optional ahc
dev/aic7xxx/aic79xx.c optional ahd pci
dev/aic7xxx/aic79xx_osm.c optional ahd pci
dev/aic7xxx/aic79xx_pci.c optional ahd pci
dev/aic7xxx/aic79xx_reg_print.c optional ahd pci ahd_reg_pretty_print
dev/aic7xxx/aic7xxx.c optional ahc
dev/aic7xxx/aic7xxx_93cx6.c optional ahc
dev/aic7xxx/aic7xxx_osm.c optional ahc
dev/aic7xxx/aic7xxx_pci.c optional ahc pci
dev/aic7xxx/aic7xxx_reg_print.c optional ahc ahc_reg_pretty_print
dev/al_eth/al_eth.c optional al_eth fdt \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
dev/al_eth/al_init_eth_lm.c optional al_eth fdt \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
dev/al_eth/al_init_eth_kr.c optional al_eth fdt \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_iofic.c optional al_iofic \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_serdes_25g.c optional al_serdes \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_serdes_hssp.c optional al_serdes \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_udma_config.c optional al_udma \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_udma_debug.c optional al_udma \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_udma_iofic.c optional al_udma \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_hal_udma_main.c optional al_udma \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/al_serdes.c optional al_serdes \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/eth/al_hal_eth_kr.c optional al_eth \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
contrib/alpine-hal/eth/al_hal_eth_main.c optional al_eth \
no-depend \
compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}"
dev/alc/if_alc.c optional alc pci
dev/ale/if_ale.c optional ale pci
dev/alpm/alpm.c optional alpm pci
dev/altera/avgen/altera_avgen.c optional altera_avgen
dev/altera/avgen/altera_avgen_fdt.c optional altera_avgen fdt
dev/altera/avgen/altera_avgen_nexus.c optional altera_avgen
dev/altera/msgdma/msgdma.c optional altera_msgdma xdma
dev/altera/sdcard/altera_sdcard.c optional altera_sdcard
dev/altera/sdcard/altera_sdcard_disk.c optional altera_sdcard
dev/altera/sdcard/altera_sdcard_io.c optional altera_sdcard
dev/altera/sdcard/altera_sdcard_fdt.c optional altera_sdcard fdt
dev/altera/sdcard/altera_sdcard_nexus.c optional altera_sdcard
dev/altera/softdma/softdma.c optional altera_softdma xdma fdt
dev/altera/pio/pio.c optional altera_pio
dev/altera/pio/pio_if.m optional altera_pio
dev/amdpm/amdpm.c optional amdpm pci | nfpm pci
dev/amdsmb/amdsmb.c optional amdsmb pci
#
dev/ata/ata_if.m optional ata | atacore
dev/ata/ata-all.c optional ata | atacore
dev/ata/ata-dma.c optional ata | atacore
dev/ata/ata-lowlevel.c optional ata | atacore
dev/ata/ata-sata.c optional ata | atacore
dev/ata/ata-isa.c optional ata isa | ataisa
dev/ata/ata-pci.c optional ata pci | atapci
dev/ata/chipsets/ata-acard.c optional ata pci | ataacard
dev/ata/chipsets/ata-acerlabs.c optional ata pci | ataacerlabs
dev/ata/chipsets/ata-amd.c optional ata pci | ataamd
dev/ata/chipsets/ata-ati.c optional ata pci | ataati
dev/ata/chipsets/ata-cenatek.c optional ata pci | atacenatek
dev/ata/chipsets/ata-cypress.c optional ata pci | atacypress
dev/ata/chipsets/ata-cyrix.c optional ata pci | atacyrix
dev/ata/chipsets/ata-highpoint.c optional ata pci | atahighpoint
dev/ata/chipsets/ata-intel.c optional ata pci | ataintel
dev/ata/chipsets/ata-ite.c optional ata pci | ataite
dev/ata/chipsets/ata-jmicron.c optional ata pci | atajmicron
dev/ata/chipsets/ata-marvell.c optional ata pci | atamarvell
dev/ata/chipsets/ata-micron.c optional ata pci | atamicron
dev/ata/chipsets/ata-national.c optional ata pci | atanational
dev/ata/chipsets/ata-netcell.c optional ata pci | atanetcell
dev/ata/chipsets/ata-nvidia.c optional ata pci | atanvidia
dev/ata/chipsets/ata-promise.c optional ata pci | atapromise
dev/ata/chipsets/ata-serverworks.c optional ata pci | ataserverworks
dev/ata/chipsets/ata-siliconimage.c optional ata pci | atasiliconimage | ataati
dev/ata/chipsets/ata-sis.c optional ata pci | atasis
dev/ata/chipsets/ata-via.c optional ata pci | atavia
#
dev/ath/if_ath_pci.c optional ath_pci pci \
compile-with "${ATH_C}"
#
dev/ath/if_ath_ahb.c optional ath_ahb \
compile-with "${ATH_C}"
#
dev/ath/if_ath.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_alq.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_beacon.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_btcoex.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_btcoex_mci.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_debug.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_descdma.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_keycache.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_ioctl.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_led.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_lna_div.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_tx.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_tx_edma.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_tx_ht.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_tdma.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_sysctl.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_rx.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_rx_edma.c optional ath \
compile-with "${ATH_C}"
dev/ath/if_ath_spectral.c optional ath \
compile-with "${ATH_C}"
dev/ath/ah_osdep.c optional ath \
compile-with "${ATH_C}"
#
dev/ath/ath_hal/ah.c optional ath \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_v1.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_v3.c optional ath_hal | ath_ar5211 | ath_ar5212 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_v14.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_v4k.c \
optional ath_hal | ath_ar9285 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_eeprom_9287.c \
optional ath_hal | ath_ar9287 \
compile-with "${ATH_C}"
dev/ath/ath_hal/ah_regdomain.c optional ath \
compile-with "${ATH_C} ${NO_WSHIFT_COUNT_NEGATIVE} ${NO_WSHIFT_COUNT_OVERFLOW}"
# ar5210
dev/ath/ath_hal/ar5210/ar5210_attach.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_beacon.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_interrupts.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_keycache.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_misc.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_phy.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_power.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_recv.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_reset.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5210/ar5210_xmit.c optional ath_hal | ath_ar5210 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar5211
dev/ath/ath_hal/ar5211/ar5211_attach.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_beacon.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_interrupts.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_keycache.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_misc.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_phy.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_power.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_recv.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_reset.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5211/ar5211_xmit.c optional ath_hal | ath_ar5211 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar5212
dev/ath/ath_hal/ar5212/ar5212_ani.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_attach.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_beacon.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_eeprom.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_gpio.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_interrupts.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_keycache.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_misc.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_phy.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_power.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_recv.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_reset.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_rfgain.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5212_xmit.c \
optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \
ath_ar9285 ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar5416 (depends on ar5212)
dev/ath/ath_hal/ar5416/ar5416_ani.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_attach.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_beacon.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_btcoex.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_cal.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_cal_iq.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_cal_adcgain.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_cal_adcdc.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_eeprom.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_gpio.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_interrupts.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_keycache.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_misc.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_phy.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_power.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_radar.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_recv.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_reset.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_spectral.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar5416_xmit.c \
optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \
ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9130 (depends upon ar5416) - also requires AH_SUPPORT_AR9130
#
# Since this is an embedded MAC SoC, there's no need to compile it into the
# default HAL.
dev/ath/ath_hal/ar9001/ar9130_attach.c optional ath_ar9130 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9001/ar9130_phy.c optional ath_ar9130 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9001/ar9130_eeprom.c optional ath_ar9130 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9160 (depends on ar5416)
dev/ath/ath_hal/ar9001/ar9160_attach.c optional ath_hal | ath_ar9160 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9280 (depends on ar5416)
dev/ath/ath_hal/ar9002/ar9280_attach.c optional ath_hal | ath_ar9280 | \
ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9280_olc.c optional ath_hal | ath_ar9280 | \
ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9285 (depends on ar5416 and ar9280)
dev/ath/ath_hal/ar9002/ar9285_attach.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_btcoex.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_reset.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_cal.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_phy.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285_diversity.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9287 (depends on ar5416)
dev/ath/ath_hal/ar9002/ar9287_attach.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9287_reset.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9287_cal.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9287_olc.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ar9300
contrib/dev/ath/ath_hal/ar9300/ar9300_ani.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_attach.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_beacon.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_eeprom.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WCONSTANT_CONVERSION}"
contrib/dev/ath/ath_hal/ar9300/ar9300_freebsd.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_gpio.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_interrupts.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_keycache.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_mci.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_misc.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_paprd.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_phy.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_power.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_radar.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_radio.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_recv.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_recv_ds.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_reset.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WSOMETIMES_UNINITIALIZED} -Wno-unused-function"
contrib/dev/ath/ath_hal/ar9300/ar9300_stub.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_stub_funcs.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_spectral.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_timer.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_xmit.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
contrib/dev/ath/ath_hal/ar9300/ar9300_xmit_ds.c optional ath_hal | ath_ar9300 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal"
# rf backends
dev/ath/ath_hal/ar5212/ar2316.c optional ath_rf2316 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar2317.c optional ath_rf2317 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar2413.c optional ath_hal | ath_rf2413 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar2425.c optional ath_hal | ath_rf2425 | ath_rf2417 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5111.c optional ath_hal | ath_rf5111 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5112.c optional ath_hal | ath_rf5112 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5212/ar5413.c optional ath_hal | ath_rf5413 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar5416/ar2133.c optional ath_hal | ath_ar5416 | \
ath_ar9130 | ath_ar9160 | ath_ar9280 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9280.c optional ath_hal | ath_ar9280 | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9285.c optional ath_hal | ath_ar9285 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
dev/ath/ath_hal/ar9002/ar9287.c optional ath_hal | ath_ar9287 \
compile-with "${ATH_C} -I$S/dev/ath/ath_hal"
# ath rate control algorithms
dev/ath/ath_rate/amrr/amrr.c optional ath_rate_amrr \
compile-with "${ATH_C}"
dev/ath/ath_rate/onoe/onoe.c optional ath_rate_onoe \
compile-with "${ATH_C}"
dev/ath/ath_rate/sample/sample.c optional ath_rate_sample \
compile-with "${ATH_C}"
# ath DFS modules
dev/ath/ath_dfs/null/dfs_null.c optional ath \
compile-with "${ATH_C}"
#
dev/backlight/backlight_if.m optional backlight | compat_linuxkpi
dev/backlight/backlight.c optional backlight | compat_linuxkpi
dev/bce/if_bce.c optional bce
dev/bfe/if_bfe.c optional bfe
dev/bge/if_bge.c optional bge
dev/bhnd/bhnd.c optional bhnd
dev/bhnd/bhnd_erom.c optional bhnd
dev/bhnd/bhnd_erom_if.m optional bhnd
dev/bhnd/bhnd_subr.c optional bhnd
dev/bhnd/bhnd_bus_if.m optional bhnd
dev/bhnd/bhndb/bhnd_bhndb.c optional bhndb bhnd
dev/bhnd/bhndb/bhndb.c optional bhndb bhnd
dev/bhnd/bhndb/bhndb_bus_if.m optional bhndb bhnd
dev/bhnd/bhndb/bhndb_hwdata.c optional bhndb bhnd
dev/bhnd/bhndb/bhndb_if.m optional bhndb bhnd
dev/bhnd/bhndb/bhndb_pci.c optional bhndb_pci bhndb bhnd pci
dev/bhnd/bhndb/bhndb_pci_hwdata.c optional bhndb_pci bhndb bhnd pci
dev/bhnd/bhndb/bhndb_pci_sprom.c optional bhndb_pci bhndb bhnd pci
dev/bhnd/bhndb/bhndb_subr.c optional bhndb bhnd
dev/bhnd/bcma/bcma.c optional bcma bhnd
dev/bhnd/bcma/bcma_bhndb.c optional bcma bhnd bhndb
dev/bhnd/bcma/bcma_erom.c optional bcma bhnd
dev/bhnd/bcma/bcma_subr.c optional bcma bhnd
dev/bhnd/cores/chipc/bhnd_chipc_if.m optional bhnd
dev/bhnd/cores/chipc/bhnd_sprom_chipc.c optional bhnd
dev/bhnd/cores/chipc/bhnd_pmu_chipc.c optional bhnd
dev/bhnd/cores/chipc/chipc.c optional bhnd
dev/bhnd/cores/chipc/chipc_cfi.c optional bhnd cfi
dev/bhnd/cores/chipc/chipc_gpio.c optional bhnd gpio
dev/bhnd/cores/chipc/chipc_slicer.c optional bhnd cfi | bhnd spibus
dev/bhnd/cores/chipc/chipc_spi.c optional bhnd spibus
dev/bhnd/cores/chipc/chipc_subr.c optional bhnd
dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.c optional bhnd
dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_if.m optional bhnd
dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_hostb_if.m optional bhnd
dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_subr.c optional bhnd
dev/bhnd/cores/pci/bhnd_pci.c optional bhnd pci
dev/bhnd/cores/pci/bhnd_pci_hostb.c optional bhndb bhnd pci
dev/bhnd/cores/pci/bhnd_pcib.c optional bhnd_pcib bhnd pci
dev/bhnd/cores/pcie2/bhnd_pcie2.c optional bhnd pci
dev/bhnd/cores/pcie2/bhnd_pcie2_hostb.c optional bhndb bhnd pci
dev/bhnd/cores/pcie2/bhnd_pcie2b.c optional bhnd_pcie2b bhnd pci
dev/bhnd/cores/pmu/bhnd_pmu.c optional bhnd
dev/bhnd/cores/pmu/bhnd_pmu_core.c optional bhnd
dev/bhnd/cores/pmu/bhnd_pmu_if.m optional bhnd
dev/bhnd/cores/pmu/bhnd_pmu_subr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_bcm.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_bcmraw.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_btxt.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_sprom.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_sprom_subr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_data_tlv.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_if.m optional bhnd
dev/bhnd/nvram/bhnd_nvram_io.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_iobuf.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_ioptr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_iores.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_plist.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_store.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_store_subr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_subr.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_value.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_value_fmts.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_value_prf.c optional bhnd
dev/bhnd/nvram/bhnd_nvram_value_subr.c optional bhnd
dev/bhnd/nvram/bhnd_sprom.c optional bhnd
dev/bhnd/siba/siba.c optional siba bhnd
dev/bhnd/siba/siba_bhndb.c optional siba bhnd bhndb
dev/bhnd/siba/siba_erom.c optional siba bhnd
dev/bhnd/siba/siba_subr.c optional siba bhnd
#
dev/bnxt/bnxt_hwrm.c optional bnxt iflib pci
dev/bnxt/bnxt_mgmt.c optional bnxt iflib pci
dev/bnxt/bnxt_sysctl.c optional bnxt iflib pci
dev/bnxt/bnxt_txrx.c optional bnxt iflib pci
dev/bnxt/if_bnxt.c optional bnxt iflib pci
dev/bwi/bwimac.c optional bwi
dev/bwi/bwiphy.c optional bwi
dev/bwi/bwirf.c optional bwi
dev/bwi/if_bwi.c optional bwi
dev/bwi/if_bwi_pci.c optional bwi pci
dev/bwn/if_bwn.c optional bwn bhnd
dev/bwn/if_bwn_pci.c optional bwn pci bhnd bhndb bhndb_pci
dev/bwn/if_bwn_phy_common.c optional bwn bhnd
dev/bwn/if_bwn_phy_g.c optional bwn bhnd
dev/bwn/if_bwn_phy_lp.c optional bwn bhnd
dev/bwn/if_bwn_phy_n.c optional bwn bhnd
dev/bwn/if_bwn_util.c optional bwn bhnd
dev/cadence/if_cgem.c optional cgem fdt
dev/cardbus/card_if.m standard
dev/cardbus/cardbus.c optional cardbus
dev/cardbus/cardbus_cis.c optional cardbus
dev/cardbus/cardbus_device.c optional cardbus
dev/cardbus/power_if.m standard
dev/cas/if_cas.c optional cas
dev/cfi/cfi_bus_fdt.c optional cfi fdt
dev/cfi/cfi_bus_nexus.c optional cfi
dev/cfi/cfi_core.c optional cfi
dev/cfi/cfi_dev.c optional cfi
dev/cfi/cfi_disk.c optional cfid
dev/chromebook_platform/chromebook_platform.c optional chromebook_platform
dev/ciss/ciss.c optional ciss
dev/cpufreq/ichss.c optional cpufreq pci
dev/cxgb/cxgb_main.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/cxgb_sge.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_mc5.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_vsc7323.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_vsc8211.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_ael1002.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_aq100x.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_mv88e1xxx.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_xgmac.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_t3_hw.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/common/cxgb_tn1010.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/sys/uipc_mvec.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/cxgb_t3fw.c optional cxgb cxgb_t3fw \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgbe/t4_clip.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_filter.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_if.m optional cxgbe pci
dev/cxgbe/t4_iov.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_mp_ring.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_main.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_netmap.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_sched.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_sge.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_smt.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_l2t.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_tracer.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_vf.c optional cxgbev pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/common/t4_hw.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/common/t4vf_hw.c optional cxgbev pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/crypto/t6_kern_tls.c optional cxgbe pci kern_tls \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/crypto/t4_keyctx.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_common.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_flash_utils.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_lib.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/cudbg_wtp.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/fastlz.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/cudbg/fastlz_api.c optional cxgbe \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
t4fw_cfg.c optional cxgbe \
compile-with "${AWK} -f $S/tools/fw_stub.awk t4fw_cfg.fw:t4fw_cfg t4fw_cfg_uwire.fw:t4fw_cfg_uwire t4fw.fw:t4fw -mt4fw_cfg -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "t4fw_cfg.c"
t4fw_cfg.fwo optional cxgbe \
dependency "t4fw_cfg.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t4fw_cfg.fwo"
t4fw_cfg.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t4fw_cfg.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t4fw_cfg.fw"
t4fw_cfg_uwire.fwo optional cxgbe \
dependency "t4fw_cfg_uwire.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t4fw_cfg_uwire.fwo"
t4fw_cfg_uwire.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t4fw_cfg_uwire.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t4fw_cfg_uwire.fw"
t4fw.fwo optional cxgbe \
dependency "t4fw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t4fw.fwo"
t4fw.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t4fw-1.27.3.0.bin" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t4fw.fw"
t5fw_cfg.c optional cxgbe \
compile-with "${AWK} -f $S/tools/fw_stub.awk t5fw_cfg.fw:t5fw_cfg t5fw_cfg_uwire.fw:t5fw_cfg_uwire t5fw.fw:t5fw -mt5fw_cfg -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "t5fw_cfg.c"
t5fw_cfg.fwo optional cxgbe \
dependency "t5fw_cfg.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t5fw_cfg.fwo"
t5fw_cfg.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t5fw_cfg.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t5fw_cfg.fw"
t5fw_cfg_uwire.fwo optional cxgbe \
dependency "t5fw_cfg_uwire.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t5fw_cfg_uwire.fwo"
t5fw_cfg_uwire.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t5fw_cfg_uwire.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t5fw_cfg_uwire.fw"
t5fw.fwo optional cxgbe \
dependency "t5fw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t5fw.fwo"
t5fw.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t5fw-1.27.3.0.bin" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t5fw.fw"
t6fw_cfg.c optional cxgbe \
compile-with "${AWK} -f $S/tools/fw_stub.awk t6fw_cfg.fw:t6fw_cfg t6fw_cfg_uwire.fw:t6fw_cfg_uwire t6fw.fw:t6fw -mt6fw_cfg -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "t6fw_cfg.c"
t6fw_cfg.fwo optional cxgbe \
dependency "t6fw_cfg.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t6fw_cfg.fwo"
t6fw_cfg.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t6fw_cfg.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t6fw_cfg.fw"
t6fw_cfg_uwire.fwo optional cxgbe \
dependency "t6fw_cfg_uwire.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t6fw_cfg_uwire.fwo"
t6fw_cfg_uwire.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t6fw_cfg_uwire.txt" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t6fw_cfg_uwire.fw"
t6fw.fwo optional cxgbe \
dependency "t6fw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "t6fw.fwo"
t6fw.fw optional cxgbe \
dependency "$S/dev/cxgbe/firmware/t6fw-1.27.3.0.bin" \
compile-with "${CP} ${.ALLSRC} ${.TARGET}" \
no-obj no-implicit-rule \
clean "t6fw.fw"
dev/cxgbe/crypto/t4_crypto.c optional ccr \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cyapa/cyapa.c optional cyapa iicbus
dev/dc/if_dc.c optional dc pci
dev/dc/dcphy.c optional dc pci
dev/dc/pnphy.c optional dc pci
dev/dcons/dcons.c optional dcons
dev/dcons/dcons_crom.c optional dcons_crom
dev/dcons/dcons_os.c optional dcons
dev/dialog/da9063/da9063_if.m optional da9063_pmic
dev/dialog/da9063/da9063_iic.c optional da9063_pmic iicbus fdt
dev/dialog/da9063/da9063_rtc.c optional da9063_rtc fdt
dev/drm2/drm_agpsupport.c optional drm2
dev/drm2/drm_auth.c optional drm2
dev/drm2/drm_bufs.c optional drm2
dev/drm2/drm_buffer.c optional drm2
dev/drm2/drm_context.c optional drm2
dev/drm2/drm_crtc.c optional drm2
dev/drm2/drm_crtc_helper.c optional drm2
dev/drm2/drm_dma.c optional drm2
dev/drm2/drm_dp_helper.c optional drm2
dev/drm2/drm_dp_iic_helper.c optional drm2
dev/drm2/drm_drv.c optional drm2
dev/drm2/drm_edid.c optional drm2
dev/drm2/drm_fb_helper.c optional drm2
dev/drm2/drm_fops.c optional drm2
dev/drm2/drm_gem.c optional drm2
dev/drm2/drm_gem_names.c optional drm2
dev/drm2/drm_global.c optional drm2
dev/drm2/drm_hashtab.c optional drm2
dev/drm2/drm_ioctl.c optional drm2
dev/drm2/drm_irq.c optional drm2
dev/drm2/drm_linux_list_sort.c optional drm2
dev/drm2/drm_lock.c optional drm2
dev/drm2/drm_memory.c optional drm2
dev/drm2/drm_mm.c optional drm2
dev/drm2/drm_modes.c optional drm2
dev/drm2/drm_pci.c optional drm2
dev/drm2/drm_platform.c optional drm2
dev/drm2/drm_scatter.c optional drm2
dev/drm2/drm_stub.c optional drm2
dev/drm2/drm_sysctl.c optional drm2
dev/drm2/drm_vm.c optional drm2
dev/drm2/drm_os_freebsd.c optional drm2
dev/drm2/ttm/ttm_agp_backend.c optional drm2
dev/drm2/ttm/ttm_lock.c optional drm2
dev/drm2/ttm/ttm_object.c optional drm2
dev/drm2/ttm/ttm_tt.c optional drm2
dev/drm2/ttm/ttm_bo_util.c optional drm2
dev/drm2/ttm/ttm_bo.c optional drm2
dev/drm2/ttm/ttm_bo_manager.c optional drm2
dev/drm2/ttm/ttm_execbuf_util.c optional drm2
dev/drm2/ttm/ttm_memory.c optional drm2
dev/drm2/ttm/ttm_page_alloc.c optional drm2
dev/drm2/ttm/ttm_bo_vm.c optional drm2
dev/efidev/efidev.c optional efirt
dev/efidev/efirt.c optional efirt
dev/efidev/efirtc.c optional efirt
dev/e1000/if_em.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/em_txrx.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/igb_txrx.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_80003es2lan.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82540.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82541.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82542.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82543.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82571.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_82575.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_ich8lan.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_i210.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_api.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_base.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_mac.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_manage.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_nvm.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_phy.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_vf.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_mbx.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/e1000/e1000_osdep.c optional em \
compile-with "${NORMAL_C} -I$S/dev/e1000"
dev/et/if_et.c optional et
dev/ena/ena.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
dev/ena/ena_datapath.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
dev/ena/ena_netmap.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
dev/ena/ena_rss.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
dev/ena/ena_sysctl.c optional ena \
compile-with "${NORMAL_C} -I$S/contrib"
contrib/ena-com/ena_com.c optional ena
contrib/ena-com/ena_eth_com.c optional ena
dev/etherswitch/arswitch/arswitch.c optional arswitch
dev/etherswitch/arswitch/arswitch_reg.c optional arswitch
dev/etherswitch/arswitch/arswitch_phy.c optional arswitch
dev/etherswitch/arswitch/arswitch_8216.c optional arswitch
dev/etherswitch/arswitch/arswitch_8226.c optional arswitch
dev/etherswitch/arswitch/arswitch_8316.c optional arswitch
dev/etherswitch/arswitch/arswitch_8327.c optional arswitch
dev/etherswitch/arswitch/arswitch_7240.c optional arswitch
dev/etherswitch/arswitch/arswitch_9340.c optional arswitch
dev/etherswitch/arswitch/arswitch_vlans.c optional arswitch
dev/etherswitch/etherswitch.c optional etherswitch
dev/etherswitch/etherswitch_if.m optional etherswitch
dev/etherswitch/ip17x/ip17x.c optional ip17x
dev/etherswitch/ip17x/ip175c.c optional ip17x
dev/etherswitch/ip17x/ip175d.c optional ip17x
dev/etherswitch/ip17x/ip17x_phy.c optional ip17x
dev/etherswitch/ip17x/ip17x_vlans.c optional ip17x
dev/etherswitch/miiproxy.c optional miiproxy
dev/etherswitch/rtl8366/rtl8366rb.c optional rtl8366rb
dev/etherswitch/e6000sw/e6000sw.c optional e6000sw fdt
dev/etherswitch/e6000sw/e6060sw.c optional e6060sw
dev/etherswitch/infineon/adm6996fc.c optional adm6996fc
dev/etherswitch/micrel/ksz8995ma.c optional ksz8995ma
dev/etherswitch/ukswitch/ukswitch.c optional ukswitch
dev/evdev/cdev.c optional evdev
dev/evdev/evdev.c optional evdev
dev/evdev/evdev_mt.c optional evdev
dev/evdev/evdev_utils.c optional evdev
dev/evdev/uinput.c optional evdev uinput
dev/exca/exca.c optional cbb
dev/extres/clk/clk.c optional clk
dev/extres/clk/clkdev_if.m optional clk
dev/extres/clk/clknode_if.m optional clk
dev/extres/clk/clk_bus.c optional clk fdt
dev/extres/clk/clk_div.c optional clk
dev/extres/clk/clk_fixed.c optional clk
dev/extres/clk/clk_gate.c optional clk
dev/extres/clk/clk_link.c optional clk
dev/extres/clk/clk_mux.c optional clk
dev/extres/phy/phy.c optional phy
dev/extres/phy/phydev_if.m optional phy fdt
dev/extres/phy/phynode_if.m optional phy
dev/extres/phy/phy_usb.c optional phy
dev/extres/phy/phynode_usb_if.m optional phy
dev/extres/hwreset/hwreset.c optional hwreset
dev/extres/hwreset/hwreset_array.c optional hwreset
dev/extres/hwreset/hwreset_if.m optional hwreset
dev/extres/nvmem/nvmem.c optional nvmem fdt
dev/extres/nvmem/nvmem_if.m optional nvmem
dev/extres/regulator/regdev_if.m optional regulator fdt
dev/extres/regulator/regnode_if.m optional regulator
dev/extres/regulator/regulator.c optional regulator
dev/extres/regulator/regulator_bus.c optional regulator fdt
dev/extres/regulator/regulator_fixed.c optional regulator
dev/extres/syscon/syscon.c optional syscon
dev/extres/syscon/syscon_generic.c optional syscon fdt
dev/extres/syscon/syscon_if.m optional syscon
dev/extres/syscon/syscon_power.c optional syscon syscon_power
dev/fb/fbd.c optional fbd | vt
dev/fb/fb_if.m standard
dev/fb/splash.c optional sc splash
dev/fdt/fdt_clock.c optional fdt fdt_clock
dev/fdt/fdt_clock_if.m optional fdt fdt_clock
dev/fdt/fdt_common.c optional fdt
dev/fdt/fdt_pinctrl.c optional fdt fdt_pinctrl
dev/fdt/fdt_pinctrl_if.m optional fdt fdt_pinctrl
dev/fdt/fdt_slicer.c optional fdt cfi | fdt mx25l | fdt n25q | fdt at45d
dev/fdt/fdt_static_dtb.S optional fdt fdt_dtb_static \
dependency "${FDT_DTS_FILE:T:R}.dtb"
dev/fdt/simplebus.c optional fdt
dev/fdt/simple_mfd.c optional syscon fdt
dev/filemon/filemon.c optional filemon
dev/firewire/firewire.c optional firewire
dev/firewire/fwcrom.c optional firewire
dev/firewire/fwdev.c optional firewire
dev/firewire/fwdma.c optional firewire
dev/firewire/fwmem.c optional firewire
dev/firewire/fwohci.c optional firewire
dev/firewire/fwohci_pci.c optional firewire pci
dev/firewire/if_fwe.c optional fwe
dev/firewire/if_fwip.c optional fwip
dev/firewire/sbp.c optional sbp
dev/firewire/sbp_targ.c optional sbp_targ
dev/flash/at45d.c optional at45d
dev/flash/cqspi.c optional cqspi fdt xdma
dev/flash/mx25l.c optional mx25l
dev/flash/n25q.c optional n25q fdt
dev/flash/qspi_if.m optional cqspi fdt | n25q fdt
dev/fxp/if_fxp.c optional fxp
dev/fxp/inphy.c optional fxp
dev/gem/if_gem.c optional gem
dev/gem/if_gem_pci.c optional gem pci
dev/gve/gve_adminq.c optional gve
dev/gve/gve_main.c optional gve
dev/gve/gve_qpl.c optional gve
dev/gve/gve_rx.c optional gve
dev/gve/gve_sysctl.c optional gve
dev/gve/gve_tx.c optional gve
dev/gve/gve_utils.c optional gve
dev/goldfish/goldfish_rtc.c optional goldfish_rtc fdt
dev/gpio/dwgpio/dwgpio.c optional gpio dwgpio fdt
dev/gpio/dwgpio/dwgpio_bus.c optional gpio dwgpio fdt
dev/gpio/dwgpio/dwgpio_if.m optional gpio dwgpio fdt
dev/gpio/gpiobacklight.c optional gpiobacklight fdt
dev/gpio/gpiokeys.c optional gpiokeys fdt
dev/gpio/gpiokeys_codes.c optional gpiokeys fdt
dev/gpio/gpiobus.c optional gpio \
dependency "gpiobus_if.h"
dev/gpio/gpioc.c optional gpio \
dependency "gpio_if.h"
dev/gpio/gpioiic.c optional gpioiic
dev/gpio/gpioled.c optional gpioled !fdt
dev/gpio/gpioled_fdt.c optional gpioled fdt
dev/gpio/gpiomdio.c optional gpiomdio mii_bitbang
dev/gpio/gpiopower.c optional gpiopower fdt
dev/gpio/gpioregulator.c optional gpioregulator fdt
dev/gpio/gpiospi.c optional gpiospi
dev/gpio/gpioths.c optional gpioths
dev/gpio/gpio_if.m optional gpio
dev/gpio/gpiobus_if.m optional gpio
dev/gpio/gpiopps.c optional gpiopps fdt
dev/gpio/ofw_gpiobus.c optional fdt gpio
dev/hid/bcm5974.c optional bcm5974
dev/hid/hconf.c optional hconf
dev/hid/hcons.c optional hcons
dev/hid/hgame.c optional hgame
dev/hid/hid.c optional hid
dev/hid/hid_if.m optional hid
dev/hid/hidbus.c optional hidbus
dev/hid/hidmap.c optional hidmap
dev/hid/hidquirk.c optional hid
dev/hid/hidraw.c optional hidraw
dev/hid/hkbd.c optional hkbd
dev/hid/hms.c optional hms
dev/hid/hmt.c optional hmt hconf
dev/hid/hpen.c optional hpen
dev/hid/hsctrl.c optional hsctrl
dev/hid/ietp.c optional ietp
dev/hid/ps4dshock.c optional ps4dshock
dev/hid/xb360gp.c optional xb360gp
dev/hifn/hifn7751.c optional hifn
dev/hptiop/hptiop.c optional hptiop scbus
dev/hwpmc/hwpmc_logging.c optional hwpmc
dev/hwpmc/hwpmc_mod.c optional hwpmc
dev/hwpmc/hwpmc_soft.c optional hwpmc
dev/ichiic/ig4_acpi.c optional ig4 acpi iicbus
dev/ichiic/ig4_iic.c optional ig4 iicbus
dev/ichiic/ig4_pci.c optional ig4 pci iicbus
dev/ichsmb/ichsmb.c optional ichsmb
dev/ichsmb/ichsmb_pci.c optional ichsmb pci
dev/ida/ida.c optional ida
dev/ida/ida_disk.c optional ida
dev/ida/ida_pci.c optional ida pci
dev/iicbus/acpi_iicbus.c optional acpi iicbus | acpi compat_linuxkpi
dev/iicbus/ad7418.c optional ad7418
dev/iicbus/ads111x.c optional ads111x
dev/iicbus/ds1307.c optional ds1307
dev/iicbus/ds13rtc.c optional ds13rtc | ds133x | ds1374
dev/iicbus/ds1672.c optional ds1672
dev/iicbus/ds3231.c optional ds3231
dev/iicbus/htu21.c optional htu21
dev/iicbus/icee.c optional icee
dev/iicbus/if_ic.c optional ic
dev/iicbus/iic.c optional iic
dev/iicbus/iic_recover_bus.c optional iicbus | compat_linuxkpi
dev/iicbus/iicbb.c optional iicbb | compat_linuxkpi
dev/iicbus/iicbb_if.m optional iicbb | compat_linuxkpi
dev/iicbus/iicbus.c optional iicbus | compat_linuxkpi
dev/iicbus/iicbus_if.m optional iicbus | compat_linuxkpi
dev/iicbus/iichid.c optional iichid acpi hid iicbus
dev/iicbus/iiconf.c optional iicbus | compat_linuxkpi
dev/iicbus/iicsmb.c optional iicsmb \
dependency "iicbus_if.h"
dev/iicbus/iicoc.c optional iicoc
dev/iicbus/iicoc_fdt.c optional iicoc fdt
dev/iicbus/iicoc_pci.c optional iicoc pci
dev/iicbus/isl12xx.c optional isl12xx
dev/iicbus/lm75.c optional lm75
dev/iicbus/max44009.c optional max44009
dev/iicbus/mux/iicmux.c optional iicmux
dev/iicbus/mux/iicmux_if.m optional iicmux
dev/iicbus/mux/iic_gpiomux.c optional iic_gpiomux fdt
dev/iicbus/mux/ltc430x.c optional ltc430x
dev/iicbus/mux/pca954x.c optional pca954x iicbus iicmux
dev/iicbus/nxprtc.c optional nxprtc | pcf8563
dev/iicbus/ofw_iicbus.c optional fdt iicbus
dev/iicbus/ofw_iicbus_if.m optional fdt iicbus
dev/iicbus/pcf8574.c optional pcf8574
dev/iicbus/pcf8591.c optional pcf8591
dev/iicbus/rtc8583.c optional rtc8583
dev/iicbus/rtc/pcf85063.c optional pcf85063 iicbus fdt
dev/iicbus/rtc/rx8803.c optional rx8803 iicbus fdt
dev/iicbus/s35390a.c optional s35390a
dev/iicbus/sy8106a.c optional sy8106a fdt
dev/iicbus/syr827.c optional syr827 fdt
dev/iicbus/gpio/tca64xx.c optional tca64xx fdt gpio
dev/iicbus/pmic/fan53555.c optional fan53555 fdt | tcs4525 fdt
dev/igc/if_igc.c optional igc iflib pci
dev/igc/igc_api.c optional igc iflib pci
dev/igc/igc_base.c optional igc iflib pci
dev/igc/igc_i225.c optional igc iflib pci
dev/igc/igc_mac.c optional igc iflib pci
dev/igc/igc_nvm.c optional igc iflib pci
dev/igc/igc_phy.c optional igc iflib pci
dev/igc/igc_txrx.c optional igc iflib pci
dev/intpm/intpm.c optional intpm pci
# XXX Work around clang warning, until maintainer approves fix.
dev/ips/ips.c optional ips \
compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}"
dev/ips/ips_commands.c optional ips
dev/ips/ips_disk.c optional ips
dev/ips/ips_ioctl.c optional ips
dev/ips/ips_pci.c optional ips pci
dev/ipw/if_ipw.c optional ipw
ipwbssfw.c optional ipwbssfw | ipwfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_bss.fw:ipw_bss:130 -lintel_ipw -mipw_bss -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "ipwbssfw.c"
ipw_bss.fwo optional ipwbssfw | ipwfw \
dependency "ipw_bss.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "ipw_bss.fwo"
ipw_bss.fw optional ipwbssfw | ipwfw \
dependency "$S/contrib/dev/ipw/ipw2100-1.3.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "ipw_bss.fw"
ipwibssfw.c optional ipwibssfw | ipwfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_ibss.fw:ipw_ibss:130 -lintel_ipw -mipw_ibss -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "ipwibssfw.c"
ipw_ibss.fwo optional ipwibssfw | ipwfw \
dependency "ipw_ibss.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "ipw_ibss.fwo"
ipw_ibss.fw optional ipwibssfw | ipwfw \
dependency "$S/contrib/dev/ipw/ipw2100-1.3-i.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "ipw_ibss.fw"
ipwmonitorfw.c optional ipwmonitorfw | ipwfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_monitor.fw:ipw_monitor:130 -lintel_ipw -mipw_monitor -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "ipwmonitorfw.c"
ipw_monitor.fwo optional ipwmonitorfw | ipwfw \
dependency "ipw_monitor.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "ipw_monitor.fwo"
ipw_monitor.fw optional ipwmonitorfw | ipwfw \
dependency "$S/contrib/dev/ipw/ipw2100-1.3-p.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "ipw_monitor.fw"
dev/iscsi/icl.c optional iscsi
dev/iscsi/icl_conn_if.m optional cfiscsi | iscsi
dev/iscsi/icl_soft.c optional iscsi
dev/iscsi/icl_soft_proxy.c optional iscsi
dev/iscsi/iscsi.c optional iscsi scbus
dev/ismt/ismt.c optional ismt
dev/isl/isl.c optional isl iicbus
dev/isp/isp.c optional isp
dev/isp/isp_freebsd.c optional isp
dev/isp/isp_library.c optional isp
dev/isp/isp_pci.c optional isp pci
dev/isp/isp_target.c optional isp
dev/ispfw/ispfw.c optional ispfw
dev/iwi/if_iwi.c optional iwi
iwibssfw.c optional iwibssfw | iwifw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_bss.fw:iwi_bss:300 -lintel_iwi -miwi_bss -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwibssfw.c"
iwi_bss.fwo optional iwibssfw | iwifw \
dependency "iwi_bss.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwi_bss.fwo"
iwi_bss.fw optional iwibssfw | iwifw \
dependency "$S/contrib/dev/iwi/ipw2200-bss.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwi_bss.fw"
iwiibssfw.c optional iwiibssfw | iwifw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_ibss.fw:iwi_ibss:300 -lintel_iwi -miwi_ibss -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwiibssfw.c"
iwi_ibss.fwo optional iwiibssfw | iwifw \
dependency "iwi_ibss.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwi_ibss.fwo"
iwi_ibss.fw optional iwiibssfw | iwifw \
dependency "$S/contrib/dev/iwi/ipw2200-ibss.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwi_ibss.fw"
iwimonitorfw.c optional iwimonitorfw | iwifw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_monitor.fw:iwi_monitor:300 -lintel_iwi -miwi_monitor -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwimonitorfw.c"
iwi_monitor.fwo optional iwimonitorfw | iwifw \
dependency "iwi_monitor.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwi_monitor.fwo"
iwi_monitor.fw optional iwimonitorfw | iwifw \
dependency "$S/contrib/dev/iwi/ipw2200-sniffer.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwi_monitor.fw"
dev/iwm/if_iwm.c optional iwm
dev/iwm/if_iwm_7000.c optional iwm
dev/iwm/if_iwm_8000.c optional iwm
dev/iwm/if_iwm_9000.c optional iwm
dev/iwm/if_iwm_9260.c optional iwm
dev/iwm/if_iwm_binding.c optional iwm
dev/iwm/if_iwm_fw.c optional iwm
dev/iwm/if_iwm_led.c optional iwm
dev/iwm/if_iwm_mac_ctxt.c optional iwm
dev/iwm/if_iwm_notif_wait.c optional iwm
dev/iwm/if_iwm_pcie_trans.c optional iwm
dev/iwm/if_iwm_phy_ctxt.c optional iwm
dev/iwm/if_iwm_phy_db.c optional iwm
dev/iwm/if_iwm_power.c optional iwm
dev/iwm/if_iwm_scan.c optional iwm
dev/iwm/if_iwm_sf.c optional iwm
dev/iwm/if_iwm_sta.c optional iwm
dev/iwm/if_iwm_time_event.c optional iwm
dev/iwm/if_iwm_util.c optional iwm
iwm3160fw.c optional iwm3160fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3160.fw:iwm3160fw -miwm3160fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm3160fw.c"
iwm3160fw.fwo optional iwm3160fw | iwmfw \
dependency "iwm3160.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm3160fw.fwo"
iwm3160.fw optional iwm3160fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-3160-17.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm3160.fw"
iwm3168fw.c optional iwm3168fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3168.fw:iwm3168fw -miwm3168fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm3168fw.c"
iwm3168fw.fwo optional iwm3168fw | iwmfw \
dependency "iwm3168.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm3168fw.fwo"
iwm3168.fw optional iwm3168fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-3168-22.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm3168.fw"
iwm7260fw.c optional iwm7260fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7260.fw:iwm7260fw -miwm7260fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm7260fw.c"
iwm7260fw.fwo optional iwm7260fw | iwmfw \
dependency "iwm7260.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm7260fw.fwo"
iwm7260.fw optional iwm7260fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-7260-17.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm7260.fw"
iwm7265fw.c optional iwm7265fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265.fw:iwm7265fw -miwm7265fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm7265fw.c"
iwm7265fw.fwo optional iwm7265fw | iwmfw \
dependency "iwm7265.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm7265fw.fwo"
iwm7265.fw optional iwm7265fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-7265-17.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm7265.fw"
iwm7265Dfw.c optional iwm7265Dfw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265D.fw:iwm7265Dfw -miwm7265Dfw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm7265Dfw.c"
iwm7265Dfw.fwo optional iwm7265Dfw | iwmfw \
dependency "iwm7265D.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm7265Dfw.fwo"
iwm7265D.fw optional iwm7265Dfw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-7265D-17.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm7265D.fw"
iwm8000Cfw.c optional iwm8000Cfw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8000C.fw:iwm8000Cfw -miwm8000Cfw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm8000Cfw.c"
iwm8000Cfw.fwo optional iwm8000Cfw | iwmfw \
dependency "iwm8000C.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm8000Cfw.fwo"
iwm8000C.fw optional iwm8000Cfw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-8000C-16.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm8000C.fw"
iwm8265.fw optional iwm8265fw | iwmfw \
dependency "$S/contrib/dev/iwm/iwm-8265-22.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwm8265.fw"
iwm8265fw.c optional iwm8265fw | iwmfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8265.fw:iwm8265fw -miwm8265fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwm8265fw.c"
iwm8265fw.fwo optional iwm8265fw | iwmfw \
dependency "iwm8265.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwm8265fw.fwo"
dev/iwn/if_iwn.c optional iwn
iwn1000fw.c optional iwn1000fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn1000.fw:iwn1000fw -miwn1000fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn1000fw.c"
iwn1000fw.fwo optional iwn1000fw | iwnfw \
dependency "iwn1000.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn1000fw.fwo"
iwn1000.fw optional iwn1000fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-1000-39.31.5.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn1000.fw"
iwn100fw.c optional iwn100fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn100.fw:iwn100fw -miwn100fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn100fw.c"
iwn100fw.fwo optional iwn100fw | iwnfw \
dependency "iwn100.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn100fw.fwo"
iwn100.fw optional iwn100fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-100-39.31.5.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn100.fw"
iwn105fw.c optional iwn105fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn105.fw:iwn105fw -miwn105fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn105fw.c"
iwn105fw.fwo optional iwn105fw | iwnfw \
dependency "iwn105.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn105fw.fwo"
iwn105.fw optional iwn105fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-105-6-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn105.fw"
iwn135fw.c optional iwn135fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn135.fw:iwn135fw -miwn135fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn135fw.c"
iwn135fw.fwo optional iwn135fw | iwnfw \
dependency "iwn135.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn135fw.fwo"
iwn135.fw optional iwn135fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-135-6-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn135.fw"
iwn2000fw.c optional iwn2000fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2000.fw:iwn2000fw -miwn2000fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn2000fw.c"
iwn2000fw.fwo optional iwn2000fw | iwnfw \
dependency "iwn2000.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn2000fw.fwo"
iwn2000.fw optional iwn2000fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-2000-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn2000.fw"
iwn2030fw.c optional iwn2030fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2030.fw:iwn2030fw -miwn2030fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn2030fw.c"
iwn2030fw.fwo optional iwn2030fw | iwnfw \
dependency "iwn2030.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn2030fw.fwo"
iwn2030.fw optional iwn2030fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwnwifi-2030-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn2030.fw"
iwn4965fw.c optional iwn4965fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn4965.fw:iwn4965fw -miwn4965fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn4965fw.c"
iwn4965fw.fwo optional iwn4965fw | iwnfw \
dependency "iwn4965.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn4965fw.fwo"
iwn4965.fw optional iwn4965fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-4965-228.61.2.24.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn4965.fw"
iwn5000fw.c optional iwn5000fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5000.fw:iwn5000fw -miwn5000fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn5000fw.c"
iwn5000fw.fwo optional iwn5000fw | iwnfw \
dependency "iwn5000.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn5000fw.fwo"
iwn5000.fw optional iwn5000fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-5000-8.83.5.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn5000.fw"
iwn5150fw.c optional iwn5150fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5150.fw:iwn5150fw -miwn5150fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn5150fw.c"
iwn5150fw.fwo optional iwn5150fw | iwnfw \
dependency "iwn5150.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn5150fw.fwo"
iwn5150.fw optional iwn5150fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-5150-8.24.2.2.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn5150.fw"
iwn6000fw.c optional iwn6000fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000.fw:iwn6000fw -miwn6000fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn6000fw.c"
iwn6000fw.fwo optional iwn6000fw | iwnfw \
dependency "iwn6000.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn6000fw.fwo"
iwn6000.fw optional iwn6000fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-6000-9.221.4.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn6000.fw"
iwn6000g2afw.c optional iwn6000g2afw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2a.fw:iwn6000g2afw -miwn6000g2afw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn6000g2afw.c"
iwn6000g2afw.fwo optional iwn6000g2afw | iwnfw \
dependency "iwn6000g2a.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn6000g2afw.fwo"
iwn6000g2a.fw optional iwn6000g2afw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-6000g2a-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn6000g2a.fw"
iwn6000g2bfw.c optional iwn6000g2bfw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2b.fw:iwn6000g2bfw -miwn6000g2bfw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn6000g2bfw.c"
iwn6000g2bfw.fwo optional iwn6000g2bfw | iwnfw \
dependency "iwn6000g2b.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn6000g2bfw.fwo"
iwn6000g2b.fw optional iwn6000g2bfw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-6000g2b-18.168.6.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn6000g2b.fw"
iwn6050fw.c optional iwn6050fw | iwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6050.fw:iwn6050fw -miwn6050fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "iwn6050fw.c"
iwn6050fw.fwo optional iwn6050fw | iwnfw \
dependency "iwn6050.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "iwn6050fw.fwo"
iwn6050.fw optional iwn6050fw | iwnfw \
dependency "$S/contrib/dev/iwn/iwlwifi-6050-41.28.5.1.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "iwn6050.fw"
dev/ixgbe/if_ix.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
dev/ixgbe/if_ixv.c optional ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
dev/ixgbe/if_bypass.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/if_fdir.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/if_sriov.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ix_txrx.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_osdep.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_phy.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_api.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_common.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_mbx.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_vf.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_82598.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_82599.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_x540.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_x550.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb_82598.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb_82599.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/jedec_dimm/jedec_dimm.c optional jedec_dimm smbus
dev/jme/if_jme.c optional jme pci
dev/kbd/kbd.c optional atkbd | pckbd | sc | ukbd | vt | hkbd
dev/kbdmux/kbdmux.c optional kbdmux
dev/ksyms/ksyms.c optional ksyms
dev/le/am7990.c optional le
dev/le/am79900.c optional le
dev/le/if_le_pci.c optional le pci
dev/le/lance.c optional le
dev/led/led.c standard
dev/lge/if_lge.c optional lge
dev/liquidio/base/cn23xx_pf_device.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_console.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_ctrl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_device.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_droq.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_mem_ops.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_request_manager.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/base/lio_response_manager.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_core.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_ioctl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_main.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_rss.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_rxtx.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
dev/liquidio/lio_sysctl.c optional lio \
compile-with "${NORMAL_C} \
-I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP"
lio.c optional lio \
compile-with "${AWK} -f $S/tools/fw_stub.awk lio_23xx_nic.bin.fw:lio_23xx_nic.bin -mlio_23xx_nic.bin -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "lio.c"
lio_23xx_nic.bin.fw.fwo optional lio \
dependency "lio_23xx_nic.bin.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "lio_23xx_nic.bin.fw.fwo"
lio_23xx_nic.bin.fw optional lio \
dependency "$S/contrib/dev/liquidio/lio_23xx_nic.bin.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "lio_23xx_nic.bin.fw"
dev/malo/if_malo.c optional malo
dev/malo/if_malohal.c optional malo
dev/malo/if_malo_pci.c optional malo pci
dev/md/md.c optional md
dev/mdio/mdio_if.m optional miiproxy | mdio
dev/mdio/mdio.c optional miiproxy | mdio
dev/mem/memdev.c optional mem
dev/mem/memutil.c optional mem
dev/mfi/mfi.c optional mfi
dev/mfi/mfi_debug.c optional mfi
dev/mfi/mfi_pci.c optional mfi pci
dev/mfi/mfi_disk.c optional mfi
dev/mfi/mfi_syspd.c optional mfi
dev/mfi/mfi_tbolt.c optional mfi
dev/mfi/mfi_cam.c optional mfip scbus
dev/mii/acphy.c optional miibus | acphy
dev/mii/amphy.c optional miibus | amphy
dev/mii/atphy.c optional miibus | atphy
dev/mii/axphy.c optional miibus | axphy
dev/mii/bmtphy.c optional miibus | bmtphy
dev/mii/brgphy.c optional miibus | brgphy
dev/mii/ciphy.c optional miibus | ciphy
dev/mii/dp83822phy.c optional miibus | dp83822phy
dev/mii/dp83867phy.c optional miibus | dp83867phy
dev/mii/e1000phy.c optional miibus | e1000phy
dev/mii/gentbi.c optional miibus | gentbi
dev/mii/icsphy.c optional miibus | icsphy
dev/mii/ip1000phy.c optional miibus | ip1000phy
dev/mii/jmphy.c optional miibus | jmphy
dev/mii/lxtphy.c optional miibus | lxtphy
dev/mii/mcommphy.c optional miibus | mcommphy
dev/mii/micphy.c optional miibus fdt | micphy fdt
dev/mii/mii.c optional miibus | mii
dev/mii/mii_bitbang.c optional miibus | mii_bitbang
dev/mii/mii_physubr.c optional miibus | mii
dev/mii/mii_fdt.c optional miibus fdt | mii fdt
dev/mii/miibus_if.m optional miibus | mii
dev/mii/mv88e151x.c optional miibus | mv88e151x
dev/mii/nsgphy.c optional miibus | nsgphy
dev/mii/nsphy.c optional miibus | nsphy
dev/mii/nsphyter.c optional miibus | nsphyter
dev/mii/pnaphy.c optional miibus | pnaphy
dev/mii/qsphy.c optional miibus | qsphy
dev/mii/rdcphy.c optional miibus | rdcphy
dev/mii/rgephy.c optional miibus | rgephy
dev/mii/rlphy.c optional miibus | rlphy
dev/mii/rlswitch.c optional rlswitch
dev/mii/smcphy.c optional miibus | smcphy
dev/mii/smscphy.c optional miibus | smscphy
dev/mii/tdkphy.c optional miibus | tdkphy
dev/mii/truephy.c optional miibus | truephy
dev/mii/ukphy.c optional miibus | mii
dev/mii/ukphy_subr.c optional miibus | mii
dev/mii/vscphy.c optional miibus | vscphy
dev/mii/xmphy.c optional miibus | xmphy
dev/mlxfw/mlxfw_fsm.c optional mlxfw \
compile-with "${MLXFW_C}"
dev/mlxfw/mlxfw_mfa2.c optional mlxfw \
compile-with "${MLXFW_C}"
dev/mlxfw/mlxfw_mfa2_tlv_multi.c optional mlxfw \
compile-with "${MLXFW_C}"
dev/mlx/mlx.c optional mlx
dev/mlx/mlx_disk.c optional mlx
dev/mlx/mlx_pci.c optional mlx pci
dev/mmc/mmc_subr.c optional mmc | mmcsd !mmccam
dev/mmc/mmc.c optional mmc !mmccam
dev/mmc/mmcbr_if.m standard
dev/mmc/mmcbus_if.m standard
dev/mmc/mmcsd.c optional mmcsd !mmccam
dev/mmc/mmc_fdt_helpers.c optional mmc regulator clk fdt | mmccam regulator clk fdt
dev/mmc/mmc_helpers.c optional mmc gpio regulator clk | mmccam gpio regulator clk
dev/mmc/mmc_pwrseq.c optional mmc clk regulator fdt | mmccam clk regulator fdt
dev/mmc/mmc_pwrseq_if.m optional mmc clk regulator fdt | mmccam clk regulator fdt
dev/mmcnull/mmcnull.c optional mmcnull
dev/mpr/mpr.c optional mpr
dev/mpr/mpr_config.c optional mpr
# XXX Work around clang warning, until maintainer approves fix.
dev/mpr/mpr_mapping.c optional mpr \
compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}"
dev/mpr/mpr_pci.c optional mpr pci
dev/mpr/mpr_sas.c optional mpr \
compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}"
dev/mpr/mpr_sas_lsi.c optional mpr
dev/mpr/mpr_table.c optional mpr
dev/mpr/mpr_user.c optional mpr
dev/mps/mps.c optional mps
dev/mps/mps_config.c optional mps
# XXX Work around clang warning, until maintainer approves fix.
dev/mps/mps_mapping.c optional mps \
compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}"
dev/mps/mps_pci.c optional mps pci
dev/mps/mps_sas.c optional mps \
compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}"
dev/mps/mps_sas_lsi.c optional mps
dev/mps/mps_table.c optional mps
dev/mps/mps_user.c optional mps
dev/mpt/mpt.c optional mpt
dev/mpt/mpt_cam.c optional mpt
dev/mpt/mpt_debug.c optional mpt
dev/mpt/mpt_pci.c optional mpt pci
dev/mpt/mpt_raid.c optional mpt
dev/mpt/mpt_user.c optional mpt
dev/mrsas/mrsas.c optional mrsas
dev/mrsas/mrsas_cam.c optional mrsas
dev/mrsas/mrsas_ioctl.c optional mrsas
dev/mrsas/mrsas_fp.c optional mrsas
dev/msk/if_msk.c optional msk
dev/mvs/mvs.c optional mvs
dev/mvs/mvs_if.m optional mvs
dev/mvs/mvs_pci.c optional mvs pci
dev/mwl/if_mwl.c optional mwl
dev/mwl/if_mwl_pci.c optional mwl pci
dev/mwl/mwlhal.c optional mwl
mwlfw.c optional mwlfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk mw88W8363.fw:mw88W8363fw mwlboot.fw:mwlboot -mmwl -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "mwlfw.c"
mw88W8363.fwo optional mwlfw \
dependency "mw88W8363.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "mw88W8363.fwo"
mw88W8363.fw optional mwlfw \
dependency "$S/contrib/dev/mwl/mw88W8363.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "mw88W8363.fw"
mwlboot.fwo optional mwlfw \
dependency "mwlboot.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "mwlboot.fwo"
mwlboot.fw optional mwlfw \
dependency "$S/contrib/dev/mwl/mwlboot.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "mwlboot.fw"
dev/mxge/if_mxge.c optional mxge pci
dev/mxge/mxge_eth_z8e.c optional mxge pci
dev/mxge/mxge_ethp_z8e.c optional mxge pci
dev/mxge/mxge_rss_eth_z8e.c optional mxge pci
dev/mxge/mxge_rss_ethp_z8e.c optional mxge pci
dev/my/if_my.c optional my
dev/netmap/if_ptnet.c optional netmap inet
dev/netmap/netmap.c optional netmap
dev/netmap/netmap_bdg.c optional netmap
dev/netmap/netmap_freebsd.c optional netmap
dev/netmap/netmap_generic.c optional netmap
dev/netmap/netmap_kloop.c optional netmap
dev/netmap/netmap_legacy.c optional netmap
dev/netmap/netmap_mbq.c optional netmap
dev/netmap/netmap_mem2.c optional netmap
dev/netmap/netmap_monitor.c optional netmap
dev/netmap/netmap_null.c optional netmap
dev/netmap/netmap_offloadings.c optional netmap
dev/netmap/netmap_pipe.c optional netmap
dev/netmap/netmap_vale.c optional netmap
# compile-with "${NORMAL_C} -Wconversion -Wextra"
dev/nfsmb/nfsmb.c optional nfsmb pci
dev/nge/if_nge.c optional nge
dev/nmdm/nmdm.c optional nmdm
dev/null/null.c standard
dev/nvd/nvd.c optional nvd nvme
dev/nvme/nvme.c optional nvme
dev/nvme/nvme_ahci.c optional nvme ahci
dev/nvme/nvme_ctrlr.c optional nvme
dev/nvme/nvme_ctrlr_cmd.c optional nvme
dev/nvme/nvme_ns.c optional nvme
dev/nvme/nvme_ns_cmd.c optional nvme
dev/nvme/nvme_pci.c optional nvme pci
dev/nvme/nvme_qpair.c optional nvme
dev/nvme/nvme_sim.c optional nvme scbus
dev/nvme/nvme_sysctl.c optional nvme
dev/nvme/nvme_test.c optional nvme
dev/nvme/nvme_util.c optional nvme
dev/oce/oce_hw.c optional oce pci
dev/oce/oce_if.c optional oce pci
dev/oce/oce_mbox.c optional oce pci
dev/oce/oce_queue.c optional oce pci
dev/oce/oce_sysctl.c optional oce pci
dev/oce/oce_util.c optional oce pci
dev/ocs_fc/ocs_gendump.c optional ocs_fc pci
dev/ocs_fc/ocs_pci.c optional ocs_fc pci
dev/ocs_fc/ocs_ioctl.c optional ocs_fc pci
dev/ocs_fc/ocs_os.c optional ocs_fc pci
dev/ocs_fc/ocs_utils.c optional ocs_fc pci
dev/ocs_fc/ocs_hw.c optional ocs_fc pci
dev/ocs_fc/ocs_hw_queues.c optional ocs_fc pci
dev/ocs_fc/sli4.c optional ocs_fc pci
dev/ocs_fc/ocs_sm.c optional ocs_fc pci
dev/ocs_fc/ocs_device.c optional ocs_fc pci
dev/ocs_fc/ocs_xport.c optional ocs_fc pci
dev/ocs_fc/ocs_domain.c optional ocs_fc pci
dev/ocs_fc/ocs_sport.c optional ocs_fc pci
dev/ocs_fc/ocs_els.c optional ocs_fc pci
dev/ocs_fc/ocs_fabric.c optional ocs_fc pci
dev/ocs_fc/ocs_io.c optional ocs_fc pci
dev/ocs_fc/ocs_node.c optional ocs_fc pci
dev/ocs_fc/ocs_scsi.c optional ocs_fc pci
dev/ocs_fc/ocs_unsol.c optional ocs_fc pci
dev/ocs_fc/ocs_ddump.c optional ocs_fc pci
dev/ocs_fc/ocs_mgmt.c optional ocs_fc pci
dev/ocs_fc/ocs_cam.c optional ocs_fc pci
dev/ofw/ofw_bus_if.m optional fdt
dev/ofw/ofw_bus_subr.c optional fdt
dev/ofw/ofw_cpu.c optional fdt
dev/ofw/ofw_fdt.c optional fdt
dev/ofw/ofw_if.m optional fdt
dev/ofw/ofw_graph.c optional fdt
dev/ofw/ofw_subr.c optional fdt
dev/ofw/ofwbus.c optional fdt
dev/ofw/openfirm.c optional fdt
dev/ofw/openfirmio.c optional fdt
dev/ow/ow.c optional ow \
dependency "owll_if.h" \
dependency "own_if.h"
dev/ow/owll_if.m optional ow
dev/ow/own_if.m optional ow
dev/ow/ow_temp.c optional ow_temp
dev/ow/owc_gpiobus.c optional owc gpio
dev/pbio/pbio.c optional pbio isa
dev/pccbb/pccbb.c optional cbb
dev/pccbb/pccbb_pci.c optional cbb pci
dev/pcf/pcf.c optional pcf
dev/pci/fixup_pci.c optional pci
dev/pci/hostb_pci.c optional pci
dev/pci/ignore_pci.c optional pci
dev/pci/isa_pci.c optional pci isa
dev/pci/pci.c optional pci
dev/pci/pci_if.m standard
dev/pci/pci_iov.c optional pci pci_iov
dev/pci/pci_iov_if.m standard
dev/pci/pci_iov_schema.c optional pci pci_iov
dev/pci/pci_pci.c optional pci
dev/pci/pci_subr.c optional pci
dev/pci/pci_user.c optional pci
dev/pci/pcib_if.m standard
dev/pci/pcib_support.c standard
dev/pci/vga_pci.c optional pci
dev/pms/freebsd/driver/ini/src/agtiapi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sadisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/mpi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saframe.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sahw.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sainit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saint.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sampicmd.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sampirsp.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saphy.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saport.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sasata.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sasmp.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sassp.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/satimer.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/sautil.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/saioctlcmd.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sallsdk/spc/mpidebug.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dminit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmsmp.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmdisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmport.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmtimer.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/discovery/dm/dmmisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/sminit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smmisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smsat.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smsatcb.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smsathw.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/sat/src/smtimer.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdinit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdmisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdesgl.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdport.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdint.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdioctl.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdhw.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/ossacmnapi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tddmcmnapi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdsmcmnapi.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/common/tdtimers.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sas/ini/itdio.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sas/ini/itdcb.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sas/ini/itdinit.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sas/ini/itddisc.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sata/host/sat.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sata/host/ossasat.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/pms/RefTisa/tisa/sassata/sata/host/sathw.c optional pmspcv \
compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w"
dev/ppbus/if_plip.c optional plip
dev/ppbus/lpbb.c optional lpbb
dev/ppbus/lpt.c optional lpt
dev/ppbus/pcfclock.c optional pcfclock
dev/ppbus/ppb_1284.c optional ppbus
dev/ppbus/ppb_base.c optional ppbus
dev/ppbus/ppb_msq.c optional ppbus
dev/ppbus/ppbconf.c optional ppbus
dev/ppbus/ppbus_if.m optional ppbus
dev/ppbus/ppi.c optional ppi
dev/ppbus/pps.c optional pps
dev/ppc/ppc.c optional ppc
dev/ppc/ppc_acpi.c optional ppc acpi
dev/ppc/ppc_isa.c optional ppc isa
dev/ppc/ppc_pci.c optional ppc pci
dev/ppc/ppc_puc.c optional ppc puc
dev/proto/proto_bus_isa.c optional proto acpi | proto isa
dev/proto/proto_bus_pci.c optional proto pci
dev/proto/proto_busdma.c optional proto
dev/proto/proto_core.c optional proto
dev/pst/pst-iop.c optional pst
dev/pst/pst-pci.c optional pst pci
dev/pst/pst-raid.c optional pst
dev/pty/pty.c optional pty
dev/puc/puc.c optional puc
dev/puc/puc_cfg.c optional puc
dev/puc/puc_pci.c optional puc pci
dev/pwm/pwmc.c optional pwm | pwmc
dev/pwm/pwmbus.c optional pwm | pwmbus
dev/pwm/pwmbus_if.m optional pwm | pwmbus
dev/pwm/ofw_pwm.c optional pwm fdt | pwmbus fdt
dev/pwm/ofw_pwmbus.c optional pwm fdt | pwmbus fdt
dev/pwm/pwm_backlight.c optional pwm pwm_backlight fdt
dev/quicc/quicc_core.c optional quicc
dev/ral/rt2560.c optional ral
dev/ral/rt2661.c optional ral
dev/ral/rt2860.c optional ral
dev/ral/if_ral_pci.c optional ral pci
rt2561fw.c optional rt2561fw | ralfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561.fw:rt2561fw -mrt2561 -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rt2561fw.c"
rt2561fw.fwo optional rt2561fw | ralfw \
dependency "rt2561.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rt2561fw.fwo"
rt2561.fw optional rt2561fw | ralfw \
dependency "$S/contrib/dev/ral/rt2561.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rt2561.fw"
rt2561sfw.c optional rt2561sfw | ralfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561s.fw:rt2561sfw -mrt2561s -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rt2561sfw.c"
rt2561sfw.fwo optional rt2561sfw | ralfw \
dependency "rt2561s.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rt2561sfw.fwo"
rt2561s.fw optional rt2561sfw | ralfw \
dependency "$S/contrib/dev/ral/rt2561s.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rt2561s.fw"
rt2661fw.c optional rt2661fw | ralfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rt2661.fw:rt2661fw -mrt2661 -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rt2661fw.c"
rt2661fw.fwo optional rt2661fw | ralfw \
dependency "rt2661.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rt2661fw.fwo"
rt2661.fw optional rt2661fw | ralfw \
dependency "$S/contrib/dev/ral/rt2661.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rt2661.fw"
rt2860fw.c optional rt2860fw | ralfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rt2860.fw:rt2860fw -mrt2860 -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rt2860fw.c"
rt2860fw.fwo optional rt2860fw | ralfw \
dependency "rt2860.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rt2860fw.fwo"
rt2860.fw optional rt2860fw | ralfw \
dependency "$S/contrib/dev/ral/rt2860.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rt2860.fw"
dev/random/random_infra.c standard
dev/random/random_harvestq.c standard
dev/random/randomdev.c optional !random_loadable
dev/random/fenestrasX/fx_brng.c optional !random_loadable random_fenestrasx
dev/random/fenestrasX/fx_main.c optional !random_loadable random_fenestrasx \
compile-with "${NORMAL_C} -I$S/crypto/blake2"
dev/random/fenestrasX/fx_pool.c optional !random_loadable random_fenestrasx \
compile-with "${NORMAL_C} -I$S/crypto/blake2"
dev/random/fenestrasX/fx_rng.c optional !random_loadable random_fenestrasx \
compile-with "${NORMAL_C} -I$S/crypto/blake2"
dev/random/fortuna.c optional !random_loadable !random_fenestrasx
dev/random/hash.c optional !random_loadable
dev/rccgpio/rccgpio.c optional rccgpio gpio
dev/re/if_re.c optional re
dev/rl/if_rl.c optional rl pci
dev/rndtest/rndtest.c optional rndtest
#
dev/rtsx/rtsx.c optional rtsx pci
#
dev/rtwn/if_rtwn.c optional rtwn
dev/rtwn/if_rtwn_beacon.c optional rtwn
dev/rtwn/if_rtwn_calib.c optional rtwn
dev/rtwn/if_rtwn_cam.c optional rtwn
dev/rtwn/if_rtwn_efuse.c optional rtwn
dev/rtwn/if_rtwn_fw.c optional rtwn
dev/rtwn/if_rtwn_rx.c optional rtwn
dev/rtwn/if_rtwn_task.c optional rtwn
dev/rtwn/if_rtwn_tx.c optional rtwn
#
dev/rtwn/pci/rtwn_pci_attach.c optional rtwn_pci pci
dev/rtwn/pci/rtwn_pci_reg.c optional rtwn_pci pci
dev/rtwn/pci/rtwn_pci_rx.c optional rtwn_pci pci
dev/rtwn/pci/rtwn_pci_tx.c optional rtwn_pci pci
#
dev/rtwn/usb/rtwn_usb_attach.c optional rtwn_usb
dev/rtwn/usb/rtwn_usb_ep.c optional rtwn_usb
dev/rtwn/usb/rtwn_usb_reg.c optional rtwn_usb
dev/rtwn/usb/rtwn_usb_rx.c optional rtwn_usb
dev/rtwn/usb/rtwn_usb_tx.c optional rtwn_usb
# RTL8188E
dev/rtwn/rtl8188e/r88e_beacon.c optional rtwn
dev/rtwn/rtl8188e/r88e_calib.c optional rtwn
dev/rtwn/rtl8188e/r88e_chan.c optional rtwn
dev/rtwn/rtl8188e/r88e_fw.c optional rtwn
dev/rtwn/rtl8188e/r88e_init.c optional rtwn
dev/rtwn/rtl8188e/r88e_led.c optional rtwn
dev/rtwn/rtl8188e/r88e_tx.c optional rtwn
dev/rtwn/rtl8188e/r88e_rf.c optional rtwn
dev/rtwn/rtl8188e/r88e_rom.c optional rtwn
dev/rtwn/rtl8188e/r88e_rx.c optional rtwn
dev/rtwn/rtl8188e/pci/r88ee_attach.c optional rtwn_pci pci
dev/rtwn/rtl8188e/pci/r88ee_init.c optional rtwn_pci pci
dev/rtwn/rtl8188e/pci/r88ee_rx.c optional rtwn_pci pci
dev/rtwn/rtl8188e/usb/r88eu_attach.c optional rtwn_usb
dev/rtwn/rtl8188e/usb/r88eu_init.c optional rtwn_usb
# RTL8192C
dev/rtwn/rtl8192c/r92c_attach.c optional rtwn
dev/rtwn/rtl8192c/r92c_beacon.c optional rtwn
dev/rtwn/rtl8192c/r92c_calib.c optional rtwn
dev/rtwn/rtl8192c/r92c_chan.c optional rtwn
dev/rtwn/rtl8192c/r92c_fw.c optional rtwn
dev/rtwn/rtl8192c/r92c_init.c optional rtwn
dev/rtwn/rtl8192c/r92c_llt.c optional rtwn
dev/rtwn/rtl8192c/r92c_rf.c optional rtwn
dev/rtwn/rtl8192c/r92c_rom.c optional rtwn
dev/rtwn/rtl8192c/r92c_rx.c optional rtwn
dev/rtwn/rtl8192c/r92c_tx.c optional rtwn
dev/rtwn/rtl8192c/pci/r92ce_attach.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_calib.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_fw.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_init.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_led.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_rx.c optional rtwn_pci pci
dev/rtwn/rtl8192c/pci/r92ce_tx.c optional rtwn_pci pci
dev/rtwn/rtl8192c/usb/r92cu_attach.c optional rtwn_usb
dev/rtwn/rtl8192c/usb/r92cu_init.c optional rtwn_usb
dev/rtwn/rtl8192c/usb/r92cu_led.c optional rtwn_usb
dev/rtwn/rtl8192c/usb/r92cu_rx.c optional rtwn_usb
dev/rtwn/rtl8192c/usb/r92cu_tx.c optional rtwn_usb
# RTL8192E
dev/rtwn/rtl8192e/r92e_chan.c optional rtwn
dev/rtwn/rtl8192e/r92e_fw.c optional rtwn
dev/rtwn/rtl8192e/r92e_init.c optional rtwn
dev/rtwn/rtl8192e/r92e_led.c optional rtwn
dev/rtwn/rtl8192e/r92e_rf.c optional rtwn
dev/rtwn/rtl8192e/r92e_rom.c optional rtwn
dev/rtwn/rtl8192e/r92e_rx.c optional rtwn
dev/rtwn/rtl8192e/usb/r92eu_attach.c optional rtwn_usb
dev/rtwn/rtl8192e/usb/r92eu_init.c optional rtwn_usb
# RTL8812A
dev/rtwn/rtl8812a/r12a_beacon.c optional rtwn
dev/rtwn/rtl8812a/r12a_calib.c optional rtwn
dev/rtwn/rtl8812a/r12a_caps.c optional rtwn
dev/rtwn/rtl8812a/r12a_chan.c optional rtwn
dev/rtwn/rtl8812a/r12a_fw.c optional rtwn
dev/rtwn/rtl8812a/r12a_init.c optional rtwn
dev/rtwn/rtl8812a/r12a_led.c optional rtwn
dev/rtwn/rtl8812a/r12a_rf.c optional rtwn
dev/rtwn/rtl8812a/r12a_rom.c optional rtwn
dev/rtwn/rtl8812a/r12a_rx.c optional rtwn
dev/rtwn/rtl8812a/r12a_tx.c optional rtwn
dev/rtwn/rtl8812a/usb/r12au_attach.c optional rtwn_usb
dev/rtwn/rtl8812a/usb/r12au_init.c optional rtwn_usb
dev/rtwn/rtl8812a/usb/r12au_rx.c optional rtwn_usb
dev/rtwn/rtl8812a/usb/r12au_tx.c optional rtwn_usb
# RTL8821A
dev/rtwn/rtl8821a/r21a_beacon.c optional rtwn
dev/rtwn/rtl8821a/r21a_calib.c optional rtwn
dev/rtwn/rtl8821a/r21a_chan.c optional rtwn
dev/rtwn/rtl8821a/r21a_fw.c optional rtwn
dev/rtwn/rtl8821a/r21a_init.c optional rtwn
dev/rtwn/rtl8821a/r21a_led.c optional rtwn
dev/rtwn/rtl8821a/r21a_rom.c optional rtwn
dev/rtwn/rtl8821a/r21a_rx.c optional rtwn
dev/rtwn/rtl8821a/usb/r21au_attach.c optional rtwn_usb
dev/rtwn/rtl8821a/usb/r21au_dfs.c optional rtwn_usb
dev/rtwn/rtl8821a/usb/r21au_init.c optional rtwn_usb
rtwn-rtl8188eefw.c optional rtwn-rtl8188eefw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8188eefw.fw:rtwn-rtl8188eefw:111 -mrtwn-rtl8188eefw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8188eefw.c"
rtwn-rtl8188eefw.fwo optional rtwn-rtl8188eefw | rtwnfw \
dependency "rtwn-rtl8188eefw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8188eefw.fwo"
rtwn-rtl8188eefw.fw optional rtwn-rtl8188eefw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8188eefw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8188eefw.fw"
rtwn-rtl8188eufw.c optional rtwn-rtl8188eufw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8188eufw.fw:rtwn-rtl8188eufw:111 -mrtwn-rtl8188eufw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8188eufw.c"
rtwn-rtl8188eufw.fwo optional rtwn-rtl8188eufw | rtwnfw \
dependency "rtwn-rtl8188eufw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8188eufw.fwo"
rtwn-rtl8188eufw.fw optional rtwn-rtl8188eufw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8188eufw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8188eufw.fw"
rtwn-rtl8192cfwE.c optional rtwn-rtl8192cfwE | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE.fw:rtwn-rtl8192cfwE:111 -mrtwn-rtl8192cfwE -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192cfwE.c"
rtwn-rtl8192cfwE.fwo optional rtwn-rtl8192cfwE | rtwnfw \
dependency "rtwn-rtl8192cfwE.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192cfwE.fwo"
rtwn-rtl8192cfwE.fw optional rtwn-rtl8192cfwE | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192cfwE.fw"
rtwn-rtl8192cfwE_B.c optional rtwn-rtl8192cfwE_B | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE_B.fw:rtwn-rtl8192cfwE_B:111 -mrtwn-rtl8192cfwE_B -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192cfwE_B.c"
rtwn-rtl8192cfwE_B.fwo optional rtwn-rtl8192cfwE_B | rtwnfw \
dependency "rtwn-rtl8192cfwE_B.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192cfwE_B.fwo"
rtwn-rtl8192cfwE_B.fw optional rtwn-rtl8192cfwE_B | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE_B.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192cfwE_B.fw"
rtwn-rtl8192cfwT.c optional rtwn-rtl8192cfwT | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwT.fw:rtwn-rtl8192cfwT:111 -mrtwn-rtl8192cfwT -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192cfwT.c"
rtwn-rtl8192cfwT.fwo optional rtwn-rtl8192cfwT | rtwnfw \
dependency "rtwn-rtl8192cfwT.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192cfwT.fwo"
rtwn-rtl8192cfwT.fw optional rtwn-rtl8192cfwT | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwT.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192cfwT.fw"
rtwn-rtl8192cfwU.c optional rtwn-rtl8192cfwU | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwU.fw:rtwn-rtl8192cfwU:111 -mrtwn-rtl8192cfwU -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192cfwU.c"
rtwn-rtl8192cfwU.fwo optional rtwn-rtl8192cfwU | rtwnfw \
dependency "rtwn-rtl8192cfwU.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192cfwU.fwo"
rtwn-rtl8192cfwU.fw optional rtwn-rtl8192cfwU | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwU.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192cfwU.fw"
rtwn-rtl8192eufw.c optional rtwn-rtl8192eufw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192eufw.fw:rtwn-rtl8192eufw:111 -mrtwn-rtl8192eufw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8192eufw.c"
rtwn-rtl8192eufw.fwo optional rtwn-rtl8192eufw | rtwnfw \
dependency "rtwn-rtl8192eufw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8192eufw.fwo"
rtwn-rtl8192eufw.fw optional rtwn-rtl8192eufw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8192eufw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8192eufw.fw"
rtwn-rtl8812aufw.c optional rtwn-rtl8812aufw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8812aufw.fw:rtwn-rtl8812aufw:111 -mrtwn-rtl8812aufw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8812aufw.c"
rtwn-rtl8812aufw.fwo optional rtwn-rtl8812aufw | rtwnfw \
dependency "rtwn-rtl8812aufw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8812aufw.fwo"
rtwn-rtl8812aufw.fw optional rtwn-rtl8812aufw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8812aufw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8812aufw.fw"
rtwn-rtl8821aufw.c optional rtwn-rtl8821aufw | rtwnfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8821aufw.fw:rtwn-rtl8821aufw:111 -mrtwn-rtl8821aufw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rtwn-rtl8821aufw.c"
rtwn-rtl8821aufw.fwo optional rtwn-rtl8821aufw | rtwnfw \
dependency "rtwn-rtl8821aufw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rtwn-rtl8821aufw.fwo"
rtwn-rtl8821aufw.fw optional rtwn-rtl8821aufw | rtwnfw \
dependency "$S/contrib/dev/rtwn/rtwn-rtl8821aufw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rtwn-rtl8821aufw.fw"
dev/safe/safe.c optional safe
dev/scc/scc_if.m optional scc
dev/scc/scc_bfe_quicc.c optional scc quicc
dev/scc/scc_core.c optional scc
dev/scc/scc_dev_quicc.c optional scc quicc
dev/scc/scc_dev_z8530.c optional scc
dev/sdhci/sdhci.c optional sdhci
dev/sdhci/sdhci_fdt.c optional sdhci fdt regulator clk
dev/sdhci/sdhci_fdt_gpio.c optional sdhci fdt gpio
dev/sdhci/sdhci_fsl_fdt.c optional sdhci fdt gpio regulator clk
dev/sdhci/sdhci_if.m optional sdhci
dev/sdhci/sdhci_acpi.c optional sdhci acpi
dev/sdhci/sdhci_pci.c optional sdhci pci
dev/sdio/sdio_if.m optional mmccam
dev/sdio/sdio_subr.c optional mmccam
dev/sdio/sdiob.c optional mmccam
dev/sge/if_sge.c optional sge pci
dev/siis/siis.c optional siis pci
dev/sis/if_sis.c optional sis pci
dev/sk/if_sk.c optional sk pci
dev/smbios/smbios.c optional smbios
dev/smbus/smb.c optional smb
dev/smbus/smbconf.c optional smbus
dev/smbus/smbus.c optional smbus
dev/smbus/smbus_if.m optional smbus
dev/smc/if_smc.c optional smc
dev/smc/if_smc_acpi.c optional smc acpi
dev/smc/if_smc_fdt.c optional smc fdt
dev/snp/snp.c optional snp
dev/sound/clone.c optional sound
dev/sound/unit.c optional sound
dev/sound/pci/als4000.c optional snd_als4000 pci
dev/sound/pci/atiixp.c optional snd_atiixp pci
dev/sound/pci/cmi.c optional snd_cmi pci
dev/sound/pci/cs4281.c optional snd_cs4281 pci
dev/sound/pci/csa.c optional snd_csa pci
dev/sound/pci/csapcm.c optional snd_csa pci
dev/sound/pci/emu10k1.c optional snd_emu10k1 pci
dev/sound/pci/emu10kx.c optional snd_emu10kx pci
dev/sound/pci/emu10kx-pcm.c optional snd_emu10kx pci
dev/sound/pci/emu10kx-midi.c optional snd_emu10kx pci
dev/sound/pci/envy24.c optional snd_envy24 pci
dev/sound/pci/envy24ht.c optional snd_envy24ht pci
dev/sound/pci/es137x.c optional snd_es137x pci
dev/sound/pci/fm801.c optional snd_fm801 pci
dev/sound/pci/ich.c optional snd_ich pci
dev/sound/pci/maestro3.c optional snd_maestro3 pci
dev/sound/pci/neomagic.c optional snd_neomagic pci
dev/sound/pci/solo.c optional snd_solo pci
dev/sound/pci/spicds.c optional snd_spicds pci
dev/sound/pci/t4dwave.c optional snd_t4dwave pci
dev/sound/pci/via8233.c optional snd_via8233 pci
dev/sound/pci/via82c686.c optional snd_via82c686 pci
dev/sound/pci/vibes.c optional snd_vibes pci
dev/sound/pci/hda/hdaa.c optional snd_hda pci
dev/sound/pci/hda/hdaa_patches.c optional snd_hda pci
dev/sound/pci/hda/hdac.c optional snd_hda pci
dev/sound/pci/hda/hdac_if.m optional snd_hda pci
dev/sound/pci/hda/hdacc.c optional snd_hda pci
dev/sound/pci/hdspe.c optional snd_hdspe pci
dev/sound/pci/hdspe-pcm.c optional snd_hdspe pci
dev/sound/pcm/ac97.c optional sound
dev/sound/pcm/ac97_if.m optional sound
dev/sound/pcm/ac97_patch.c optional sound
dev/sound/pcm/buffer.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/channel.c optional sound
dev/sound/pcm/channel_if.m optional sound
dev/sound/pcm/dsp.c optional sound
dev/sound/pcm/feeder.c optional sound
dev/sound/pcm/feeder_chain.c optional sound
dev/sound/pcm/feeder_eq.c optional sound \
dependency "feeder_eq_gen.h" \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_if.m optional sound
dev/sound/pcm/feeder_format.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_matrix.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_mixer.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_rate.c optional sound \
dependency "feeder_rate_gen.h" \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/feeder_volume.c optional sound \
dependency "snd_fxdiv_gen.h"
dev/sound/pcm/mixer.c optional sound
dev/sound/pcm/mixer_if.m optional sound
dev/sound/pcm/sndstat.c optional sound
dev/sound/pcm/sound.c optional sound
dev/sound/pcm/vchan.c optional sound
dev/sound/usb/uaudio.c optional snd_uaudio usb
dev/sound/usb/uaudio_pcm.c optional snd_uaudio usb
dev/sound/midi/midi.c optional sound
dev/sound/midi/mpu401.c optional sound
dev/sound/midi/mpu_if.m optional sound
dev/sound/midi/mpufoi_if.m optional sound
dev/sound/midi/sequencer.c optional sound
dev/sound/midi/synth_if.m optional sound
dev/spibus/ofw_spibus.c optional fdt spibus
dev/spibus/spibus.c optional spibus \
dependency "spibus_if.h"
dev/spibus/spigen.c optional spigen
dev/spibus/spibus_if.m optional spibus
dev/ste/if_ste.c optional ste pci
dev/stge/if_stge.c optional stge
dev/sym/sym_hipd.c optional sym \
dependency "$S/dev/sym/sym_{conf,defs}.h"
dev/syscons/blank/blank_saver.c optional blank_saver
dev/syscons/daemon/daemon_saver.c optional daemon_saver
dev/syscons/dragon/dragon_saver.c optional dragon_saver
dev/syscons/fade/fade_saver.c optional fade_saver
dev/syscons/fire/fire_saver.c optional fire_saver
dev/syscons/green/green_saver.c optional green_saver
dev/syscons/logo/logo.c optional logo_saver
dev/syscons/logo/logo_saver.c optional logo_saver
dev/syscons/rain/rain_saver.c optional rain_saver
dev/syscons/schistory.c optional sc
dev/syscons/scmouse.c optional sc
dev/syscons/scterm.c optional sc
dev/syscons/scterm-dumb.c optional sc !SC_NO_TERM_DUMB
dev/syscons/scterm-sc.c optional sc !SC_NO_TERM_SC
dev/syscons/scterm-teken.c optional sc !SC_NO_TERM_TEKEN
dev/syscons/scvidctl.c optional sc
dev/syscons/scvtb.c optional sc
dev/syscons/snake/snake_saver.c optional snake_saver
dev/syscons/star/star_saver.c optional star_saver
dev/syscons/syscons.c optional sc
dev/syscons/sysmouse.c optional sc
dev/syscons/warp/warp_saver.c optional warp_saver
dev/tcp_log/tcp_log_dev.c optional tcp_blackbox inet | tcp_blackbox inet6
dev/tdfx/tdfx_pci.c optional tdfx pci
dev/ti/if_ti.c optional ti pci
dev/tws/tws.c optional tws
dev/tws/tws_cam.c optional tws
dev/tws/tws_hdm.c optional tws
dev/tws/tws_services.c optional tws
dev/tws/tws_user.c optional tws
dev/uart/uart_bus_acpi.c optional uart acpi
dev/uart/uart_bus_fdt.c optional uart fdt
dev/uart/uart_bus_isa.c optional uart isa
dev/uart/uart_bus_pci.c optional uart pci
dev/uart/uart_bus_puc.c optional uart puc
dev/uart/uart_bus_scc.c optional uart scc
dev/uart/uart_core.c optional uart
dev/uart/uart_cpu_acpi.c optional uart acpi
dev/uart/uart_dbg.c optional uart gdb
dev/uart/uart_dev_imx.c optional uart uart_imx fdt
dev/uart/uart_dev_msm.c optional uart uart_msm fdt
dev/uart/uart_dev_mvebu.c optional uart uart_mvebu fdt
dev/uart/uart_dev_ns8250.c optional uart uart_ns8250 | uart uart_snps
dev/uart/uart_dev_pl011.c optional uart pl011
dev/uart/uart_dev_quicc.c optional uart quicc
dev/uart/uart_dev_snps.c optional uart uart_snps fdt
dev/uart/uart_dev_z8530.c optional uart uart_z8530 | uart scc
dev/uart/uart_if.m optional uart
dev/uart/uart_subr.c optional uart
dev/uart/uart_tty.c optional uart
#
# USB controller drivers
#
dev/usb/controller/musb_otg.c optional musb
dev/usb/controller/dwc_otg.c optional dwcotg
dev/usb/controller/dwc_otg_fdt.c optional dwcotg fdt
dev/usb/controller/dwc_otg_acpi.c optional dwcotg acpi
dev/usb/controller/ehci.c optional ehci
dev/usb/controller/ehci_msm.c optional ehci_msm fdt
dev/usb/controller/ehci_pci.c optional ehci pci
dev/usb/controller/ohci.c optional ohci
dev/usb/controller/ohci_pci.c optional ohci pci
dev/usb/controller/uhci.c optional uhci
dev/usb/controller/uhci_pci.c optional uhci pci
dev/usb/controller/xhci.c optional xhci
dev/usb/controller/xhci_pci.c optional xhci pci
dev/usb/controller/saf1761_otg.c optional saf1761otg
dev/usb/controller/saf1761_otg_fdt.c optional saf1761otg fdt
dev/usb/controller/uss820dci.c optional uss820dci
dev/usb/controller/usb_controller.c optional usb
#
# USB storage drivers
#
dev/usb/storage/cfumass.c optional cfumass ctl
dev/usb/storage/umass.c optional umass
dev/usb/storage/urio.c optional urio
dev/usb/storage/ustorage_fs.c optional usfs
#
# USB core
#
dev/usb/usb_busdma.c optional usb
dev/usb/usb_core.c optional usb
dev/usb/usb_debug.c optional usb
dev/usb/usb_dev.c optional usb
dev/usb/usb_device.c optional usb
dev/usb/usb_dynamic.c optional usb
dev/usb/usb_error.c optional usb
dev/usb/usb_fdt_support.c optional usb fdt
dev/usb/usb_generic.c optional usb
dev/usb/usb_handle_request.c optional usb
dev/usb/usb_hid.c optional usb
dev/usb/usb_hub.c optional usb
dev/usb/usb_hub_acpi.c optional uacpi acpi
dev/usb/usb_if.m optional usb
dev/usb/usb_lookup.c optional usb
dev/usb/usb_mbuf.c optional usb
dev/usb/usb_msctest.c optional usb
dev/usb/usb_parse.c optional usb
dev/usb/usb_pf.c optional usb
dev/usb/usb_process.c optional usb
dev/usb/usb_request.c optional usb
dev/usb/usb_transfer.c optional usb
dev/usb/usb_util.c optional usb
#
# USB network drivers
#
dev/usb/net/if_aue.c optional aue
dev/usb/net/if_axe.c optional axe
dev/usb/net/if_axge.c optional axge
dev/usb/net/if_cdce.c optional cdce
dev/usb/net/if_cdceem.c optional cdceem
dev/usb/net/if_cue.c optional cue
dev/usb/net/if_ipheth.c optional ipheth
dev/usb/net/if_kue.c optional kue
dev/usb/net/if_mos.c optional mos
dev/usb/net/if_muge.c optional muge
dev/usb/net/if_rue.c optional rue
dev/usb/net/if_smsc.c optional smsc
dev/usb/net/if_udav.c optional udav
dev/usb/net/if_ure.c optional ure
dev/usb/net/if_usie.c optional usie
dev/usb/net/if_urndis.c optional urndis
dev/usb/net/ruephy.c optional rue
dev/usb/net/usb_ethernet.c optional uether | aue | axe | axge | cdce | \
cdceem | cue | ipheth | kue | mos | \
rue | smsc | udav | ure | urndis | muge
dev/usb/net/uhso.c optional uhso
#
# USB WLAN drivers
#
dev/usb/wlan/if_rsu.c optional rsu
rsu-rtl8712fw.c optional rsu-rtl8712fw | rsufw \
compile-with "${AWK} -f $S/tools/fw_stub.awk rsu-rtl8712fw.fw:rsu-rtl8712fw:120 -mrsu-rtl8712fw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "rsu-rtl8712fw.c"
rsu-rtl8712fw.fwo optional rsu-rtl8712fw | rsufw \
dependency "rsu-rtl8712fw.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "rsu-rtl8712fw.fwo"
rsu-rtl8712fw.fw optional rsu-rtl8712fw | rsufw \
dependency "$S/contrib/dev/rsu/rsu-rtl8712fw.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "rsu-rtl8712fw.fw"
dev/usb/wlan/if_rum.c optional rum
dev/usb/wlan/if_run.c optional run
runfw.c optional runfw \
compile-with "${AWK} -f $S/tools/fw_stub.awk run.fw:runfw -mrunfw -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "runfw.c"
runfw.fwo optional runfw \
dependency "run.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "runfw.fwo"
run.fw optional runfw \
dependency "$S/contrib/dev/run/rt2870.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "run.fw"
dev/usb/wlan/if_uath.c optional uath
dev/usb/wlan/if_upgt.c optional upgt
dev/usb/wlan/if_ural.c optional ural
dev/usb/wlan/if_urtw.c optional urtw
dev/usb/wlan/if_zyd.c optional zyd
#
# USB serial and parallel port drivers
#
dev/usb/serial/u3g.c optional u3g
dev/usb/serial/uark.c optional uark
dev/usb/serial/ubsa.c optional ubsa
dev/usb/serial/ubser.c optional ubser
dev/usb/serial/uchcom.c optional uchcom
dev/usb/serial/ucycom.c optional ucycom
dev/usb/serial/ufoma.c optional ufoma
dev/usb/serial/uftdi.c optional uftdi
dev/usb/serial/ugensa.c optional ugensa
dev/usb/serial/uipaq.c optional uipaq
dev/usb/serial/ulpt.c optional ulpt
dev/usb/serial/umcs.c optional umcs
dev/usb/serial/umct.c optional umct
dev/usb/serial/umodem.c optional umodem
dev/usb/serial/umoscom.c optional umoscom
dev/usb/serial/uplcom.c optional uplcom
dev/usb/serial/uslcom.c optional uslcom
dev/usb/serial/uvisor.c optional uvisor
dev/usb/serial/uvscom.c optional uvscom
dev/usb/serial/usb_serial.c optional ucom | u3g | uark | ubsa | ubser | \
uchcom | ucycom | ufoma | uftdi | \
ugensa | uipaq | umcs | umct | \
umodem | umoscom | uplcom | usie | \
uslcom | uvisor | uvscom
#
# USB misc drivers
#
dev/usb/misc/cp2112.c optional cp2112
dev/usb/misc/udbp.c optional udbp
dev/usb/misc/ugold.c optional ugold
dev/usb/misc/uled.c optional uled
#
# USB input drivers
#
dev/usb/input/atp.c optional atp
dev/usb/input/uep.c optional uep
dev/usb/input/uhid.c optional uhid
dev/usb/input/uhid_snes.c optional uhid_snes
dev/usb/input/ukbd.c optional ukbd
dev/usb/input/ums.c optional ums
dev/usb/input/usbhid.c optional usbhid
dev/usb/input/wmt.c optional wmt
dev/usb/input/wsp.c optional wsp
#
# USB quirks
#
dev/usb/quirk/usb_quirk.c optional usb
#
# USB templates
#
dev/usb/template/usb_template.c optional usb_template
dev/usb/template/usb_template_audio.c optional usb_template
dev/usb/template/usb_template_cdce.c optional usb_template
dev/usb/template/usb_template_kbd.c optional usb_template
dev/usb/template/usb_template_modem.c optional usb_template
dev/usb/template/usb_template_mouse.c optional usb_template
dev/usb/template/usb_template_msc.c optional usb_template
dev/usb/template/usb_template_mtp.c optional usb_template
dev/usb/template/usb_template_phone.c optional usb_template
dev/usb/template/usb_template_serialnet.c optional usb_template
dev/usb/template/usb_template_midi.c optional usb_template
dev/usb/template/usb_template_multi.c optional usb_template
dev/usb/template/usb_template_cdceem.c optional usb_template
#
# USB video drivers
#
dev/usb/video/udl.c optional udl
#
# USB END
#
dev/videomode/videomode.c optional videomode
dev/videomode/edid.c optional videomode
dev/videomode/pickmode.c optional videomode
dev/videomode/vesagtf.c optional videomode
dev/veriexec/verified_exec.c optional mac_veriexec
dev/vge/if_vge.c optional vge
dev/viapm/viapm.c optional viapm pci
dev/virtio/virtio.c optional virtio
dev/virtio/virtqueue.c optional virtio
dev/virtio/virtio_bus_if.m optional virtio
dev/virtio/virtio_if.m optional virtio
dev/virtio/pci/virtio_pci.c optional virtio_pci
dev/virtio/pci/virtio_pci_if.m optional virtio_pci
dev/virtio/pci/virtio_pci_legacy.c optional virtio_pci
dev/virtio/pci/virtio_pci_modern.c optional virtio_pci
dev/virtio/mmio/virtio_mmio.c optional virtio_mmio
dev/virtio/mmio/virtio_mmio_acpi.c optional virtio_mmio acpi
dev/virtio/mmio/virtio_mmio_cmdline.c optional virtio_mmio
dev/virtio/mmio/virtio_mmio_fdt.c optional virtio_mmio fdt
dev/virtio/mmio/virtio_mmio_if.m optional virtio_mmio
dev/virtio/network/if_vtnet.c optional vtnet
dev/virtio/block/virtio_blk.c optional virtio_blk
dev/virtio/balloon/virtio_balloon.c optional virtio_balloon
dev/virtio/scsi/virtio_scsi.c optional virtio_scsi
dev/virtio/random/virtio_random.c optional virtio_random
dev/virtio/console/virtio_console.c optional virtio_console
dev/vkbd/vkbd.c optional vkbd
dev/vmgenc/vmgenc_acpi.c optional acpi
dev/vmware/vmxnet3/if_vmx.c optional vmx
dev/vmware/vmci/vmci.c optional vmci
dev/vmware/vmci/vmci_datagram.c optional vmci
dev/vmware/vmci/vmci_doorbell.c optional vmci
dev/vmware/vmci/vmci_driver.c optional vmci
dev/vmware/vmci/vmci_event.c optional vmci
dev/vmware/vmci/vmci_hashtable.c optional vmci
dev/vmware/vmci/vmci_kernel_if.c optional vmci
dev/vmware/vmci/vmci_qpair.c optional vmci
dev/vmware/vmci/vmci_queue_pair.c optional vmci
dev/vmware/vmci/vmci_resource.c optional vmci
dev/vmware/pvscsi/pvscsi.c optional pvscsi
dev/vr/if_vr.c optional vr pci
dev/vt/colors/vt_termcolors.c optional vt
dev/vt/font/vt_font_default.c optional vt
dev/vt/font/vt_mouse_cursor.c optional vt
dev/vt/hw/efifb/efifb.c optional vt_efifb
dev/vt/hw/simplefb/simplefb.c optional vt_simplefb fdt
dev/vt/hw/vbefb/vbefb.c optional vt_vbefb
dev/vt/hw/fb/vt_fb.c optional vt
dev/vt/hw/vga/vt_vga.c optional vt vt_vga
dev/vt/logo/logo_freebsd.c optional vt splash
dev/vt/logo/logo_beastie.c optional vt splash
dev/vt/vt_buf.c optional vt
dev/vt/vt_consolectl.c optional vt
dev/vt/vt_core.c optional vt
dev/vt/vt_cpulogos.c optional vt splash
dev/vt/vt_font.c optional vt
dev/vt/vt_sysmouse.c optional vt
dev/vte/if_vte.c optional vte pci
dev/watchdog/watchdog.c standard
dev/wg/if_wg.c optional wg \
compile-with "${NORMAL_C} -include $S/dev/wg/compat.h"
dev/wg/wg_cookie.c optional wg \
compile-with "${NORMAL_C} -include $S/dev/wg/compat.h"
dev/wg/wg_crypto.c optional wg \
compile-with "${NORMAL_C} -include $S/dev/wg/compat.h"
dev/wg/wg_noise.c optional wg \
compile-with "${NORMAL_C} -include $S/dev/wg/compat.h"
dev/wpi/if_wpi.c optional wpi pci
wpifw.c optional wpifw \
compile-with "${AWK} -f $S/tools/fw_stub.awk wpi.fw:wpifw:153229 -mwpi -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
clean "wpifw.c"
wpifw.fwo optional wpifw \
dependency "wpi.fw" \
compile-with "${NORMAL_FWO}" \
no-implicit-rule \
clean "wpifw.fwo"
wpi.fw optional wpifw \
dependency "$S/contrib/dev/wpi/iwlwifi-3945-15.32.2.9.fw.uu" \
compile-with "${NORMAL_FW}" \
no-obj no-implicit-rule \
clean "wpi.fw"
dev/xdma/controller/pl330.c optional xdma pl330 fdt
dev/xdma/xdma.c optional xdma
dev/xdma/xdma_bank.c optional xdma
dev/xdma/xdma_bio.c optional xdma
dev/xdma/xdma_fdt_test.c optional xdma xdma_test fdt
dev/xdma/xdma_if.m optional xdma
dev/xdma/xdma_iommu.c optional xdma
dev/xdma/xdma_mbuf.c optional xdma
dev/xdma/xdma_queue.c optional xdma
dev/xdma/xdma_sg.c optional xdma
dev/xdma/xdma_sglist.c optional xdma
dev/xen/balloon/balloon.c optional xenhvm
dev/xen/blkfront/blkfront.c optional xenhvm
dev/xen/blkback/blkback.c optional xenhvm
dev/xen/bus/xen_intr.c optional xenhvm
dev/xen/bus/xenpv.c optional xenhvm
dev/xen/console/xen_console.c optional xenhvm
dev/xen/control/control.c optional xenhvm
dev/xen/cpu/xen_acpi_cpu.c optional xenhvm
dev/xen/efi/pvefi.c optional xenhvm xenefi efirt
dev/xen/grant_table/grant_table.c optional xenhvm
dev/xen/netback/netback.c optional xenhvm
dev/xen/netfront/netfront.c optional xenhvm
dev/xen/timer/xen_timer.c optional xenhvm xentimer
dev/xen/xenpci/xenpci.c optional xenpci
dev/xen/xenstore/xenstore.c optional xenhvm
dev/xen/xenstore/xenstore_dev.c optional xenhvm
dev/xen/xenstore/xenstored_dev.c optional xenhvm
dev/xen/evtchn/evtchn_dev.c optional xenhvm
dev/xen/privcmd/privcmd.c optional xenhvm
dev/xen/gntdev/gntdev.c optional xenhvm
dev/xen/debug/debug.c optional xenhvm
dev/xl/if_xl.c optional xl pci
dev/xl/xlphy.c optional xl pci
fs/autofs/autofs.c optional autofs
fs/autofs/autofs_vfsops.c optional autofs
fs/autofs/autofs_vnops.c optional autofs
fs/deadfs/dead_vnops.c standard
fs/devfs/devfs_devs.c standard
fs/devfs/devfs_dir.c standard
fs/devfs/devfs_rule.c standard
fs/devfs/devfs_vfsops.c standard
fs/devfs/devfs_vnops.c standard
fs/fdescfs/fdesc_vfsops.c optional fdescfs
fs/fdescfs/fdesc_vnops.c optional fdescfs
fs/fifofs/fifo_vnops.c standard
fs/cuse/cuse.c optional cuse
fs/fuse/fuse_device.c optional fusefs
fs/fuse/fuse_file.c optional fusefs
fs/fuse/fuse_internal.c optional fusefs
fs/fuse/fuse_io.c optional fusefs
fs/fuse/fuse_ipc.c optional fusefs
fs/fuse/fuse_main.c optional fusefs
fs/fuse/fuse_node.c optional fusefs
fs/fuse/fuse_vfsops.c optional fusefs
fs/fuse/fuse_vnops.c optional fusefs
fs/mntfs/mntfs_vnops.c standard
fs/msdosfs/msdosfs_conv.c optional msdosfs
fs/msdosfs/msdosfs_denode.c optional msdosfs
fs/msdosfs/msdosfs_fat.c optional msdosfs
fs/msdosfs/msdosfs_iconv.c optional msdosfs_iconv
fs/msdosfs/msdosfs_lookup.c optional msdosfs
fs/msdosfs/msdosfs_vfsops.c optional msdosfs
fs/msdosfs/msdosfs_vnops.c optional msdosfs
fs/nfs/nfs_commonkrpc.c optional nfscl | nfslockd | nfsd
fs/nfs/nfs_commonsubs.c optional nfscl | nfslockd | nfsd
fs/nfs/nfs_commonport.c optional nfscl | nfslockd | nfsd
fs/nfs/nfs_commonacl.c optional nfscl | nfslockd | nfsd
fs/nfsclient/nfs_clcomsubs.c optional nfscl
fs/nfsclient/nfs_clsubs.c optional nfscl
fs/nfsclient/nfs_clstate.c optional nfscl
fs/nfsclient/nfs_clkrpc.c optional nfscl
fs/nfsclient/nfs_clrpcops.c optional nfscl
fs/nfsclient/nfs_clvnops.c optional nfscl
fs/nfsclient/nfs_clnode.c optional nfscl
fs/nfsclient/nfs_clvfsops.c optional nfscl
fs/nfsclient/nfs_clport.c optional nfscl
fs/nfsclient/nfs_clbio.c optional nfscl
fs/nfsclient/nfs_clnfsiod.c optional nfscl
fs/nfsserver/nfs_fha_new.c optional nfsd inet
fs/nfsserver/nfs_nfsdsocket.c optional nfsd inet
fs/nfsserver/nfs_nfsdsubs.c optional nfsd inet
fs/nfsserver/nfs_nfsdstate.c optional nfsd inet
fs/nfsserver/nfs_nfsdkrpc.c optional nfsd inet
fs/nfsserver/nfs_nfsdserv.c optional nfsd inet
fs/nfsserver/nfs_nfsdport.c optional nfsd inet
fs/nfsserver/nfs_nfsdcache.c optional nfsd inet
fs/nullfs/null_subr.c optional nullfs
fs/nullfs/null_vfsops.c optional nullfs
fs/nullfs/null_vnops.c optional nullfs
fs/procfs/procfs.c optional procfs
fs/procfs/procfs_dbregs.c optional procfs
fs/procfs/procfs_fpregs.c optional procfs
fs/procfs/procfs_map.c optional procfs
fs/procfs/procfs_mem.c optional procfs
fs/procfs/procfs_note.c optional procfs
fs/procfs/procfs_osrel.c optional procfs
fs/procfs/procfs_regs.c optional procfs
fs/procfs/procfs_rlimit.c optional procfs
fs/procfs/procfs_status.c optional procfs
fs/procfs/procfs_type.c optional procfs
fs/pseudofs/pseudofs.c optional pseudofs
fs/pseudofs/pseudofs_fileno.c optional pseudofs
fs/pseudofs/pseudofs_vncache.c optional pseudofs
fs/pseudofs/pseudofs_vnops.c optional pseudofs
fs/smbfs/smbfs_io.c optional smbfs
fs/smbfs/smbfs_node.c optional smbfs
fs/smbfs/smbfs_smb.c optional smbfs
fs/smbfs/smbfs_subr.c optional smbfs
fs/smbfs/smbfs_vfsops.c optional smbfs
fs/smbfs/smbfs_vnops.c optional smbfs
fs/tarfs/tarfs_io.c optional tarfs compile-with "${NORMAL_C} -I$S/contrib/zstd/lib/freebsd"
fs/tarfs/tarfs_subr.c optional tarfs
fs/tarfs/tarfs_vfsops.c optional tarfs
fs/tarfs/tarfs_vnops.c optional tarfs
fs/udf/osta.c optional udf
fs/udf/udf_iconv.c optional udf_iconv
fs/udf/udf_vfsops.c optional udf
fs/udf/udf_vnops.c optional udf
fs/unionfs/union_subr.c optional unionfs
fs/unionfs/union_vfsops.c optional unionfs
fs/unionfs/union_vnops.c optional unionfs
fs/tmpfs/tmpfs_vnops.c optional tmpfs
fs/tmpfs/tmpfs_fifoops.c optional tmpfs
fs/tmpfs/tmpfs_vfsops.c optional tmpfs
fs/tmpfs/tmpfs_subr.c optional tmpfs
gdb/gdb_cons.c optional gdb
gdb/gdb_main.c optional gdb
gdb/gdb_packet.c optional gdb
gdb/netgdb.c optional ddb debugnet gdb netgdb inet
geom/bde/g_bde.c optional geom_bde
geom/bde/g_bde_crypt.c optional geom_bde
geom/bde/g_bde_lock.c optional geom_bde
geom/bde/g_bde_work.c optional geom_bde
geom/cache/g_cache.c optional geom_cache
geom/concat/g_concat.c optional geom_concat
geom/eli/g_eli.c optional geom_eli
geom/eli/g_eli_crypto.c optional geom_eli
geom/eli/g_eli_ctl.c optional geom_eli
geom/eli/g_eli_hmac.c optional geom_eli
geom/eli/g_eli_integrity.c optional geom_eli
geom/eli/g_eli_key.c optional geom_eli
geom/eli/g_eli_key_cache.c optional geom_eli
geom/eli/g_eli_privacy.c optional geom_eli
geom/eli/pkcs5v2.c optional geom_eli
geom/gate/g_gate.c optional geom_gate
geom/geom_bsd_enc.c optional geom_part_bsd
geom/geom_ccd.c optional ccd | geom_ccd
geom/geom_ctl.c standard
geom/geom_dev.c standard
geom/geom_disk.c standard
geom/geom_dump.c standard
geom/geom_event.c standard
geom/geom_flashmap.c optional fdt cfi | fdt mx25l | mmcsd | fdt n25q | fdt at45d
geom/geom_io.c standard
geom/geom_kern.c standard
geom/geom_map.c optional geom_map
geom/geom_redboot.c optional geom_redboot
geom/geom_slice.c standard
geom/geom_subr.c standard
geom/geom_vfs.c standard
geom/journal/g_journal.c optional geom_journal
geom/journal/g_journal_ufs.c optional geom_journal
geom/label/g_label.c optional geom_label | geom_label_gpt
geom/label/g_label_ext2fs.c optional geom_label
geom/label/g_label_flashmap.c optional geom_label
geom/label/g_label_iso9660.c optional geom_label
geom/label/g_label_msdosfs.c optional geom_label
geom/label/g_label_ntfs.c optional geom_label
geom/label/g_label_reiserfs.c optional geom_label
geom/label/g_label_ufs.c optional geom_label
geom/label/g_label_gpt.c optional geom_label | geom_label_gpt
geom/label/g_label_disk_ident.c optional geom_label
geom/linux_lvm/g_linux_lvm.c optional geom_linux_lvm
geom/mirror/g_mirror.c optional geom_mirror
geom/mirror/g_mirror_ctl.c optional geom_mirror
geom/mountver/g_mountver.c optional geom_mountver
geom/multipath/g_multipath.c optional geom_multipath
geom/nop/g_nop.c optional geom_nop
geom/part/g_part.c standard
geom/part/g_part_if.m standard
geom/part/g_part_apm.c optional geom_part_apm
geom/part/g_part_bsd.c optional geom_part_bsd
geom/part/g_part_bsd64.c optional geom_part_bsd64
geom/part/g_part_ebr.c optional geom_part_ebr
geom/part/g_part_gpt.c optional geom_part_gpt
geom/part/g_part_ldm.c optional geom_part_ldm
geom/part/g_part_mbr.c optional geom_part_mbr
geom/part/g_part_vtoc8.c optional geom_part_vtoc8
geom/raid/g_raid.c optional geom_raid
geom/raid/g_raid_ctl.c optional geom_raid
geom/raid/g_raid_md_if.m optional geom_raid
geom/raid/g_raid_tr_if.m optional geom_raid
geom/raid/md_ddf.c optional geom_raid
geom/raid/md_intel.c optional geom_raid
geom/raid/md_jmicron.c optional geom_raid
geom/raid/md_nvidia.c optional geom_raid
geom/raid/md_promise.c optional geom_raid
geom/raid/md_sii.c optional geom_raid
geom/raid/tr_concat.c optional geom_raid
geom/raid/tr_raid0.c optional geom_raid
geom/raid/tr_raid1.c optional geom_raid
geom/raid/tr_raid1e.c optional geom_raid
geom/raid/tr_raid5.c optional geom_raid
geom/raid3/g_raid3.c optional geom_raid3
geom/raid3/g_raid3_ctl.c optional geom_raid3
geom/shsec/g_shsec.c optional geom_shsec
geom/stripe/g_stripe.c optional geom_stripe
geom/union/g_union.c optional geom_union
geom/uzip/g_uzip.c optional geom_uzip
geom/uzip/g_uzip_lzma.c optional geom_uzip
geom/uzip/g_uzip_wrkthr.c optional geom_uzip
geom/uzip/g_uzip_zlib.c optional geom_uzip
geom/uzip/g_uzip_zstd.c optional geom_uzip zstdio \
compile-with "${NORMAL_C} -I$S/contrib/zstd/lib/freebsd"
geom/vinum/geom_vinum.c optional geom_vinum
geom/vinum/geom_vinum_create.c optional geom_vinum
geom/vinum/geom_vinum_drive.c optional geom_vinum
geom/vinum/geom_vinum_plex.c optional geom_vinum
geom/vinum/geom_vinum_volume.c optional geom_vinum
geom/vinum/geom_vinum_subr.c optional geom_vinum
geom/vinum/geom_vinum_raid5.c optional geom_vinum
geom/vinum/geom_vinum_share.c optional geom_vinum
geom/vinum/geom_vinum_list.c optional geom_vinum
geom/vinum/geom_vinum_rm.c optional geom_vinum
geom/vinum/geom_vinum_init.c optional geom_vinum
geom/vinum/geom_vinum_state.c optional geom_vinum
geom/vinum/geom_vinum_rename.c optional geom_vinum
geom/vinum/geom_vinum_move.c optional geom_vinum
geom/vinum/geom_vinum_events.c optional geom_vinum
geom/virstor/binstream.c optional geom_virstor
geom/virstor/g_virstor.c optional geom_virstor
geom/virstor/g_virstor_md.c optional geom_virstor
geom/zero/g_zero.c optional geom_zero
fs/ext2fs/ext2_acl.c optional ext2fs
fs/ext2fs/ext2_alloc.c optional ext2fs
fs/ext2fs/ext2_balloc.c optional ext2fs
fs/ext2fs/ext2_bmap.c optional ext2fs
fs/ext2fs/ext2_csum.c optional ext2fs
fs/ext2fs/ext2_extattr.c optional ext2fs
fs/ext2fs/ext2_extents.c optional ext2fs
fs/ext2fs/ext2_inode.c optional ext2fs
fs/ext2fs/ext2_inode_cnv.c optional ext2fs
fs/ext2fs/ext2_hash.c optional ext2fs
fs/ext2fs/ext2_htree.c optional ext2fs
fs/ext2fs/ext2_lookup.c optional ext2fs
fs/ext2fs/ext2_subr.c optional ext2fs
fs/ext2fs/ext2_vfsops.c optional ext2fs
fs/ext2fs/ext2_vnops.c optional ext2fs
#
isa/isa_if.m standard
isa/isa_common.c optional isa
isa/isahint.c optional isa
isa/pnp.c optional isa isapnp
isa/pnpparse.c optional isa isapnp
fs/cd9660/cd9660_bmap.c optional cd9660
fs/cd9660/cd9660_lookup.c optional cd9660
fs/cd9660/cd9660_node.c optional cd9660
fs/cd9660/cd9660_rrip.c optional cd9660
fs/cd9660/cd9660_util.c optional cd9660
fs/cd9660/cd9660_vfsops.c optional cd9660
fs/cd9660/cd9660_vnops.c optional cd9660
fs/cd9660/cd9660_iconv.c optional cd9660_iconv
gnu/gcov/gcc_4_7.c optional gcov \
warning "kernel contains GPL licensed gcov support"
gnu/gcov/gcov_fs.c optional gcov lindebugfs \
compile-with "${LINUXKPI_C}"
gnu/gcov/gcov_subr.c optional gcov
kern/bus_if.m standard
kern/clock_if.m standard
kern/cpufreq_if.m standard
kern/device_if.m standard
kern/imgact_binmisc.c optional imgact_binmisc
kern/imgact_elf.c standard
kern/imgact_elf32.c optional compat_freebsd32
kern/imgact_shell.c standard
kern/init_main.c standard
kern/init_sysent.c standard
kern/ksched.c optional _kposix_priority_scheduling
kern/kern_acct.c standard
kern/kern_alq.c optional alq
kern/kern_boottrace.c standard
kern/kern_clock.c standard
kern/kern_clocksource.c standard
kern/kern_condvar.c standard
kern/kern_conf.c standard
kern/kern_cons.c standard
kern/kern_cpu.c standard
kern/kern_cpuset.c standard
kern/kern_context.c standard
kern/kern_descrip.c standard
kern/kern_devctl.c standard
kern/kern_dtrace.c optional kdtrace_hooks
kern/kern_dump.c standard
kern/kern_environment.c standard
kern/kern_et.c standard
kern/kern_event.c standard
kern/kern_exec.c standard
kern/kern_exit.c standard
kern/kern_fail.c standard
kern/kern_ffclock.c standard
kern/kern_fork.c standard
kern/kern_hhook.c standard
kern/kern_idle.c standard
kern/kern_intr.c standard
kern/kern_jail.c standard
kern/kern_kcov.c optional kcov \
compile-with "${NORMAL_C:N-fsanitize*} ${NORMAL_C:M-fsanitize=kernel-memory}"
kern/kern_khelp.c standard
kern/kern_kthread.c standard
kern/kern_ktr.c optional ktr
kern/kern_ktrace.c standard
kern/kern_linker.c standard
kern/kern_lock.c standard
kern/kern_lockf.c standard
kern/kern_lockstat.c optional kdtrace_hooks
kern/kern_loginclass.c standard
kern/kern_malloc.c standard
kern/kern_mbuf.c standard
kern/kern_mib.c standard
kern/kern_module.c standard
kern/kern_mtxpool.c standard
kern/kern_mutex.c standard
kern/kern_ntptime.c standard
kern/kern_osd.c standard
kern/kern_physio.c standard
kern/kern_pmc.c standard
kern/kern_poll.c optional device_polling
kern/kern_priv.c standard
kern/kern_proc.c standard
kern/kern_procctl.c standard
kern/kern_prot.c standard
kern/kern_racct.c standard
kern/kern_rangelock.c standard
kern/kern_rctl.c standard
kern/kern_resource.c standard
kern/kern_rmlock.c standard
kern/kern_rwlock.c standard
kern/kern_sdt.c optional kdtrace_hooks
kern/kern_sema.c standard
kern/kern_sendfile.c standard
kern/kern_sharedpage.c standard
kern/kern_shutdown.c standard
kern/kern_sig.c standard
kern/kern_switch.c standard
kern/kern_sx.c standard
kern/kern_synch.c standard
kern/kern_syscalls.c standard
kern/kern_sysctl.c standard
kern/kern_tc.c standard
kern/kern_thr.c standard
kern/kern_thread.c standard
kern/kern_time.c standard
kern/kern_timeout.c standard
kern/kern_tslog.c optional tslog
kern/kern_ubsan.c optional kubsan
kern/kern_umtx.c standard
kern/kern_uuid.c standard
kern/kern_vnodedumper.c standard
kern/kern_xxx.c standard
kern/link_elf.c standard
kern/linker_if.m standard
kern/md4c.c optional netsmb
kern/md5c.c standard
kern/p1003_1b.c standard
kern/posix4_mib.c standard
kern/sched_4bsd.c optional sched_4bsd
kern/sched_ule.c optional sched_ule
kern/serdev_if.m standard
kern/stack_protector.c standard \
compile-with "${NORMAL_C:N-fstack-protector*}"
kern/subr_acl_nfs4.c optional ufs_acl | zfs
kern/subr_acl_posix1e.c optional ufs_acl
kern/subr_asan.c optional kasan \
compile-with "${NORMAL_C:N-fsanitize*:N-fstack-protector*}"
kern/subr_autoconf.c standard
kern/subr_blist.c standard
kern/subr_boot.c standard
kern/subr_bus.c standard
kern/subr_bus_dma.c standard
kern/subr_bufring.c standard
kern/subr_capability.c standard
kern/subr_clock.c standard
kern/subr_compressor.c standard \
compile-with "${NORMAL_C} -I$S/contrib/zstd/lib/freebsd"
kern/subr_coverage.c optional coverage \
compile-with "${NORMAL_C:N-fsanitize*}"
kern/subr_counter.c standard
kern/subr_csan.c optional kcsan \
compile-with "${NORMAL_C:N-fsanitize*:N-fstack-protector*}"
kern/subr_devstat.c standard
kern/subr_disk.c standard
kern/subr_early.c standard
kern/subr_epoch.c standard
kern/subr_eventhandler.c standard
kern/subr_fattime.c standard
kern/subr_firmware.c optional firmware
kern/subr_filter.c standard
kern/subr_gtaskqueue.c standard
kern/subr_hash.c standard
kern/subr_hints.c standard
kern/subr_kdb.c standard
kern/subr_kobj.c standard
kern/subr_lock.c standard
kern/subr_log.c standard
kern/subr_mchain.c optional libmchain
kern/subr_module.c standard
kern/subr_msan.c optional kmsan \
compile-with "${NORMAL_C:N-fsanitize*:N-fstack-protector*}"
kern/subr_msgbuf.c standard
kern/subr_param.c standard
kern/subr_pcpu.c standard
kern/subr_pctrie.c standard
kern/subr_pidctrl.c standard
kern/subr_power.c standard
kern/subr_prf.c standard
kern/subr_prng.c standard
kern/subr_prof.c standard
kern/subr_rangeset.c standard
kern/subr_rman.c standard
kern/subr_rtc.c standard
kern/subr_sbuf.c standard
kern/subr_scanf.c standard
kern/subr_sglist.c standard
kern/subr_sleepqueue.c standard
kern/subr_smp.c standard
kern/subr_smr.c standard
kern/subr_stack.c optional ddb | stack | ktr
kern/subr_stats.c optional stats
kern/subr_taskqueue.c standard
kern/subr_terminal.c optional vt
kern/subr_trap.c standard
kern/subr_turnstile.c standard
kern/subr_uio.c standard
kern/subr_unit.c standard
kern/subr_vmem.c standard
kern/subr_witness.c optional witness
kern/sys_capability.c standard
kern/sys_eventfd.c standard
kern/sys_generic.c standard
kern/sys_getrandom.c standard
kern/sys_pipe.c standard
kern/sys_procdesc.c standard
kern/sys_process.c standard
kern/sys_socket.c standard
kern/syscalls.c standard
kern/sysv_ipc.c standard
kern/sysv_msg.c optional sysvmsg
kern/sysv_sem.c optional sysvsem
kern/sysv_shm.c optional sysvshm
kern/tty.c standard
kern/tty_compat.c optional compat_43tty
kern/tty_info.c standard
kern/tty_inq.c standard
kern/tty_outq.c standard
kern/tty_pts.c standard
kern/tty_tty.c standard
kern/tty_ttydisc.c standard
kern/uipc_accf.c standard
kern/uipc_debug.c optional ddb
kern/uipc_domain.c standard
kern/uipc_ktls.c optional kern_tls
kern/uipc_mbuf.c standard
kern/uipc_mbuf2.c standard
kern/uipc_mbufhash.c standard
kern/uipc_mqueue.c optional p1003_1b_mqueue
kern/uipc_sem.c optional p1003_1b_semaphores
kern/uipc_shm.c standard
kern/uipc_sockbuf.c standard
kern/uipc_socket.c standard
kern/uipc_syscalls.c standard
kern/uipc_usrreq.c standard
kern/vfs_acl.c standard
kern/vfs_aio.c standard
kern/vfs_bio.c standard
kern/vfs_cache.c standard
kern/vfs_cluster.c standard
kern/vfs_default.c standard
kern/vfs_export.c standard
kern/vfs_extattr.c standard
kern/vfs_hash.c standard
kern/vfs_init.c standard
kern/vfs_lookup.c standard
kern/vfs_mount.c standard
kern/vfs_mountroot.c standard
kern/vfs_subr.c standard
kern/vfs_syscalls.c standard
kern/vfs_vnops.c standard
#
# Kernel GSS-API
#
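# gssd.h, gssd_xdr.c and gssd_clnt.c are generated at build time from the
# RPC definition in kgssapi/gssd.x (as the compile-with rules below show):
# rpcgen -hM emits the header, rpcgen -c the XDR routines, and rpcgen -lM
# the client stubs; the grep invocations strip includes of userland headers
# from the generated code.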
gssd.h optional kgssapi \
dependency "$S/kgssapi/gssd.x" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/kgssapi/gssd.x | grep -v pthread.h > gssd.h" \
no-obj no-implicit-rule before-depend local \
clean "gssd.h"
gssd_xdr.c optional kgssapi \
dependency "$S/kgssapi/gssd.x gssd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/kgssapi/gssd.x -o gssd_xdr.c" \
no-ctfconvert no-implicit-rule before-depend local \
clean "gssd_xdr.c"
gssd_clnt.c optional kgssapi \
dependency "$S/kgssapi/gssd.x gssd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/kgssapi/gssd.x | grep -v string.h > gssd_clnt.c" \
no-ctfconvert no-implicit-rule before-depend local \
clean "gssd_clnt.c"
kgssapi/gss_accept_sec_context.c optional kgssapi
kgssapi/gss_add_oid_set_member.c optional kgssapi
kgssapi/gss_acquire_cred.c optional kgssapi
kgssapi/gss_canonicalize_name.c optional kgssapi
kgssapi/gss_create_empty_oid_set.c optional kgssapi
kgssapi/gss_delete_sec_context.c optional kgssapi
kgssapi/gss_display_status.c optional kgssapi
kgssapi/gss_export_name.c optional kgssapi
kgssapi/gss_get_mic.c optional kgssapi
kgssapi/gss_init_sec_context.c optional kgssapi
kgssapi/gss_impl.c optional kgssapi
kgssapi/gss_import_name.c optional kgssapi
kgssapi/gss_names.c optional kgssapi
kgssapi/gss_pname_to_uid.c optional kgssapi
kgssapi/gss_release_buffer.c optional kgssapi
kgssapi/gss_release_cred.c optional kgssapi
kgssapi/gss_release_name.c optional kgssapi
kgssapi/gss_release_oid_set.c optional kgssapi
kgssapi/gss_set_cred_option.c optional kgssapi
kgssapi/gss_test_oid_set_member.c optional kgssapi
kgssapi/gss_unwrap.c optional kgssapi
kgssapi/gss_verify_mic.c optional kgssapi
kgssapi/gss_wrap.c optional kgssapi
kgssapi/gss_wrap_size_limit.c optional kgssapi
kgssapi/gssd_prot.c optional kgssapi
kgssapi/krb5/krb5_mech.c optional kgssapi
kgssapi/krb5/kcrypto.c optional kgssapi
kgssapi/krb5/kcrypto_aes.c optional kgssapi
kgssapi/kgss_if.m optional kgssapi
kgssapi/gsstest.c optional kgssapi_debug
# These files in libkern/ are those needed by all architectures. Some
# of the files in libkern/ are only needed on some architectures, e.g.,
# libkern/divdi3.c is needed by i386 but not alpha. Also, some of these
# routines may be optimized for a particular platform. In either case,
# the file should be moved to conf/files.<arch> from here.
#
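# For illustration (hypothetical placement, not a change made here): an
# architecture-only helper such as libkern/divdi3.c would be listed in
# sys/conf/files.<arch> using the same syntax as this file, e.g.
#	libkern/divdi3.c	standard
# so that it is built only for kernels of that architecture.
#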
libkern/arc4random.c standard
libkern/arc4random_uniform.c standard
libkern/asprintf.c standard
libkern/bcd.c standard
libkern/bsearch.c standard
libkern/crc16.c standard
libkern/explicit_bzero.c standard
libkern/fnmatch.c standard
libkern/gsb_crc32.c standard
libkern/iconv.c optional libiconv
libkern/iconv_converter_if.m optional libiconv
libkern/iconv_ucs.c optional libiconv
libkern/iconv_xlat.c optional libiconv
libkern/iconv_xlat16.c optional libiconv
libkern/inet_aton.c standard
libkern/inet_ntoa.c standard
libkern/inet_ntop.c standard
libkern/inet_pton.c standard
libkern/jenkins_hash.c standard
libkern/murmur3_32.c standard
libkern/memcchr.c standard
libkern/memchr.c standard
libkern/memmem.c optional gdb
libkern/qsort.c standard
libkern/qsort_r.c standard
libkern/random.c standard
libkern/scanc.c standard
libkern/strcasecmp.c standard
libkern/strcasestr.c standard
libkern/strcat.c standard
libkern/strchr.c standard
libkern/strchrnul.c optional gdb
libkern/strcpy.c standard
libkern/strcspn.c standard
libkern/strdup.c standard
libkern/strndup.c standard
libkern/strlcat.c standard
libkern/strlcpy.c standard
libkern/strncat.c standard
libkern/strncpy.c standard
libkern/strnlen.c standard
libkern/strnstr.c standard
libkern/strrchr.c standard
libkern/strsep.c standard
libkern/strspn.c standard
libkern/strstr.c standard
libkern/strtol.c standard
libkern/strtoq.c standard
libkern/strtoul.c standard
libkern/strtouq.c standard
libkern/strvalid.c standard
libkern/timingsafe_bcmp.c standard
contrib/zlib/adler32.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
contrib/zlib/compress.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
contrib/zlib/crc32.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
contrib/zlib/deflate.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
contrib/zlib/inffast.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
contrib/zlib/inflate.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
contrib/zlib/inftrees.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
contrib/zlib/trees.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
contrib/zlib/uncompr.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
contrib/zlib/zutil.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib \
compile-with "${ZLIB_C}"
dev/zlib/zlib_mod.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib
dev/zlib/zcalloc.c optional crypto | geom_uzip | \
mxge | ddb_ctf | gzio | zfs | zlib
net/altq/altq_cbq.c optional altq
net/altq/altq_codel.c optional altq
net/altq/altq_hfsc.c optional altq
net/altq/altq_fairq.c optional altq
net/altq/altq_priq.c optional altq
net/altq/altq_red.c optional altq
net/altq/altq_rio.c optional altq
net/altq/altq_rmclass.c optional altq
net/altq/altq_subr.c optional altq
net/bpf.c standard
net/bpf_buffer.c optional bpf
net/bpf_jitter.c optional bpf_jitter
net/bpf_filter.c optional bpf | netgraph_bpf
net/bpf_zerocopy.c optional bpf
net/bridgestp.c optional bridge | if_bridge
net/ieee8023ad_lacp.c optional lagg
net/if.c standard
net/ifq.c standard
net/if_bridge.c optional bridge inet | if_bridge inet
net/if_clone.c standard
net/if_dead.c standard
net/if_disc.c optional disc
net/if_edsc.c optional edsc
net/if_enc.c optional enc inet | enc inet6
net/if_epair.c optional epair
net/if_ethersubr.c optional ether
net/if_fwsubr.c optional fwip
net/if_gif.c optional gif inet | gif inet6 | \
netgraph_gif inet | netgraph_gif inet6
net/if_gre.c optional gre inet | gre inet6
net/if_ipsec.c optional inet ipsec | inet6 ipsec
net/if_lagg.c optional lagg
net/if_loop.c optional loop
net/if_llatbl.c standard
net/if_me.c optional me inet
net/if_media.c standard
net/if_mib.c standard
net/if_ovpn.c optional ovpn inet | ovpn inet6
net/if_stf.c optional stf inet inet6
net/if_tuntap.c optional tuntap
net/if_vlan.c optional vlan
net/if_vxlan.c optional vxlan inet | vxlan inet6
net/ifdi_if.m optional ether pci iflib
net/iflib.c optional ether pci iflib
net/iflib_clone.c optional ether pci iflib
net/mp_ring.c optional ether iflib
net/mppcc.c optional netgraph_mppc_compression
net/mppcd.c optional netgraph_mppc_compression
net/netisr.c standard
net/debugnet.c optional inet debugnet
net/debugnet_inet.c optional inet debugnet
net/pfil.c optional ether | inet
net/radix.c standard
net/route.c standard
net/route/nhgrp.c optional route_mpath
net/route/nhgrp_ctl.c optional route_mpath
net/route/nhop.c standard
net/route/nhop_ctl.c standard
net/route/nhop_utils.c standard
net/route/fib_algo.c optional fib_algo
net/route/route_ctl.c standard
net/route/route_ddb.c optional ddb
net/route/route_helpers.c standard
net/route/route_ifaddrs.c standard
net/route/route_rtentry.c standard
net/route/route_subscription.c standard
net/route/route_tables.c standard
net/route/route_temporal.c standard
net/rss_config.c optional inet rss | inet6 rss
net/rtsock.c standard
net/slcompress.c optional netgraph_vjc
net/toeplitz.c optional inet rss | inet6 rss | route_mpath
net/vnet.c optional vimage
net80211/ieee80211.c optional wlan
net80211/ieee80211_acl.c optional wlan wlan_acl
net80211/ieee80211_action.c optional wlan
net80211/ieee80211_adhoc.c optional wlan \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_ageq.c optional wlan
net80211/ieee80211_amrr.c optional wlan | wlan_amrr
net80211/ieee80211_crypto.c optional wlan \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_crypto_ccmp.c optional wlan wlan_ccmp
net80211/ieee80211_crypto_none.c optional wlan
net80211/ieee80211_crypto_tkip.c optional wlan wlan_tkip
net80211/ieee80211_crypto_wep.c optional wlan wlan_wep
net80211/ieee80211_ddb.c optional wlan ddb
net80211/ieee80211_dfs.c optional wlan
net80211/ieee80211_freebsd.c optional wlan
net80211/ieee80211_hostap.c optional wlan \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_ht.c optional wlan
net80211/ieee80211_hwmp.c optional wlan ieee80211_support_mesh
net80211/ieee80211_input.c optional wlan
net80211/ieee80211_ioctl.c optional wlan
net80211/ieee80211_mesh.c optional wlan ieee80211_support_mesh \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_monitor.c optional wlan
net80211/ieee80211_node.c optional wlan
net80211/ieee80211_output.c optional wlan
net80211/ieee80211_phy.c optional wlan
net80211/ieee80211_power.c optional wlan
net80211/ieee80211_proto.c optional wlan
net80211/ieee80211_radiotap.c optional wlan
net80211/ieee80211_ratectl.c optional wlan
net80211/ieee80211_ratectl_none.c optional wlan
net80211/ieee80211_regdomain.c optional wlan
net80211/ieee80211_rssadapt.c optional wlan wlan_rssadapt
net80211/ieee80211_scan.c optional wlan
net80211/ieee80211_scan_sta.c optional wlan
net80211/ieee80211_sta.c optional wlan \
compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_superg.c optional wlan ieee80211_support_superg
net80211/ieee80211_scan_sw.c optional wlan
net80211/ieee80211_tdma.c optional wlan ieee80211_support_tdma
net80211/ieee80211_vht.c optional wlan
net80211/ieee80211_wds.c optional wlan
net80211/ieee80211_xauth.c optional wlan wlan_xauth
net80211/ieee80211_alq.c optional wlan ieee80211_alq
netgraph/bluetooth/common/ng_bluetooth.c optional netgraph_bluetooth
netgraph/bluetooth/drivers/ubt/ng_ubt.c optional netgraph_bluetooth_ubt usb
netgraph/bluetooth/drivers/ubt/ng_ubt_intel.c optional netgraph_bluetooth_ubt usb
netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c optional netgraph_bluetooth_ubtbcmfw usb
netgraph/bluetooth/hci/ng_hci_cmds.c optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_evnt.c optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_main.c optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_misc.c optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_ulpi.c optional netgraph_bluetooth_hci
netgraph/bluetooth/l2cap/ng_l2cap_cmds.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_evnt.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_llpi.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_main.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_misc.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/l2cap/ng_l2cap_ulpi.c optional netgraph_bluetooth_l2cap
netgraph/bluetooth/socket/ng_btsocket.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_hci_raw.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_l2cap.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_rfcomm.c optional netgraph_bluetooth_socket
netgraph/bluetooth/socket/ng_btsocket_sco.c optional netgraph_bluetooth_socket
netgraph/netflow/netflow.c optional netgraph_netflow
netgraph/netflow/netflow_v9.c optional netgraph_netflow
netgraph/netflow/ng_netflow.c optional netgraph_netflow
netgraph/ng_UI.c optional netgraph_UI
netgraph/ng_async.c optional netgraph_async
netgraph/ng_base.c optional netgraph
netgraph/ng_bpf.c optional netgraph_bpf
netgraph/ng_bridge.c optional netgraph_bridge
netgraph/ng_car.c optional netgraph_car
netgraph/ng_checksum.c optional netgraph_checksum
netgraph/ng_cisco.c optional netgraph_cisco
netgraph/ng_deflate.c optional netgraph_deflate
netgraph/ng_device.c optional netgraph_device
netgraph/ng_echo.c optional netgraph_echo
netgraph/ng_eiface.c optional netgraph_eiface
netgraph/ng_ether.c optional netgraph_ether
netgraph/ng_ether_echo.c optional netgraph_ether_echo
netgraph/ng_frame_relay.c optional netgraph_frame_relay
netgraph/ng_gif.c optional netgraph_gif inet6 | netgraph_gif inet
netgraph/ng_gif_demux.c optional netgraph_gif_demux
netgraph/ng_hole.c optional netgraph_hole
netgraph/ng_iface.c optional netgraph_iface
netgraph/ng_ip_input.c optional netgraph_ip_input
netgraph/ng_ipfw.c optional netgraph_ipfw inet ipfirewall
netgraph/ng_ksocket.c optional netgraph_ksocket
netgraph/ng_l2tp.c optional netgraph_l2tp
netgraph/ng_lmi.c optional netgraph_lmi
netgraph/ng_macfilter.c optional netgraph_macfilter
netgraph/ng_mppc.c optional netgraph_mppc_compression | \
netgraph_mppc_encryption
netgraph/ng_nat.c optional netgraph_nat inet libalias
netgraph/ng_one2many.c optional netgraph_one2many
netgraph/ng_parse.c optional netgraph
netgraph/ng_patch.c optional netgraph_patch
netgraph/ng_pipe.c optional netgraph_pipe
netgraph/ng_ppp.c optional netgraph_ppp
netgraph/ng_pppoe.c optional netgraph_pppoe
netgraph/ng_pptpgre.c optional netgraph_pptpgre
netgraph/ng_pred1.c optional netgraph_pred1
netgraph/ng_rfc1490.c optional netgraph_rfc1490
netgraph/ng_socket.c optional netgraph_socket
netgraph/ng_split.c optional netgraph_split
netgraph/ng_tag.c optional netgraph_tag
netgraph/ng_tcpmss.c optional netgraph_tcpmss
netgraph/ng_tee.c optional netgraph_tee
netgraph/ng_tty.c optional netgraph_tty
netgraph/ng_vjc.c optional netgraph_vjc
netgraph/ng_vlan.c optional netgraph_vlan
netgraph/ng_vlan_rotate.c optional netgraph_vlan_rotate
netinet/accf_data.c optional accept_filter_data inet
netinet/accf_dns.c optional accept_filter_dns inet
netinet/accf_http.c optional accept_filter_http inet
netinet/if_ether.c optional inet ether
netinet/igmp.c optional inet
netinet/in.c optional inet
netinet/in_cksum.c optional inet | inet6
netinet/in_debug.c optional inet ddb
netinet/in_kdtrace.c optional inet | inet6
netinet/ip_carp.c optional inet carp | inet6 carp
netinet/in_fib.c optional inet
netinet/in_fib_algo.c optional inet fib_algo
netinet/in_gif.c optional gif inet | netgraph_gif inet
netinet/ip_gre.c optional gre inet
netinet/ip_id.c optional inet
netinet/in_jail.c optional inet
netinet/in_mcast.c optional inet
netinet/in_pcb.c optional inet | inet6
netinet/in_prot.c optional inet | inet6
netinet/in_proto.c optional inet | inet6
netinet/in_rmx.c optional inet
netinet/in_rss.c optional inet rss
netinet/ip_divert.c optional ipdivert inet | ipdivert inet6
netinet/ip_ecn.c optional inet | inet6
netinet/ip_encap.c optional inet | inet6
netinet/ip_fastfwd.c optional inet
netinet/ip_icmp.c optional inet | inet6
netinet/ip_input.c optional inet
netinet/ip_mroute.c optional mrouting inet
netinet/ip_options.c optional inet
netinet/ip_output.c optional inet
netinet/ip_reass.c optional inet
netinet/raw_ip.c optional inet | inet6
netinet/cc/cc.c optional cc_newreno inet | cc_vegas inet | \
cc_htcp inet | cc_hd inet | cc_dctcp inet | cc_cubic inet | \
cc_chd inet | cc_cdg inet | cc_newreno inet6 | cc_vegas inet6 | \
cc_htcp inet6 | cc_hd inet6 | cc_dctcp inet6 | cc_cubic inet6 | \
cc_chd inet6 | cc_cdg inet6
netinet/cc/cc_cdg.c optional inet cc_cdg tcp_hhook
netinet/cc/cc_chd.c optional inet cc_chd tcp_hhook
netinet/cc/cc_cubic.c optional inet cc_cubic | inet6 cc_cubic
netinet/cc/cc_dctcp.c optional inet cc_dctcp | inet6 cc_dctcp
netinet/cc/cc_hd.c optional inet cc_hd tcp_hhook
netinet/cc/cc_htcp.c optional inet cc_htcp | inet6 cc_htcp
netinet/cc/cc_newreno.c optional inet cc_newreno | inet6 cc_newreno
netinet/cc/cc_vegas.c optional inet cc_vegas tcp_hhook
netinet/khelp/h_ertt.c optional inet tcp_hhook
netinet/sctp_asconf.c optional inet sctp | inet6 sctp
netinet/sctp_auth.c optional inet sctp | inet6 sctp
netinet/sctp_bsd_addr.c optional inet sctp | inet6 sctp
netinet/sctp_cc_functions.c optional inet sctp | inet6 sctp
netinet/sctp_crc32.c optional inet | inet6
netinet/sctp_indata.c optional inet sctp | inet6 sctp
netinet/sctp_input.c optional inet sctp | inet6 sctp
netinet/sctp_kdtrace.c optional inet sctp | inet6 sctp
netinet/sctp_output.c optional inet sctp | inet6 sctp
netinet/sctp_pcb.c optional inet sctp | inet6 sctp
netinet/sctp_peeloff.c optional inet sctp | inet6 sctp
netinet/sctp_ss_functions.c optional inet sctp | inet6 sctp
netinet/sctp_syscalls.c optional inet sctp | inet6 sctp
netinet/sctp_sysctl.c optional inet sctp | inet6 sctp
netinet/sctp_timer.c optional inet sctp | inet6 sctp
netinet/sctp_usrreq.c optional inet sctp | inet6 sctp
netinet/sctputil.c optional inet sctp | inet6 sctp
netinet/siftr.c optional inet siftr alq | inet6 siftr alq
netinet/tcp_ecn.c optional inet | inet6
netinet/tcp_fastopen.c optional inet tcp_rfc7413 | inet6 tcp_rfc7413
netinet/tcp_hostcache.c optional inet | inet6
netinet/tcp_input.c optional inet | inet6
netinet/tcp_log_buf.c optional tcp_blackbox inet | tcp_blackbox inet6
netinet/tcp_lro.c optional inet | inet6
netinet/tcp_output.c optional inet | inet6
netinet/tcp_offload.c optional tcp_offload inet | tcp_offload inet6
netinet/tcp_hpts.c optional tcphpts inet | tcphpts inet6
netinet/tcp_ratelimit.c optional ratelimit inet | ratelimit inet6
netinet/tcp_pcap.c optional inet tcppcap | inet6 tcppcap \
compile-with "${NORMAL_C} ${NO_WNONNULL}"
netinet/tcp_reass.c optional inet | inet6
netinet/tcp_sack.c optional inet | inet6
netinet/tcp_stats.c optional stats inet | stats inet6
netinet/tcp_subr.c optional inet | inet6
netinet/tcp_syncache.c optional inet | inet6
netinet/tcp_timer.c optional inet | inet6
netinet/tcp_timewait.c optional inet | inet6
netinet/tcp_usrreq.c optional inet | inet6
netinet/udp_usrreq.c optional inet | inet6
netinet/libalias/alias.c optional libalias inet | netgraph_nat inet
netinet/libalias/alias_db.c optional libalias inet | netgraph_nat inet
netinet/libalias/alias_mod.c optional libalias | netgraph_nat
netinet/libalias/alias_proxy.c optional libalias inet | netgraph_nat inet
netinet/libalias/alias_util.c optional libalias inet | netgraph_nat inet
netinet/libalias/alias_sctp.c optional libalias inet | netgraph_nat inet
netinet/netdump/netdump_client.c optional inet debugnet netdump
netinet6/dest6.c optional inet6
netinet6/frag6.c optional inet6
netinet6/icmp6.c optional inet6
netinet6/in6.c optional inet6
netinet6/in6_cksum.c optional inet6
netinet6/in6_fib.c optional inet6
netinet6/in6_fib_algo.c optional inet6 fib_algo
netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6
netinet6/in6_ifattach.c optional inet6
netinet6/in6_jail.c optional inet6
netinet6/in6_mcast.c optional inet6
netinet6/in6_pcb.c optional inet6
netinet6/in6_proto.c optional inet6
netinet6/in6_rmx.c optional inet6
netinet6/in6_rss.c optional inet6 rss
netinet6/in6_src.c optional inet6
netinet6/ip6_fastfwd.c optional inet6
netinet6/ip6_forward.c optional inet6
netinet6/ip6_gre.c optional gre inet6
netinet6/ip6_id.c optional inet6
netinet6/ip6_input.c optional inet6
netinet6/ip6_mroute.c optional mrouting inet6
netinet6/ip6_output.c optional inet6
netinet6/mld6.c optional inet6
netinet6/nd6.c optional inet6
netinet6/nd6_nbr.c optional inet6
netinet6/nd6_rtr.c optional inet6
netinet6/raw_ip6.c optional inet6
netinet6/route6.c optional inet6
netinet6/scope6.c optional inet6
netinet6/sctp6_usrreq.c optional inet6 sctp
netinet6/udp6_usrreq.c optional inet6
netipsec/ipsec.c optional ipsec inet | ipsec inet6
netipsec/ipsec_input.c optional ipsec inet | ipsec inet6
netipsec/ipsec_mbuf.c optional ipsec inet | ipsec inet6
netipsec/ipsec_mod.c optional ipsec inet | ipsec inet6
netipsec/ipsec_output.c optional ipsec inet | ipsec inet6
netipsec/ipsec_pcb.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/key.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/key_debug.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/keysock.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/subr_ipsec.c optional ipsec inet | ipsec inet6 | \
ipsec_support inet | ipsec_support inet6
netipsec/udpencap.c optional ipsec inet
netipsec/xform_ah.c optional ipsec inet | ipsec inet6
netipsec/xform_esp.c optional ipsec inet | ipsec inet6
netipsec/xform_ipcomp.c optional ipsec inet | ipsec inet6
netipsec/xform_tcp.c optional ipsec inet tcp_signature | \
ipsec inet6 tcp_signature | ipsec_support inet tcp_signature | \
ipsec_support inet6 tcp_signature
netlink/netlink_generic_kpi.c standard
netlink/netlink_glue.c standard
netlink/netlink_message_parser.c standard
netlink/netlink_domain.c optional netlink
netlink/netlink_generic.c optional netlink
netlink/netlink_io.c optional netlink
netlink/netlink_message_writer.c optional netlink
netlink/netlink_module.c optional netlink
netlink/netlink_route.c optional netlink
netlink/route/iface_drivers.c optional netlink
netlink/route/iface.c optional netlink
netlink/route/neigh.c optional netlink
netlink/route/nexthop.c optional netlink
netlink/route/rt.c optional netlink
netpfil/ipfw/dn_aqm_codel.c optional inet dummynet
netpfil/ipfw/dn_aqm_pie.c optional inet dummynet
netpfil/ipfw/dn_heap.c optional inet dummynet
netpfil/ipfw/dn_sched_fifo.c optional inet dummynet
netpfil/ipfw/dn_sched_fq_codel.c optional inet dummynet
netpfil/ipfw/dn_sched_fq_pie.c optional inet dummynet
netpfil/ipfw/dn_sched_prio.c optional inet dummynet
netpfil/ipfw/dn_sched_qfq.c optional inet dummynet
netpfil/ipfw/dn_sched_rr.c optional inet dummynet
netpfil/ipfw/dn_sched_wf2q.c optional inet dummynet
netpfil/ipfw/ip_dummynet.c optional inet dummynet
netpfil/ipfw/ip_dn_io.c optional inet dummynet
netpfil/ipfw/ip_dn_glue.c optional inet dummynet
netpfil/ipfw/ip_fw2.c optional inet ipfirewall
netpfil/ipfw/ip_fw_bpf.c optional inet ipfirewall
netpfil/ipfw/ip_fw_dynamic.c optional inet ipfirewall \
compile-with "${NORMAL_C} -I$S/contrib/ck/include"
netpfil/ipfw/ip_fw_eaction.c optional inet ipfirewall
netpfil/ipfw/ip_fw_log.c optional inet ipfirewall
netpfil/ipfw/ip_fw_pfil.c optional inet ipfirewall
netpfil/ipfw/ip_fw_sockopt.c optional inet ipfirewall
netpfil/ipfw/ip_fw_table.c optional inet ipfirewall
netpfil/ipfw/ip_fw_table_algo.c optional inet ipfirewall
netpfil/ipfw/ip_fw_table_value.c optional inet ipfirewall
netpfil/ipfw/ip_fw_iface.c optional inet ipfirewall
netpfil/ipfw/ip_fw_nat.c optional inet ipfirewall_nat
netpfil/ipfw/nat64/ip_fw_nat64.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64clat.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64clat_control.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64lsn.c optional inet inet6 ipfirewall \
ipfirewall_nat64 compile-with "${NORMAL_C} -I$S/contrib/ck/include"
netpfil/ipfw/nat64/nat64lsn_control.c optional inet inet6 ipfirewall \
ipfirewall_nat64 compile-with "${NORMAL_C} -I$S/contrib/ck/include"
netpfil/ipfw/nat64/nat64stl.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64stl_control.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nat64/nat64_translate.c optional inet inet6 ipfirewall \
ipfirewall_nat64
netpfil/ipfw/nptv6/ip_fw_nptv6.c optional inet inet6 ipfirewall \
ipfirewall_nptv6
netpfil/ipfw/nptv6/nptv6.c optional inet inet6 ipfirewall \
ipfirewall_nptv6
netpfil/ipfw/pmod/ip_fw_pmod.c optional inet ipfirewall_pmod
netpfil/ipfw/pmod/tcpmod.c optional inet ipfirewall_pmod
netpfil/pf/if_pflog.c optional pflog pf inet
netpfil/pf/if_pfsync.c optional pfsync pf inet
netpfil/pf/pf.c optional pf inet
netpfil/pf/pf_if.c optional pf inet
netpfil/pf/pf_ioctl.c optional pf inet
netpfil/pf/pf_lb.c optional pf inet
netpfil/pf/pf_norm.c optional pf inet
netpfil/pf/pf_nv.c optional pf inet
netpfil/pf/pf_osfp.c optional pf inet
netpfil/pf/pf_ruleset.c optional pf inet
netpfil/pf/pf_syncookies.c optional pf inet
netpfil/pf/pf_table.c optional pf inet
netpfil/pf/pfsync_nv.c optional pfsync pf inet
netpfil/pf/in4_cksum.c optional pf inet
netsmb/smb_conn.c optional netsmb
netsmb/smb_crypt.c optional netsmb
netsmb/smb_dev.c optional netsmb
netsmb/smb_iod.c optional netsmb
netsmb/smb_rq.c optional netsmb
netsmb/smb_smb.c optional netsmb
netsmb/smb_subr.c optional netsmb
netsmb/smb_trantcp.c optional netsmb
netsmb/smb_usr.c optional netsmb
nfs/bootp_subr.c optional bootp nfscl
nfs/krpc_subr.c optional bootp nfscl
nfs/nfs_diskless.c optional nfscl nfs_root
nfs/nfs_nfssvc.c optional nfscl | nfslockd | nfsd
nlm/nlm_advlock.c optional nfslockd | nfsd
nlm/nlm_prot_clnt.c optional nfslockd | nfsd
nlm/nlm_prot_impl.c optional nfslockd | nfsd
nlm/nlm_prot_server.c optional nfslockd | nfsd
nlm/nlm_prot_svc.c optional nfslockd | nfsd
nlm/nlm_prot_xdr.c optional nfslockd | nfsd
nlm/sm_inter_xdr.c optional nfslockd | nfsd
# Linux Kernel Programming Interface
compat/linuxkpi/common/src/linux_80211.c optional compat_linuxkpi wlan \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_80211_macops.c optional compat_linuxkpi wlan \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_kmod.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_acpi.c optional compat_linuxkpi acpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_compat.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_current.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_devres.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_dmi.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_domain.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_firmware.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_fpu.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_hrtimer.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_i2c.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_i2cbb.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_interrupt.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_kthread.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_lock.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_mhi.c optional compat_linuxkpi wlan \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_netdev.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_page.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_pci.c optional compat_linuxkpi pci \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_tasklet.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_idr.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_radix.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_rcu.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C} -I$S/contrib/ck/include"
compat/linuxkpi/common/src/linux_schedule.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_shmemfs.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_shrinker.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_skbuff.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_slab.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_usb.c optional compat_linuxkpi usb \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_work.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_xarray.c optional compat_linuxkpi \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/lkpi_iic_if.m optional compat_linuxkpi
compat/linuxkpi/common/src/linux_seq_file.c optional compat_linuxkpi | lindebugfs \
compile-with "${LINUXKPI_C}"
compat/linuxkpi/common/src/linux_simple_attr.c optional compat_linuxkpi | lindebugfs \
compile-with "${LINUXKPI_C}"
compat/lindebugfs/lindebugfs.c optional lindebugfs \
compile-with "${LINUXKPI_C}"
# OpenFabrics Enterprise Distribution (Infiniband)
net/if_infiniband.c optional ofed | lagg
ofed/drivers/infiniband/core/ib_addr.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_agent.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_cache.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_cm.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_cma.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_core_uverbs.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_cq.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_device.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_fmr_pool.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_iwcm.c optional ofed \
compile-with "${OFED_C} ${NO_WUNUSED_BUT_SET_VARIABLE}"
ofed/drivers/infiniband/core/ib_iwpm_msg.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_iwpm_util.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_mad.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_mad_rmpp.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_multicast.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_packer.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_rdma_core.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_sa_query.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_smi.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_sysfs.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_ucm.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_ucma.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_ud_header.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_umem.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_user_mad.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_cmd.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_ioctl.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_main.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_marshall.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_async_fd.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_counters.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_cq.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_device.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_dm.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_flow_action.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_std_types_mr.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_uverbs_uapi.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/core/ib_verbs.c optional ofed \
compile-with "${OFED_C}"
ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
#ofed/drivers/infiniband/ulp/ipoib/ipoib_fs.c optional ipoib \
# compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c optional ipoib \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
#ofed/drivers/infiniband/ulp/ipoib/ipoib_vlan.c optional ipoib \
# compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/"
ofed/drivers/infiniband/ulp/sdp/sdp_bcopy.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/"
ofed/drivers/infiniband/ulp/sdp/sdp_main.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/"
ofed/drivers/infiniband/ulp/sdp/sdp_rx.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/ ${NO_WUNUSED_BUT_SET_VARIABLE}"
ofed/drivers/infiniband/ulp/sdp/sdp_cma.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/"
ofed/drivers/infiniband/ulp/sdp/sdp_tx.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/ ${NO_WUNUSED_BUT_SET_VARIABLE}"
dev/irdma/icrdma.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_cm.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_ctrl.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_hmc.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_hw.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/icrdma_hw.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/fbsd_kcompat.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_kcompat.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_pble.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_puda.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_uda.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_uk.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_utils.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_verbs.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_ws.c optional irdma ice inet inet6 pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/mthca/mthca_allocator.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_av.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_catas.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_cmd.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_cq.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_eq.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_mad.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_main.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_mcg.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_memfree.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_mr.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_pd.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_profile.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_provider.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_qp.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_reset.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_srq.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_uar.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_mcg.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_cm.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_ah.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_cq.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_doorbell.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_mad.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_main.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_mr.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_qp.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_srq.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_ib/mlx4_ib_wc.c optional mlx4ib pci ofed \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_alloc.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_catas.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_cmd.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_cq.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_eq.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_fw.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_fw_qos.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_icm.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_intf.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_main.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_mcg.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_mr.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_pd.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_port.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_profile.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_qp.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_reset.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_sense.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_srq.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_core/mlx4_resource_tracker.c optional mlx4 pci \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_cq.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_main.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_netdev.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_port.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_resources.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_rx.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx4/mlx4_en/mlx4_en_tx.c optional mlx4en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_ah.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_cong.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_cq.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_devx.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_gsi.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_mad.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_main.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_mem.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_mr.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_qp.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_srq.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_ib/mlx5_ib_virt.c optional mlx5ib pci ofed \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_alloc.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_cmd.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_cq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_diag_cnt.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_diagnostics.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_eq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_eswitch.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_cmd.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_tcp.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_tree.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fw.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fwdump.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_health.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_mad.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_main.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_mcg.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_mpfs.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_mr.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_pagealloc.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_pd.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_port.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_qp.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_rl.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_srq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_tls.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_transobj.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_uar.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_vport.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_vsc.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_wq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_lib/mlx5_gid.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_dim.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_ethtool.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_main.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_tx.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_flow_table.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_hw_tls.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_iq.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_rx.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_rl.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_txrx.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
dev/mlx5/mlx5_en/mlx5_en_port_buffer.c optional mlx5en pci inet inet6 \
compile-with "${OFED_C}"
# crypto support
opencrypto/cbc_mac.c optional crypto
opencrypto/criov.c optional crypto
opencrypto/crypto.c optional crypto
opencrypto/cryptodev.c optional cryptodev
opencrypto/cryptodev_if.m optional crypto
opencrypto/cryptosoft.c optional crypto
opencrypto/cryptodeflate.c optional crypto
opencrypto/gmac.c optional crypto
opencrypto/gfmult.c optional crypto
opencrypto/ktls_ocf.c optional kern_tls
opencrypto/rmd160.c optional crypto
opencrypto/xform_aes_cbc.c optional crypto
opencrypto/xform_aes_icm.c optional crypto
opencrypto/xform_aes_xts.c optional crypto
opencrypto/xform_cbc_mac.c optional crypto
opencrypto/xform_chacha20_poly1305.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
opencrypto/xform_cml.c optional crypto
opencrypto/xform_deflate.c optional crypto
opencrypto/xform_gmac.c optional crypto
opencrypto/xform_null.c optional crypto
opencrypto/xform_poly1305.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
opencrypto/xform_rmd160.c optional crypto
opencrypto/xform_sha1.c optional crypto
opencrypto/xform_sha2.c optional crypto
contrib/libsodium/src/libsodium/crypto_core/ed25519/ref10/ed25519_ref10.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium -Wno-unused-function"
contrib/libsodium/src/libsodium/crypto_core/hchacha20/core_hchacha20.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_onetimeauth/poly1305/onetimeauth_poly1305.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_onetimeauth/poly1305/donna/poly1305_donna.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_scalarmult/curve25519/scalarmult_curve25519.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_scalarmult/curve25519/ref10/x25519_ref10.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium -Wno-unused-function"
contrib/libsodium/src/libsodium/crypto_stream/chacha20/stream_chacha20.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_stream/chacha20/ref/chacha20_ref.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
contrib/libsodium/src/libsodium/crypto_verify/sodium/verify.c \
optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include/sodium -I$S/crypto/libsodium"
crypto/libsodium/randombytes.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
crypto/libsodium/utils.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
rpc/auth_none.c optional krpc | nfslockd | nfscl | nfsd
rpc/auth_unix.c optional krpc | nfslockd | nfscl | nfsd
rpc/authunix_prot.c optional krpc | nfslockd | nfscl | nfsd
rpc/clnt_bck.c optional krpc | nfslockd | nfscl | nfsd
rpc/clnt_dg.c optional krpc | nfslockd | nfscl | nfsd
rpc/clnt_rc.c optional krpc | nfslockd | nfscl | nfsd
rpc/clnt_vc.c optional krpc | nfslockd | nfscl | nfsd
rpc/getnetconfig.c optional krpc | nfslockd | nfscl | nfsd
rpc/replay.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpc_callmsg.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpc_generic.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpc_prot.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpcb_clnt.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpcb_prot.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_auth.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_auth_unix.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_dg.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_generic.c optional krpc | nfslockd | nfscl | nfsd
rpc/svc_vc.c optional krpc | nfslockd | nfscl | nfsd
#
# Kernel RPC-over-TLS
#
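# A note on the entries that follow (inferred from their compile-with rules):
# the rpctlscd/rpctlssd headers, XDR routines and client stubs are generated
# at build time by running rpcgen on the .x definitions under
# rpc/rpcsec_tls, which is why each rule carries before-depend and clean
# directives.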
rpctlscd.h optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlscd.x" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/rpc/rpcsec_tls/rpctlscd.x | grep -v pthread.h > rpctlscd.h" \
no-obj no-implicit-rule before-depend local \
clean "rpctlscd.h"
rpctlscd_xdr.c optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlscd.x rpctlscd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/rpc/rpcsec_tls/rpctlscd.x -o rpctlscd_xdr.c" no-ctfconvert \
no-implicit-rule before-depend local \
clean "rpctlscd_xdr.c"
rpctlscd_clnt.c optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlscd.x rpctlscd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/rpc/rpcsec_tls/rpctlscd.x | grep -v string.h > rpctlscd_clnt.c" no-ctfconvert \
no-implicit-rule before-depend local \
clean "rpctlscd_clnt.c"
rpctlssd.h optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlssd.x" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/rpc/rpcsec_tls/rpctlssd.x | grep -v pthread.h > rpctlssd.h" \
no-obj no-implicit-rule before-depend local \
clean "rpctlssd.h"
rpctlssd_xdr.c optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlssd.x rpctlssd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/rpc/rpcsec_tls/rpctlssd.x -o rpctlssd_xdr.c" no-ctfconvert \
no-implicit-rule before-depend local \
clean "rpctlssd_xdr.c"
rpctlssd_clnt.c optional krpc | nfslockd | nfscl | nfsd \
dependency "$S/rpc/rpcsec_tls/rpctlssd.x rpctlssd.h" \
compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/rpc/rpcsec_tls/rpctlssd.x | grep -v string.h > rpctlssd_clnt.c" no-ctfconvert \
no-implicit-rule before-depend local \
clean "rpctlssd_clnt.c"
rpc/rpcsec_tls/rpctls_impl.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpcsec_tls/auth_tls.c optional krpc | nfslockd | nfscl | nfsd
rpc/rpcsec_gss/rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
rpc/rpcsec_gss/rpcsec_gss_conf.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
rpc/rpcsec_gss/rpcsec_gss_misc.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
rpc/rpcsec_gss/rpcsec_gss_prot.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
rpc/rpcsec_gss/svc_rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi
security/audit/audit.c optional audit
security/audit/audit_arg.c optional audit
security/audit/audit_bsm.c optional audit
security/audit/audit_bsm_db.c optional audit
security/audit/audit_bsm_klib.c optional audit
security/audit/audit_dtrace.c optional dtaudit audit | dtraceall audit compile-with "${CDDL_C}"
security/audit/audit_pipe.c optional audit
security/audit/audit_syscalls.c standard
security/audit/audit_trigger.c optional audit
security/audit/audit_worker.c optional audit
security/audit/bsm_domain.c optional audit
security/audit/bsm_errno.c optional audit
security/audit/bsm_fcntl.c optional audit
security/audit/bsm_socket_type.c optional audit
security/audit/bsm_token.c optional audit
security/mac/mac_audit.c optional mac audit
security/mac/mac_cred.c optional mac
security/mac/mac_kdb.c optional mac
security/mac/mac_framework.c optional mac
security/mac/mac_inet.c optional mac inet | mac inet6
security/mac/mac_inet6.c optional mac inet6
security/mac/mac_label.c optional mac
security/mac/mac_net.c optional mac
security/mac/mac_pipe.c optional mac
security/mac/mac_posix_sem.c optional mac
security/mac/mac_posix_shm.c optional mac
security/mac/mac_priv.c optional mac
security/mac/mac_process.c optional mac
security/mac/mac_socket.c optional mac
security/mac/mac_syscalls.c standard
security/mac/mac_system.c optional mac
security/mac/mac_sysv_msg.c optional mac
security/mac/mac_sysv_sem.c optional mac
security/mac/mac_sysv_shm.c optional mac
security/mac/mac_vfs.c optional mac
security/mac_biba/mac_biba.c optional mac_biba
security/mac_ddb/mac_ddb.c optional mac_ddb
security/mac_bsdextended/mac_bsdextended.c optional mac_bsdextended
security/mac_bsdextended/ugidfw_system.c optional mac_bsdextended
security/mac_bsdextended/ugidfw_vnode.c optional mac_bsdextended
security/mac_ifoff/mac_ifoff.c optional mac_ifoff
security/mac_lomac/mac_lomac.c optional mac_lomac
security/mac_mls/mac_mls.c optional mac_mls
security/mac_none/mac_none.c optional mac_none
security/mac_ntpd/mac_ntpd.c optional mac_ntpd
security/mac_partition/mac_partition.c optional mac_partition
security/mac_portacl/mac_portacl.c optional mac_portacl
security/mac_priority/mac_priority.c optional mac_priority
security/mac_seeotheruids/mac_seeotheruids.c optional mac_seeotheruids
security/mac_stub/mac_stub.c optional mac_stub
security/mac_test/mac_test.c optional mac_test
security/mac_veriexec/mac_veriexec.c optional mac_veriexec
security/mac_veriexec/veriexec_fingerprint.c optional mac_veriexec
security/mac_veriexec/veriexec_metadata.c optional mac_veriexec
security/mac_veriexec_parser/mac_veriexec_parser.c optional mac_veriexec mac_veriexec_parser
security/mac_veriexec/mac_veriexec_rmd160.c optional mac_veriexec_rmd160
security/mac_veriexec/mac_veriexec_sha1.c optional mac_veriexec_sha1
security/mac_veriexec/mac_veriexec_sha256.c optional mac_veriexec_sha256
security/mac_veriexec/mac_veriexec_sha384.c optional mac_veriexec_sha384
security/mac_veriexec/mac_veriexec_sha512.c optional mac_veriexec_sha512
teken/teken.c optional sc !SC_NO_TERM_TEKEN | vt
ufs/ffs/ffs_alloc.c optional ffs
ufs/ffs/ffs_balloc.c optional ffs
ufs/ffs/ffs_inode.c optional ffs
ufs/ffs/ffs_snapshot.c optional ffs
ufs/ffs/ffs_softdep.c optional ffs
ufs/ffs/ffs_subr.c optional ffs | geom_label
ufs/ffs/ffs_tables.c optional ffs | geom_label
ufs/ffs/ffs_vfsops.c optional ffs
ufs/ffs/ffs_vnops.c optional ffs
ufs/ffs/ffs_rawread.c optional ffs directio
ufs/ffs/ffs_suspend.c optional ffs
ufs/ufs/ufs_acl.c optional ffs
ufs/ufs/ufs_bmap.c optional ffs
ufs/ufs/ufs_dirhash.c optional ffs
ufs/ufs/ufs_extattr.c optional ffs
ufs/ufs/ufs_gjournal.c optional ffs UFS_GJOURNAL
ufs/ufs/ufs_inode.c optional ffs
ufs/ufs/ufs_lookup.c optional ffs
ufs/ufs/ufs_quota.c optional ffs
ufs/ufs/ufs_vfsops.c optional ffs
ufs/ufs/ufs_vnops.c optional ffs
vm/device_pager.c standard
vm/phys_pager.c standard
vm/redzone.c optional DEBUG_REDZONE
vm/sg_pager.c standard
vm/swap_pager.c standard
vm/uma_core.c standard
vm/uma_dbg.c standard
vm/memguard.c optional DEBUG_MEMGUARD
vm/vm_domainset.c standard
vm/vm_fault.c standard
vm/vm_glue.c standard
vm/vm_init.c standard
vm/vm_kern.c standard
vm/vm_map.c standard
vm/vm_meter.c standard
vm/vm_mmap.c standard
vm/vm_object.c standard
vm/vm_page.c standard
vm/vm_pageout.c standard
vm/vm_pager.c standard
vm/vm_phys.c standard
vm/vm_radix.c standard
vm/vm_reserv.c standard
vm/vm_swapout.c optional !NO_SWAPPING
vm/vm_swapout_dummy.c optional NO_SWAPPING
vm/vm_unix.c standard
vm/vnode_pager.c standard
xen/features.c optional xenhvm
xen/xen_common.c optional xenhvm
xen/xenbus/xenbus_if.m optional xenhvm
xen/xenbus/xenbus.c optional xenhvm
xen/xenbus/xenbusb_if.m optional xenhvm
xen/xenbus/xenbusb.c optional xenhvm
xen/xenbus/xenbusb_front.c optional xenhvm
xen/xenbus/xenbusb_back.c optional xenhvm
xen/xenmem/xenmem_if.m optional xenhvm
xdr/xdr.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_array.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_mbuf.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_mem.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_reference.c optional xdr | krpc | nfslockd | nfscl | nfsd
xdr/xdr_sizeof.c optional xdr | krpc | nfslockd | nfscl | nfsd
diff --git a/sys/contrib/openzfs/META b/sys/contrib/openzfs/META
index 8779e512f7be..e4b476aff112 100644
--- a/sys/contrib/openzfs/META
+++ b/sys/contrib/openzfs/META
@@ -1,10 +1,10 @@
Meta: 1
Name: zfs
Branch: 1.0
Version: 2.1.99
Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS
-Linux-Maximum: 6.2
+Linux-Maximum: 6.3
Linux-Minimum: 3.10
diff --git a/sys/contrib/openzfs/cmd/arc_summary b/sys/contrib/openzfs/cmd/arc_summary
index 5d10e903fcba..426e0207052d 100755
--- a/sys/contrib/openzfs/cmd/arc_summary
+++ b/sys/contrib/openzfs/cmd/arc_summary
@@ -1,1076 +1,1034 @@
#!/usr/bin/env python3
#
# Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
# Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
# Copyright (c) 2010-2011 Jason J. Hellenthal <jhell@DataIX.net>,
# Copyright (c) 2017 Scot W. Stevenson <scot.stevenson@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Print statistics on the ZFS ARC Cache and other information
Provides basic information on the ARC, its efficiency, the L2ARC (if present),
the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See
the in-source documentation and code at
https://github.com/openzfs/zfs/blob/master/module/zfs/arc.c for details.
The original introduction to arc_summary can be found at
http://cuddletech.com/?p=454
"""
import argparse
import os
import subprocess
import sys
import time
import errno
# We can't use env -S portably, and we need python3 -u so that pipes closed
# abruptly by the shell are handled the way we want, so...
import io
if isinstance(sys.__stderr__.buffer, io.BufferedWriter):
os.execv(sys.executable, [sys.executable, "-u"] + sys.argv)
DESCRIPTION = 'Print ARC and other statistics for OpenZFS'
INDENT = ' '*8
LINE_LENGTH = 72
DATE_FORMAT = '%a %b %d %H:%M:%S %Y'
TITLE = 'ZFS Subsystem Report'
SECTIONS = 'arc archits dmu l2arc spl tunables vdev zil'.split()
SECTION_HELP = 'print info from one section ('+' '.join(SECTIONS)+')'
# Tunables and SPL are handled separately because they come from
# different sources
SECTION_PATHS = {'arc': 'arcstats',
'dmu': 'dmu_tx',
'l2arc': 'arcstats', # L2ARC stuff lives in arcstats
- 'vdev': 'vdev_cache_stats',
'zfetch': 'zfetchstats',
'zil': 'zil'}
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-a', '--alternate', action='store_true', default=False,
help='use alternate formatting for tunables and SPL',
dest='alt')
parser.add_argument('-d', '--description', action='store_true', default=False,
help='print descriptions with tunables and SPL',
dest='desc')
parser.add_argument('-g', '--graph', action='store_true', default=False,
help='print graph on ARC use and exit', dest='graph')
parser.add_argument('-p', '--page', type=int, dest='page',
help='print page by number (DEPRECATED, use "-s")')
parser.add_argument('-r', '--raw', action='store_true', default=False,
help='dump all available data with minimal formatting',
dest='raw')
parser.add_argument('-s', '--section', dest='section', help=SECTION_HELP)
ARGS = parser.parse_args()
if sys.platform.startswith('freebsd'):
# Requires py36-sysctl on FreeBSD
import sysctl
- VDEV_CACHE_SIZE = 'vdev.cache_size'
-
def is_value(ctl):
return ctl.type != sysctl.CTLTYPE_NODE
def namefmt(ctl, base='vfs.zfs.'):
# base is removed from the name
cut = len(base)
return ctl.name[cut:]
def load_kstats(section):
base = 'kstat.zfs.misc.{section}.'.format(section=section)
fmt = lambda kstat: '{name} : {value}'.format(name=namefmt(kstat, base),
value=kstat.value)
kstats = sysctl.filter(base)
return [fmt(kstat) for kstat in kstats if is_value(kstat)]
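# Illustrative example: each entry returned above is a string such as
# 'hits : 12345' -- the 'kstat.zfs.misc.<section>.' prefix is stripped by
# namefmt(), and cleanup_line() later splits the name from the value.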
def get_params(base):
ctls = sysctl.filter(base)
return {namefmt(ctl): str(ctl.value) for ctl in ctls if is_value(ctl)}
def get_tunable_params():
return get_params('vfs.zfs')
def get_vdev_params():
return get_params('vfs.zfs.vdev')
def get_version_impl(request):
# FreeBSD reports versions for zpl and spa instead of zfs and spl.
name = {'zfs': 'zpl',
'spl': 'spa'}[request]
mib = 'vfs.zfs.version.{}'.format(name)
version = sysctl.filter(mib)[0].value
return '{} version {}'.format(name, version)
def get_descriptions(_request):
ctls = sysctl.filter('vfs.zfs')
return {namefmt(ctl): ctl.description for ctl in ctls if is_value(ctl)}
elif sys.platform.startswith('linux'):
KSTAT_PATH = '/proc/spl/kstat/zfs'
SPL_PATH = '/sys/module/spl/parameters'
TUNABLES_PATH = '/sys/module/zfs/parameters'
- VDEV_CACHE_SIZE = 'zfs_vdev_cache_size'
-
def load_kstats(section):
path = os.path.join(KSTAT_PATH, section)
with open(path) as f:
return list(f)[2:] # Get rid of header
def get_params(basepath):
"""Collect information on the Solaris Porting Layer (SPL) or the
tunables, depending on the PATH given. Does not check if PATH is
legal.
"""
result = {}
for name in os.listdir(basepath):
path = os.path.join(basepath, name)
with open(path) as f:
value = f.read()
result[name] = value.strip()
return result
def get_spl_params():
return get_params(SPL_PATH)
def get_tunable_params():
return get_params(TUNABLES_PATH)
def get_vdev_params():
return get_params(TUNABLES_PATH)
def get_version_impl(request):
# The original arc_summary called /sbin/modinfo/{spl,zfs} to get
# the version information. We switch to /sys/module/{spl,zfs}/version
# to make sure we get what is really loaded in the kernel
try:
with open("/sys/module/{}/version".format(request)) as f:
return f.read().strip()
except:
return "(unknown)"
def get_descriptions(request):
"""Get the descriptions of the Solaris Porting Layer (SPL) or the
tunables, return with minimal formatting.
"""
if request not in ('spl', 'zfs'):
print('ERROR: description of "{0}" requested'.format(request))
sys.exit(1)
descs = {}
target_prefix = 'parm:'
# We would prefer to do this with /sys/modules -- see the discussion at
# get_version() -- but there isn't a way to get the descriptions from
# there, so we fall back on modinfo
command = ["/sbin/modinfo", request, "-0"]
info = ''
try:
info = subprocess.run(command, stdout=subprocess.PIPE,
check=True, universal_newlines=True)
raw_output = info.stdout.split('\0')
except subprocess.CalledProcessError:
print("Error: Descriptions not available",
"(can't access kernel module)")
sys.exit(1)
for line in raw_output:
if not line.startswith(target_prefix):
continue
line = line[len(target_prefix):].strip()
name, raw_desc = line.split(':', 1)
desc = raw_desc.rsplit('(', 1)[0]
if desc == '':
desc = '(No description found)'
descs[name.strip()] = desc.strip()
return descs
def handle_unraisableException(exc_type, exc_value=None, exc_traceback=None,
err_msg=None, object=None):
handle_Exception(exc_type, object, exc_traceback)
def handle_Exception(ex_cls, ex, tb):
if ex_cls is KeyboardInterrupt:
sys.exit()
if ex_cls is BrokenPipeError:
# It turns out that while sys.exit() triggers an exception
# not handled message on Python 3.8+, os._exit() does not.
os._exit(0)
if ex_cls is OSError:
if ex.errno == errno.ENOTCONN:
sys.exit()
raise ex
if hasattr(sys,'unraisablehook'): # Python 3.8+
sys.unraisablehook = handle_unraisableException
sys.excepthook = handle_Exception
def cleanup_line(single_line):
"""Format a raw line of data from /proc and isolate the name value
part, returning a tuple with each. Currently, this gets rid of the
middle '4'. For example "arc_no_grow 4 0" returns the tuple
("arc_no_grow", "0").
"""
name, _, value = single_line.split()
return name, value
def draw_graph(kstats_dict):
"""Draw a primitive graph representing the basic information on the
ARC -- its size and the proportion used by MFU and MRU -- and quit.
We use max size of the ARC to calculate how full it is. This is a
very rough representation.
"""
arc_stats = isolate_section('arcstats', kstats_dict)
GRAPH_INDENT = ' '*4
GRAPH_WIDTH = 60
arc_size = f_bytes(arc_stats['size'])
arc_perc = f_perc(arc_stats['size'], arc_stats['c_max'])
mfu_size = f_bytes(arc_stats['mfu_size'])
mru_size = f_bytes(arc_stats['mru_size'])
meta_size = f_bytes(arc_stats['arc_meta_used'])
dnode_limit = f_bytes(arc_stats['arc_dnode_limit'])
dnode_size = f_bytes(arc_stats['dnode_size'])
info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} '
'DNODE {5} ({6})')
info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size,
meta_size, dnode_size, dnode_limit)
info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2)
info_line = GRAPH_INDENT+info_spc+info_line
graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+'
mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max']))
mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max']))
arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max']))
total_ticks = float(arc_perc)*GRAPH_WIDTH
mfu_ticks = mfu_perc*GRAPH_WIDTH
mru_ticks = mru_perc*GRAPH_WIDTH
other_ticks = total_ticks-(mfu_ticks+mru_ticks)
core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks)
core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form)))
core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|'
for line in ('', info_line, graph_line, core_line, graph_line, ''):
print(line)
def f_bytes(byte_string):
"""Return human-readable representation of a byte value in
powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal
points. Values smaller than one KiB are returned without
decimal points. Note "bytes" is a reserved keyword.
"""
prefixes = ([2**80, "YiB"], # yobibytes (yotta)
[2**70, "ZiB"], # zebibytes (zetta)
[2**60, "EiB"], # exbibytes (exa)
[2**50, "PiB"], # pebibytes (peta)
[2**40, "TiB"], # tebibytes (tera)
[2**30, "GiB"], # gibibytes (giga)
[2**20, "MiB"], # mebibytes (mega)
[2**10, "KiB"]) # kibibytes (kilo)
bites = int(byte_string)
if bites >= 2**10:
for limit, unit in prefixes:
if bites >= limit:
value = bites / limit
break
result = '{0:.1f} {1}'.format(value, unit)
else:
result = '{0} Bytes'.format(bites)
return result
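# Illustrative examples of f_bytes():
#   f_bytes('1536') -> '1.5 KiB'
#   f_bytes('512')  -> '512 Bytes'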
def f_hits(hits_string):
"""Create a human-readable representation of the number of hits.
The single-letter symbols used are SI to avoid the confusion caused
by the different "short scale" and "long scale" representations in
English, which use the same words for different values. See
https://en.wikipedia.org/wiki/Names_of_large_numbers and
https://physics.nist.gov/cuu/Units/prefixes.html
"""
numbers = ([10**24, 'Y'], # yotta (septillion)
[10**21, 'Z'], # zetta (sextillion)
[10**18, 'E'], # exa (quintillion)
[10**15, 'P'], # peta (quadrillion)
[10**12, 'T'], # tera (trillion)
[10**9, 'G'], # giga (billion)
[10**6, 'M'], # mega (million)
[10**3, 'k']) # kilo (thousand)
hits = int(hits_string)
if hits >= 1000:
for limit, symbol in numbers:
if hits >= limit:
value = hits/limit
break
result = "%0.1f%s" % (value, symbol)
else:
result = "%d" % hits
return result
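# Illustrative examples of f_hits():
#   f_hits('1500')    -> '1.5k'
#   f_hits('2500000') -> '2.5M'
#   f_hits('999')     -> '999'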
def f_perc(value1, value2):
"""Calculate percentage and return in human-readable form. If
rounding produces the result '0.0' even though the first number is
not zero, include a 'less-than' symbol to avoid confusion.
Division by zero is handled by returning 'n/a'; no error
is raised.
"""
v1 = float(value1)
v2 = float(value2)
try:
perc = 100 * v1/v2
except ZeroDivisionError:
result = 'n/a'
else:
result = '{0:0.1f} %'.format(perc)
if result == '0.0 %' and v1 > 0:
result = '< 0.1 %'
return result
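# Illustrative examples of f_perc():
#   f_perc(1, 4)     -> '25.0 %'
#   f_perc(1, 10000) -> '< 0.1 %'
#   f_perc(1, 0)     -> 'n/a'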
def format_raw_line(name, value):
"""For the --raw option for the tunable and SPL outputs, decide on the
correct formatting based on the --alternate flag.
"""
if ARGS.alt:
result = '{0}{1}={2}'.format(INDENT, name, value)
else:
# Right-align the value within the line length if it fits,
# otherwise just separate it from the name by a single space.
fit = LINE_LENGTH - len(INDENT) - len(name)
overflow = len(value) + 1
w = max(fit, overflow)
result = '{0}{1}{2:>{w}}'.format(INDENT, name, value, w=w)
return result
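# Illustrative behaviour of format_raw_line(): with the default layout the
# value is right-aligned so the line ends at column LINE_LENGTH (or, for very
# long names, is separated from the name by a single space); with --alternate
# the same pair is printed as INDENT + 'name=value'.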
def get_kstats():
"""Collect information on the ZFS subsystem. The step does not perform any
further processing, giving us the option to only work on what is actually
needed. The name "kstat" is a holdover from the Solaris utility of the same
name.
"""
result = {}
for section in SECTION_PATHS.values():
if section not in result:
result[section] = load_kstats(section)
return result
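# Illustrative shape of the result: a dict keyed by kstat section name, each
# mapping to that section's raw lines, e.g.
#   {'arcstats': ['hits : 12345', ...], 'zfetchstats': [...], ...}
# (on Linux the lines come straight from /proc/spl/kstat/zfs/<section>).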
def get_version(request):
"""Get the version number of ZFS or SPL on this machine for header.
Returns an error string, but does not raise an error, if we can't
get the ZFS/SPL version.
"""
if request not in ('spl', 'zfs'):
error_msg = '(ERROR: "{0}" requested)'.format(request)
return error_msg
return get_version_impl(request)
def print_header():
"""Print the initial heading with date and time as well as info on the
kernel and ZFS versions. This is not called for the graph.
"""
# datetime is now recommended over time but we keep the exact formatting
# from the older version of arc_summary in case there are scripts
# that expect it in this way
daydate = time.strftime(DATE_FORMAT)
spc_date = LINE_LENGTH-len(daydate)
sys_version = os.uname()
sys_msg = sys_version.sysname+' '+sys_version.release
zfs = get_version('zfs')
spc_zfs = LINE_LENGTH-len(zfs)
machine_msg = 'Machine: '+sys_version.nodename+' ('+sys_version.machine+')'
spl = get_version('spl')
spc_spl = LINE_LENGTH-len(spl)
print('\n'+('-'*LINE_LENGTH))
print('{0:<{spc}}{1}'.format(TITLE, daydate, spc=spc_date))
print('{0:<{spc}}{1}'.format(sys_msg, zfs, spc=spc_zfs))
print('{0:<{spc}}{1}\n'.format(machine_msg, spl, spc=spc_spl))
def print_raw(kstats_dict):
"""Print all available data from the system in a minimally sorted format.
This can be used as a source to be piped through 'grep'.
"""
sections = sorted(kstats_dict.keys())
for section in sections:
print('\n{0}:'.format(section.upper()))
lines = sorted(kstats_dict[section])
for line in lines:
name, value = cleanup_line(line)
print(format_raw_line(name, value))
# Tunables and SPL must be handled separately because they come from a
# different source and have descriptions the user might request
print()
section_spl()
section_tunables()
def isolate_section(section_name, kstats_dict):
"""From the complete information on all sections, retrieve only those
for one section.
"""
try:
section_data = kstats_dict[section_name]
except KeyError:
print('ERROR: Data on {0} not available'.format(section_name))
sys.exit(1)
section_dict = dict(cleanup_line(l) for l in section_data)
return section_dict
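# Illustrative example: isolate_section('arcstats', kstats) turns the raw
# lines for that section into a simple mapping such as {'hits': '12345', ...},
# which is what the section_* functions below index into.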
# Formatted output helper functions
def prt_1(text, value):
"""Print text and one value, no indent"""
spc = ' '*(LINE_LENGTH-(len(text)+len(value)))
print('{0}{spc}{1}'.format(text, value, spc=spc))
def prt_i1(text, value):
"""Print text and one value, with indent"""
spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(value)))
print(INDENT+'{0}{spc}{1}'.format(text, value, spc=spc))
def prt_2(text, value1, value2):
"""Print text and two values, no indent"""
values = '{0:>9} {1:>9}'.format(value1, value2)
spc = ' '*(LINE_LENGTH-(len(text)+len(values)+2))
print('{0}{spc} {1}'.format(text, values, spc=spc))
def prt_i2(text, value1, value2):
"""Print text and two values, with indent"""
values = '{0:>9} {1:>9}'.format(value1, value2)
spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(values)+2))
print(INDENT+'{0}{spc} {1}'.format(text, values, spc=spc))
# The section output concentrates on important parameters instead of
# being exhaustive (that is what the --raw parameter is for)
def section_arc(kstats_dict):
"""Give basic information on the ARC, MRU and MFU. This is the first
and most used section.
"""
arc_stats = isolate_section('arcstats', kstats_dict)
throttle = arc_stats['memory_throttle_count']
if throttle == '0':
health = 'HEALTHY'
else:
health = 'THROTTLED'
prt_1('ARC status:', health)
prt_i1('Memory throttle count:', throttle)
print()
arc_size = arc_stats['size']
arc_target_size = arc_stats['c']
arc_max = arc_stats['c_max']
arc_min = arc_stats['c_min']
meta = arc_stats['meta']
pd = arc_stats['pd']
pm = arc_stats['pm']
anon_data = arc_stats['anon_data']
anon_metadata = arc_stats['anon_metadata']
mfu_data = arc_stats['mfu_data']
mfu_metadata = arc_stats['mfu_metadata']
mru_data = arc_stats['mru_data']
mru_metadata = arc_stats['mru_metadata']
mfug_data = arc_stats['mfu_ghost_data']
mfug_metadata = arc_stats['mfu_ghost_metadata']
mrug_data = arc_stats['mru_ghost_data']
mrug_metadata = arc_stats['mru_ghost_metadata']
unc_data = arc_stats['uncached_data']
unc_metadata = arc_stats['uncached_metadata']
bonus_size = arc_stats['bonus_size']
dnode_limit = arc_stats['arc_dnode_limit']
dnode_size = arc_stats['dnode_size']
dbuf_size = arc_stats['dbuf_size']
hdr_size = arc_stats['hdr_size']
l2_hdr_size = arc_stats['l2_hdr_size']
abd_chunk_waste_size = arc_stats['abd_chunk_waste_size']
target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min))
prt_2('ARC size (current):',
f_perc(arc_size, arc_max), f_bytes(arc_size))
prt_i2('Target size (adaptive):',
f_perc(arc_target_size, arc_max), f_bytes(arc_target_size))
prt_i2('Min size (hard limit):',
f_perc(arc_min, arc_max), f_bytes(arc_min))
prt_i2('Max size (high water):',
target_size_ratio, f_bytes(arc_max))
caches_size = int(anon_data)+int(anon_metadata)+\
int(mfu_data)+int(mfu_metadata)+int(mru_data)+int(mru_metadata)+\
int(unc_data)+int(unc_metadata)
prt_i2('Anonymous data size:',
f_perc(anon_data, caches_size), f_bytes(anon_data))
prt_i2('Anonymous metadata size:',
f_perc(anon_metadata, caches_size), f_bytes(anon_metadata))
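# The calculations below appear to treat 'meta', 'pd' and 'pm' as 32-bit
# fixed-point fractions of s = 2**32 (an inference from the arithmetic, not
# stated in this file): e.g. the MFU data target fraction is
# (1 - pd/s) * (1 - meta/s), and dividing v by 65536 twice (i.e. by 2**32)
# converts that fraction into bytes of caches_size.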
s = 4294967296
v = (s-int(pd))*(s-int(meta))/s
prt_i2('MFU data target:', f_perc(v, s),
f_bytes(v / 65536 * caches_size / 65536))
prt_i2('MFU data size:',
f_perc(mfu_data, caches_size), f_bytes(mfu_data))
prt_i1('MFU ghost data size:', f_bytes(mfug_data))
v = (s-int(pm))*int(meta)/s
prt_i2('MFU metadata target:', f_perc(v, s),
f_bytes(v / 65536 * caches_size / 65536))
prt_i2('MFU metadata size:',
f_perc(mfu_metadata, caches_size), f_bytes(mfu_metadata))
prt_i1('MFU ghost metadata size:', f_bytes(mfug_metadata))
v = int(pd)*(s-int(meta))/s
prt_i2('MRU data target:', f_perc(v, s),
f_bytes(v / 65536 * caches_size / 65536))
prt_i2('MRU data size:',
f_perc(mru_data, caches_size), f_bytes(mru_data))
prt_i1('MRU ghost data size:', f_bytes(mrug_data))
v = int(pm)*int(meta)/s
prt_i2('MRU metadata target:', f_perc(v, s),
f_bytes(v / 65536 * caches_size / 65536))
prt_i2('MRU metadata size:',
f_perc(mru_metadata, caches_size), f_bytes(mru_metadata))
prt_i1('MRU ghost metadata size:', f_bytes(mrug_metadata))
prt_i2('Uncached data size:',
f_perc(unc_data, caches_size), f_bytes(unc_data))
prt_i2('Uncached metadata size:',
f_perc(unc_metadata, caches_size), f_bytes(unc_metadata))
prt_i2('Bonus size:',
f_perc(bonus_size, arc_size), f_bytes(bonus_size))
prt_i2('Dnode cache target:',
f_perc(dnode_limit, arc_max), f_bytes(dnode_limit))
prt_i2('Dnode cache size:',
f_perc(dnode_size, dnode_limit), f_bytes(dnode_size))
prt_i2('Dbuf size:',
f_perc(dbuf_size, arc_size), f_bytes(dbuf_size))
prt_i2('Header size:',
f_perc(hdr_size, arc_size), f_bytes(hdr_size))
prt_i2('L2 header size:',
f_perc(l2_hdr_size, arc_size), f_bytes(l2_hdr_size))
prt_i2('ABD chunk waste size:',
f_perc(abd_chunk_waste_size, arc_size), f_bytes(abd_chunk_waste_size))
print()
print('ARC hash breakdown:')
prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max']))
prt_i2('Elements current:',
f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']),
f_hits(arc_stats['hash_elements']))
prt_i1('Collisions:', f_hits(arc_stats['hash_collisions']))
prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max']))
prt_i1('Chains:', f_hits(arc_stats['hash_chains']))
print()
print('ARC misc:')
prt_i1('Deleted:', f_hits(arc_stats['deleted']))
prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
prt_i1('Eviction skips due to L2 writes:',
f_hits(arc_stats['evict_l2_skip']))
prt_i1('L2 cached evictions:', f_bytes(arc_stats['evict_l2_cached']))
prt_i1('L2 eligible evictions:', f_bytes(arc_stats['evict_l2_eligible']))
prt_i2('L2 eligible MFU evictions:',
f_perc(arc_stats['evict_l2_eligible_mfu'],
arc_stats['evict_l2_eligible']),
f_bytes(arc_stats['evict_l2_eligible_mfu']))
prt_i2('L2 eligible MRU evictions:',
f_perc(arc_stats['evict_l2_eligible_mru'],
arc_stats['evict_l2_eligible']),
f_bytes(arc_stats['evict_l2_eligible_mru']))
prt_i1('L2 ineligible evictions:',
f_bytes(arc_stats['evict_l2_ineligible']))
print()
def section_archits(kstats_dict):
"""Print information on how the caches are accessed ("arc hits").
"""
arc_stats = isolate_section('arcstats', kstats_dict)
all_accesses = int(arc_stats['hits'])+int(arc_stats['iohits'])+\
int(arc_stats['misses'])
prt_1('ARC total accesses:', f_hits(all_accesses))
ta_todo = (('Total hits:', arc_stats['hits']),
('Total I/O hits:', arc_stats['iohits']),
('Total misses:', arc_stats['misses']))
for title, value in ta_todo:
prt_i2(title, f_perc(value, all_accesses), f_hits(value))
print()
dd_total = int(arc_stats['demand_data_hits']) +\
int(arc_stats['demand_data_iohits']) +\
int(arc_stats['demand_data_misses'])
prt_2('ARC demand data accesses:', f_perc(dd_total, all_accesses),
f_hits(dd_total))
dd_todo = (('Demand data hits:', arc_stats['demand_data_hits']),
('Demand data I/O hits:', arc_stats['demand_data_iohits']),
('Demand data misses:', arc_stats['demand_data_misses']))
for title, value in dd_todo:
prt_i2(title, f_perc(value, dd_total), f_hits(value))
print()
dm_total = int(arc_stats['demand_metadata_hits']) +\
int(arc_stats['demand_metadata_iohits']) +\
int(arc_stats['demand_metadata_misses'])
prt_2('ARC demand metadata accesses:', f_perc(dm_total, all_accesses),
f_hits(dm_total))
dm_todo = (('Demand metadata hits:', arc_stats['demand_metadata_hits']),
('Demand metadata I/O hits:',
arc_stats['demand_metadata_iohits']),
('Demand metadata misses:', arc_stats['demand_metadata_misses']))
for title, value in dm_todo:
prt_i2(title, f_perc(value, dm_total), f_hits(value))
print()
pd_total = int(arc_stats['prefetch_data_hits']) +\
int(arc_stats['prefetch_data_iohits']) +\
int(arc_stats['prefetch_data_misses'])
prt_2('ARC prefetch data accesses:', f_perc(pd_total, all_accesses),
f_hits(pd_total))
pd_todo = (('Prefetch data hits:', arc_stats['prefetch_data_hits']),
('Prefetch data I/O hits:', arc_stats['prefetch_data_iohits']),
('Prefetch data misses:', arc_stats['prefetch_data_misses']))
for title, value in pd_todo:
prt_i2(title, f_perc(value, pd_total), f_hits(value))
print()
pm_total = int(arc_stats['prefetch_metadata_hits']) +\
int(arc_stats['prefetch_metadata_iohits']) +\
int(arc_stats['prefetch_metadata_misses'])
prt_2('ARC prefetch metadata accesses:', f_perc(pm_total, all_accesses),
f_hits(pm_total))
pm_todo = (('Prefetch metadata hits:',
arc_stats['prefetch_metadata_hits']),
('Prefetch metadata I/O hits:',
arc_stats['prefetch_metadata_iohits']),
('Prefetch metadata misses:',
arc_stats['prefetch_metadata_misses']))
for title, value in pm_todo:
prt_i2(title, f_perc(value, pm_total), f_hits(value))
print()
all_prefetches = int(arc_stats['predictive_prefetch'])+\
int(arc_stats['prescient_prefetch'])
prt_2('ARC predictive prefetches:',
f_perc(arc_stats['predictive_prefetch'], all_prefetches),
f_hits(arc_stats['predictive_prefetch']))
prt_i2('Demand hits after predictive:',
f_perc(arc_stats['demand_hit_predictive_prefetch'],
arc_stats['predictive_prefetch']),
f_hits(arc_stats['demand_hit_predictive_prefetch']))
prt_i2('Demand I/O hits after predictive:',
f_perc(arc_stats['demand_iohit_predictive_prefetch'],
arc_stats['predictive_prefetch']),
f_hits(arc_stats['demand_iohit_predictive_prefetch']))
never = int(arc_stats['predictive_prefetch']) -\
int(arc_stats['demand_hit_predictive_prefetch']) -\
int(arc_stats['demand_iohit_predictive_prefetch'])
prt_i2('Never demanded after predictive:',
f_perc(never, arc_stats['predictive_prefetch']),
f_hits(never))
print()
prt_2('ARC prescient prefetches:',
f_perc(arc_stats['prescient_prefetch'], all_prefetches),
f_hits(arc_stats['prescient_prefetch']))
prt_i2('Demand hits after prescient:',
f_perc(arc_stats['demand_hit_prescient_prefetch'],
arc_stats['prescient_prefetch']),
f_hits(arc_stats['demand_hit_prescient_prefetch']))
prt_i2('Demand I/O hits after prescient:',
f_perc(arc_stats['demand_iohit_prescient_prefetch'],
arc_stats['prescient_prefetch']),
f_hits(arc_stats['demand_iohit_prescient_prefetch']))
never = int(arc_stats['prescient_prefetch'])-\
int(arc_stats['demand_hit_prescient_prefetch'])-\
int(arc_stats['demand_iohit_prescient_prefetch'])
prt_i2('Never demanded after prescient:',
f_perc(never, arc_stats['prescient_prefetch']),
f_hits(never))
print()
print('ARC states hits of all accesses:')
cl_todo = (('Most frequently used (MFU):', arc_stats['mfu_hits']),
('Most recently used (MRU):', arc_stats['mru_hits']),
('Most frequently used (MFU) ghost:',
arc_stats['mfu_ghost_hits']),
('Most recently used (MRU) ghost:',
arc_stats['mru_ghost_hits']),
('Uncached:', arc_stats['uncached_hits']))
for title, value in cl_todo:
prt_i2(title, f_perc(value, all_accesses), f_hits(value))
print()
def section_dmu(kstats_dict):
"""Collect information on the DMU"""
zfetch_stats = isolate_section('zfetchstats', kstats_dict)
zfetch_access_total = int(zfetch_stats['hits'])+int(zfetch_stats['misses'])
prt_1('DMU predictive prefetcher calls:', f_hits(zfetch_access_total))
prt_i2('Stream hits:',
f_perc(zfetch_stats['hits'], zfetch_access_total),
f_hits(zfetch_stats['hits']))
prt_i2('Stream misses:',
f_perc(zfetch_stats['misses'], zfetch_access_total),
f_hits(zfetch_stats['misses']))
prt_i2('Streams limit reached:',
f_perc(zfetch_stats['max_streams'], zfetch_stats['misses']),
f_hits(zfetch_stats['max_streams']))
prt_i1('Prefetches issued', f_hits(zfetch_stats['io_issued']))
print()
def section_l2arc(kstats_dict):
"""Collect information on L2ARC device if present. If not, tell user
that we're skipping the section.
"""
# The L2ARC statistics live in the same section as the normal ARC stuff
arc_stats = isolate_section('arcstats', kstats_dict)
if arc_stats['l2_size'] == '0':
print('L2ARC not detected, skipping section\n')
return
l2_errors = int(arc_stats['l2_writes_error']) +\
int(arc_stats['l2_cksum_bad']) +\
int(arc_stats['l2_io_error'])
l2_access_total = int(arc_stats['l2_hits'])+int(arc_stats['l2_misses'])
health = 'HEALTHY'
if l2_errors > 0:
health = 'DEGRADED'
prt_1('L2ARC status:', health)
l2_todo = (('Low memory aborts:', 'l2_abort_lowmem'),
('Free on write:', 'l2_free_on_write'),
('R/W clashes:', 'l2_rw_clash'),
('Bad checksums:', 'l2_cksum_bad'),
- ('I/O errors:', 'l2_io_error'))
+ ('Read errors:', 'l2_io_error'),
+ ('Write errors:', 'l2_writes_error'))
for title, value in l2_todo:
prt_i1(title, f_hits(arc_stats[value]))
print()
prt_1('L2ARC size (adaptive):', f_bytes(arc_stats['l2_size']))
prt_i2('Compressed:', f_perc(arc_stats['l2_asize'], arc_stats['l2_size']),
f_bytes(arc_stats['l2_asize']))
prt_i2('Header size:',
f_perc(arc_stats['l2_hdr_size'], arc_stats['l2_size']),
f_bytes(arc_stats['l2_hdr_size']))
prt_i2('MFU allocated size:',
f_perc(arc_stats['l2_mfu_asize'], arc_stats['l2_asize']),
f_bytes(arc_stats['l2_mfu_asize']))
prt_i2('MRU allocated size:',
f_perc(arc_stats['l2_mru_asize'], arc_stats['l2_asize']),
f_bytes(arc_stats['l2_mru_asize']))
prt_i2('Prefetch allocated size:',
f_perc(arc_stats['l2_prefetch_asize'], arc_stats['l2_asize']),
f_bytes(arc_stats['l2_prefetch_asize']))
prt_i2('Data (buffer content) allocated size:',
f_perc(arc_stats['l2_bufc_data_asize'], arc_stats['l2_asize']),
f_bytes(arc_stats['l2_bufc_data_asize']))
prt_i2('Metadata (buffer content) allocated size:',
f_perc(arc_stats['l2_bufc_metadata_asize'], arc_stats['l2_asize']),
f_bytes(arc_stats['l2_bufc_metadata_asize']))
print()
prt_1('L2ARC breakdown:', f_hits(l2_access_total))
prt_i2('Hit ratio:',
f_perc(arc_stats['l2_hits'], l2_access_total),
f_hits(arc_stats['l2_hits']))
prt_i2('Miss ratio:',
f_perc(arc_stats['l2_misses'], l2_access_total),
f_hits(arc_stats['l2_misses']))
- prt_i1('Feeds:', f_hits(arc_stats['l2_feeds']))
print()
- print('L2ARC writes:')
-
- if arc_stats['l2_writes_done'] != arc_stats['l2_writes_sent']:
- prt_i2('Writes sent:', 'FAULTED', f_hits(arc_stats['l2_writes_sent']))
- prt_i2('Done ratio:',
- f_perc(arc_stats['l2_writes_done'],
- arc_stats['l2_writes_sent']),
- f_hits(arc_stats['l2_writes_done']))
- prt_i2('Error ratio:',
- f_perc(arc_stats['l2_writes_error'],
- arc_stats['l2_writes_sent']),
- f_hits(arc_stats['l2_writes_error']))
- else:
- prt_i2('Writes sent:', '100 %', f_hits(arc_stats['l2_writes_sent']))
+ print('L2ARC I/O:')
+ prt_i2('Reads:',
+ f_bytes(arc_stats['l2_read_bytes']),
+ f_hits(arc_stats['l2_hits']))
+ prt_i2('Writes:',
+ f_bytes(arc_stats['l2_write_bytes']),
+ f_hits(arc_stats['l2_writes_sent']))
print()
print('L2ARC evicts:')
- prt_i1('Lock retries:', f_hits(arc_stats['l2_evict_lock_retry']))
- prt_i1('Upon reading:', f_hits(arc_stats['l2_evict_reading']))
+ prt_i1('L1 cached:', f_hits(arc_stats['l2_evict_l1cached']))
+ prt_i1('While reading:', f_hits(arc_stats['l2_evict_reading']))
print()
def section_spl(*_):
"""Print the SPL parameters, if requested with alternative format
and/or descriptions. This does not use kstats.
"""
if sys.platform.startswith('freebsd'):
# No SPL support in FreeBSD
return
spls = get_spl_params()
keylist = sorted(spls.keys())
print('Solaris Porting Layer (SPL):')
if ARGS.desc:
descriptions = get_descriptions('spl')
for key in keylist:
value = spls[key]
if ARGS.desc:
try:
print(INDENT+'#', descriptions[key])
except KeyError:
print(INDENT+'# (No description found)') # paranoid
print(format_raw_line(key, value))
print()
def section_tunables(*_):
"""Print the tunables, if requested with alternative format and/or
descriptions. This does not use kstats.
"""
tunables = get_tunable_params()
keylist = sorted(tunables.keys())
print('Tunables:')
if ARGS.desc:
descriptions = get_descriptions('zfs')
for key in keylist:
value = tunables[key]
if ARGS.desc:
try:
print(INDENT+'#', descriptions[key])
except KeyError:
print(INDENT+'# (No description found)') # paranoid
print(format_raw_line(key, value))
print()
-def section_vdev(kstats_dict):
- """Collect information on VDEV caches"""
-
- # Currently [Nov 2017] the VDEV cache is disabled, because it is actually
- # harmful. When this is the case, we just skip the whole entry. See
- # https://github.com/openzfs/zfs/blob/master/module/zfs/vdev_cache.c
- # for details
- tunables = get_vdev_params()
-
- if tunables[VDEV_CACHE_SIZE] == '0':
- print('VDEV cache disabled, skipping section\n')
- return
-
- vdev_stats = isolate_section('vdev_cache_stats', kstats_dict)
-
- vdev_cache_total = int(vdev_stats['hits']) +\
- int(vdev_stats['misses']) +\
- int(vdev_stats['delegations'])
-
- prt_1('VDEV cache summary:', f_hits(vdev_cache_total))
- prt_i2('Hit ratio:', f_perc(vdev_stats['hits'], vdev_cache_total),
- f_hits(vdev_stats['hits']))
- prt_i2('Miss ratio:', f_perc(vdev_stats['misses'], vdev_cache_total),
- f_hits(vdev_stats['misses']))
- prt_i2('Delegations:', f_perc(vdev_stats['delegations'], vdev_cache_total),
- f_hits(vdev_stats['delegations']))
- print()
-
-
def section_zil(kstats_dict):
"""Collect information on the ZFS Intent Log. Some of the information
taken from https://github.com/openzfs/zfs/blob/master/include/sys/zil.h
"""
zil_stats = isolate_section('zil', kstats_dict)
prt_1('ZIL committed transactions:',
f_hits(zil_stats['zil_itx_count']))
prt_i1('Commit requests:', f_hits(zil_stats['zil_commit_count']))
prt_i1('Flushes to stable storage:',
f_hits(zil_stats['zil_commit_writer_count']))
prt_i2('Transactions to SLOG storage pool:',
f_bytes(zil_stats['zil_itx_metaslab_slog_bytes']),
f_hits(zil_stats['zil_itx_metaslab_slog_count']))
prt_i2('Transactions to non-SLOG storage pool:',
f_bytes(zil_stats['zil_itx_metaslab_normal_bytes']),
f_hits(zil_stats['zil_itx_metaslab_normal_count']))
print()
section_calls = {'arc': section_arc,
'archits': section_archits,
'dmu': section_dmu,
'l2arc': section_l2arc,
'spl': section_spl,
'tunables': section_tunables,
- 'vdev': section_vdev,
'zil': section_zil}
def main():
"""Run program. The options to draw a graph and to print all data raw are
treated separately because they come with their own call.
"""
kstats = get_kstats()
if ARGS.graph:
draw_graph(kstats)
sys.exit(0)
print_header()
if ARGS.raw:
print_raw(kstats)
elif ARGS.section:
try:
section_calls[ARGS.section](kstats)
except KeyError:
print('Error: Section "{0}" unknown'.format(ARGS.section))
sys.exit(1)
elif ARGS.page:
print('WARNING: Pages are deprecated, please use "--section"\n')
pages_to_calls = {1: 'arc',
2: 'archits',
3: 'l2arc',
4: 'dmu',
5: 'vdev',
6: 'tunables'}
try:
call = pages_to_calls[ARGS.page]
except KeyError:
print('Error: Page "{0}" not supported'.format(ARGS.page))
sys.exit(1)
else:
section_calls[call](kstats)
else:
# If no parameters were given, we print all sections. We might want to
# change the sequence by hand
calls = sorted(section_calls.keys())
for section in calls:
section_calls[section](kstats)
sys.exit(0)
if __name__ == '__main__':
main()
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index 5ab13b470dc0..04a10c4eedd7 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -1,9252 +1,9338 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
* Copyright (c) 2015, 2017, Intel Corporation.
* Copyright (c) 2020 Datto Inc.
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
* Copyright (c) 2021 Allan Jude
* Copyright (c) 2021 Toomas Soome <tsoome@me.com>
+ * Copyright (c) 2023, Klara Inc.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include <getopt.h>
#include <openssl/evp.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_bookmark.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_send.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_crypt.h>
#include <sys/dsl_scan.h>
#include <sys/btree.h>
#include <zfs_comutil.h>
#include <sys/zstd/zstd.h>
#include <libnvpair.h>
#include <libzutil.h>
#include "zdb.h"
#define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \
zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \
zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \
(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ? \
DMU_OT_ZAP_OTHER : \
(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ? \
DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
/* Some platforms require part of inode IDs to be remapped */
#ifdef __APPLE__
#define ZDB_MAP_OBJECT_ID(obj) INO_XNUTOZFS(obj, 2)
#else
#define ZDB_MAP_OBJECT_ID(obj) (obj)
#endif
static const char *
zdb_ot_name(dmu_object_type_t type)
{
if (type < DMU_OT_NUMTYPES)
return (dmu_ot[type].ot_name);
else if ((type & DMU_OT_NEWTYPE) &&
((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
return (dmu_ot_byteswap[type & DMU_OT_BYTESWAP_MASK].ob_name);
else
return ("UNKNOWN");
}
extern int reference_tracking_enable;
extern int zfs_recover;
extern uint_t zfs_vdev_async_read_max_active;
extern boolean_t spa_load_verify_dryrun;
extern boolean_t spa_mode_readable_spacemaps;
extern uint_t zfs_reconstruct_indirect_combinations_max;
extern uint_t zfs_btree_verify_intensity;
static const char cmdname[] = "zdb";
uint8_t dump_opt[256];
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
static uint64_t *zopt_metaslab = NULL;
static unsigned zopt_metaslab_args = 0;
typedef struct zopt_object_range {
uint64_t zor_obj_start;
uint64_t zor_obj_end;
uint64_t zor_flags;
} zopt_object_range_t;
static zopt_object_range_t *zopt_object_ranges = NULL;
static unsigned zopt_object_args = 0;
static int flagbits[256];
#define ZOR_FLAG_PLAIN_FILE 0x0001
#define ZOR_FLAG_DIRECTORY 0x0002
#define ZOR_FLAG_SPACE_MAP 0x0004
#define ZOR_FLAG_ZAP 0x0008
#define ZOR_FLAG_ALL_TYPES -1
#define ZOR_SUPPORTED_FLAGS (ZOR_FLAG_PLAIN_FILE | \
ZOR_FLAG_DIRECTORY | \
ZOR_FLAG_SPACE_MAP | \
ZOR_FLAG_ZAP)
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
static int leaked_objects = 0;
static range_tree_t *mos_refd_objs;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *,
boolean_t);
static void mos_obj_refd(uint64_t);
static void mos_obj_refd_multiple(uint64_t);
static int dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx);
typedef struct sublivelist_verify {
/* FREE's that haven't yet matched to an ALLOC, in one sub-livelist */
zfs_btree_t sv_pair;
/* ALLOC's without a matching FREE, accumulates across sub-livelists */
zfs_btree_t sv_leftover;
} sublivelist_verify_t;
static int
livelist_compare(const void *larg, const void *rarg)
{
const blkptr_t *l = larg;
const blkptr_t *r = rarg;
/* Sort them according to dva[0] */
uint64_t l_dva0_vdev, r_dva0_vdev;
l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);
if (l_dva0_vdev < r_dva0_vdev)
return (-1);
else if (l_dva0_vdev > r_dva0_vdev)
return (+1);
/* if vdevs are equal, sort by offsets. */
uint64_t l_dva0_offset;
uint64_t r_dva0_offset;
l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
if (l_dva0_offset < r_dva0_offset) {
return (-1);
} else if (l_dva0_offset > r_dva0_offset) {
return (+1);
}
/*
* Since we're storing blkptrs without cancelling FREE/ALLOC pairs,
* it's possible the offsets are equal. In that case, sort by txg
*/
if (l->blk_birth < r->blk_birth) {
return (-1);
} else if (l->blk_birth > r->blk_birth) {
return (+1);
}
return (0);
}
typedef struct sublivelist_verify_block {
dva_t svb_dva;
/*
* We need this to check if the block marked as allocated
* in the livelist was freed (and potentially reallocated)
* in the metaslab spacemaps at a later TXG.
*/
uint64_t svb_allocated_txg;
} sublivelist_verify_block_t;
static void zdb_print_blkptr(const blkptr_t *bp, int flags);
typedef struct sublivelist_verify_block_refcnt {
/* block pointer entry in livelist being verified */
blkptr_t svbr_blk;
/*
* Refcount gets incremented to 1 when we encounter the first
* FREE entry for the svbr block pointer and a node for it
* is created in our ZDB verification/tracking metadata.
*
* As we encounter more FREE entries we increment this counter
* and similarly decrement it whenever we find the respective
* ALLOC entries for this block.
*
* When the refcount gets to 0 it means that all the FREE and
* ALLOC entries of this block have paired up and we no longer
* need to track it in our verification logic (e.g. the node
* containing this struct in our verification data structure
* should be freed).
*
* [refer to sublivelist_verify_blkptr() for the actual code]
*/
uint32_t svbr_refcnt;
} sublivelist_verify_block_refcnt_t;
static int
sublivelist_block_refcnt_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_refcnt_t *l = larg;
const sublivelist_verify_block_refcnt_t *r = rarg;
return (livelist_compare(&l->svbr_blk, &r->svbr_blk));
}
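/*
 * bpobj iteration callback for a single sub-livelist: FREE entries are
 * tracked in sv_pair until a matching ALLOC is found, while ALLOC
 * entries with no pending FREE are recorded per-DVA in sv_leftover.
 */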
static int
sublivelist_verify_blkptr(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx)
{
ASSERT3P(tx, ==, NULL);
struct sublivelist_verify *sv = arg;
sublivelist_verify_block_refcnt_t current = {
.svbr_blk = *bp,
/*
* Start with 1 in case this is the first free entry.
* This field is not used for our B-Tree comparisons
* anyway.
*/
.svbr_refcnt = 1,
};
zfs_btree_index_t where;
sublivelist_verify_block_refcnt_t *pair =
zfs_btree_find(&sv->sv_pair, &current, &where);
if (free) {
if (pair == NULL) {
/* first free entry for this block pointer */
zfs_btree_add(&sv->sv_pair, &current);
} else {
pair->svbr_refcnt++;
}
} else {
if (pair == NULL) {
/* block that is currently marked as allocated */
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
if (DVA_IS_EMPTY(&bp->blk_dva[i]))
break;
sublivelist_verify_block_t svb = {
.svb_dva = bp->blk_dva[i],
.svb_allocated_txg = bp->blk_birth
};
if (zfs_btree_find(&sv->sv_leftover, &svb,
&where) == NULL) {
zfs_btree_add_idx(&sv->sv_leftover,
&svb, &where);
}
}
} else {
/* alloc matches a free entry */
pair->svbr_refcnt--;
if (pair->svbr_refcnt == 0) {
/* all allocs and frees have been matched */
zfs_btree_remove_idx(&sv->sv_pair, &where);
}
}
}
return (0);
}
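/*
 * Verify one sub-livelist (deadlist entry): iterate its bpobj, pairing
 * FREEs with ALLOCs, and report any FREEs that remain unmatched.
 */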
static int
sublivelist_verify_func(void *args, dsl_deadlist_entry_t *dle)
{
int err;
struct sublivelist_verify *sv = args;
- zfs_btree_create(&sv->sv_pair, sublivelist_block_refcnt_compare,
+ zfs_btree_create(&sv->sv_pair, sublivelist_block_refcnt_compare, NULL,
sizeof (sublivelist_verify_block_refcnt_t));
err = bpobj_iterate_nofree(&dle->dle_bpobj, sublivelist_verify_blkptr,
sv, NULL);
sublivelist_verify_block_refcnt_t *e;
zfs_btree_index_t *cookie = NULL;
while ((e = zfs_btree_destroy_nodes(&sv->sv_pair, &cookie)) != NULL) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
&e->svbr_blk, B_TRUE);
(void) printf("\tERROR: %d unmatched FREE(s): %s\n",
e->svbr_refcnt, blkbuf);
}
zfs_btree_destroy(&sv->sv_pair);
return (err);
}
static int
livelist_block_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_t *l = larg;
const sublivelist_verify_block_t *r = rarg;
if (DVA_GET_VDEV(&l->svb_dva) < DVA_GET_VDEV(&r->svb_dva))
return (-1);
else if (DVA_GET_VDEV(&l->svb_dva) > DVA_GET_VDEV(&r->svb_dva))
return (+1);
if (DVA_GET_OFFSET(&l->svb_dva) < DVA_GET_OFFSET(&r->svb_dva))
return (-1);
else if (DVA_GET_OFFSET(&l->svb_dva) > DVA_GET_OFFSET(&r->svb_dva))
return (+1);
if (DVA_GET_ASIZE(&l->svb_dva) < DVA_GET_ASIZE(&r->svb_dva))
return (-1);
else if (DVA_GET_ASIZE(&l->svb_dva) > DVA_GET_ASIZE(&r->svb_dva))
return (+1);
return (0);
}
/*
* Check for errors in a livelist while tracking all unfreed ALLOCs in the
* sublivelist_verify_t: sv->sv_leftover
*/
static void
livelist_verify(dsl_deadlist_t *dl, void *arg)
{
sublivelist_verify_t *sv = arg;
dsl_deadlist_iterate(dl, sublivelist_verify_func, sv);
}
/*
* Check for errors in the livelist entry and discard the intermediary
* data structures
*/
static int
sublivelist_verify_lightweight(void *args, dsl_deadlist_entry_t *dle)
{
(void) args;
sublivelist_verify_t sv;
- zfs_btree_create(&sv.sv_leftover, livelist_block_compare,
+ zfs_btree_create(&sv.sv_leftover, livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
int err = sublivelist_verify_func(&sv, dle);
zfs_btree_clear(&sv.sv_leftover);
zfs_btree_destroy(&sv.sv_leftover);
return (err);
}
typedef struct metaslab_verify {
/*
* Tree containing all the leftover ALLOCs from the livelists
* that are part of this metaslab.
*/
zfs_btree_t mv_livelist_allocs;
/*
* Metaslab information.
*/
uint64_t mv_vdid;
uint64_t mv_msid;
uint64_t mv_start;
uint64_t mv_end;
/*
* What's currently allocated for this metaslab.
*/
range_tree_t *mv_allocated;
} metaslab_verify_t;
typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme, uint64_t txg,
void *arg);
typedef struct unflushed_iter_cb_arg {
spa_t *uic_spa;
uint64_t uic_txg;
void *uic_arg;
zdb_log_sm_cb_t uic_cb;
} unflushed_iter_cb_arg_t;
static int
iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
{
unflushed_iter_cb_arg_t *uic = arg;
return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
}
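/*
 * Walk every log space map in the pool, in txg order, and invoke the
 * given callback for each space map entry.
 */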
static void
iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
unflushed_iter_cb_arg_t uic = {
.uic_spa = spa,
.uic_txg = sls->sls_txg,
.uic_arg = arg,
.uic_cb = cb
};
VERIFY0(space_map_iterate(sm, space_map_length(sm),
iterate_through_spacemap_logs_cb, &uic));
space_map_close(sm);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static void
verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
uint64_t offset, uint64_t size)
{
sublivelist_verify_block_t svb = {{{0}}};
DVA_SET_VDEV(&svb.svb_dva, mv->mv_vdid);
DVA_SET_OFFSET(&svb.svb_dva, offset);
DVA_SET_ASIZE(&svb.svb_dva, size);
zfs_btree_index_t where;
uint64_t end_offset = offset + size;
/*
* Look for an exact match for the spacemap entry in the livelist
* entries. Then, look for other livelist entries that fall within
* the range of the spacemap entry, as it may have been condensed.
*/
sublivelist_verify_block_t *found =
zfs_btree_find(&mv->mv_livelist_allocs, &svb, &where);
if (found == NULL) {
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where);
}
for (; found != NULL && DVA_GET_VDEV(&found->svb_dva) == mv->mv_vdid &&
DVA_GET_OFFSET(&found->svb_dva) < end_offset;
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
if (found->svb_allocated_txg <= txg) {
(void) printf("ERROR: Livelist ALLOC [%llx:%llx] "
"from TXG %llx FREED at TXG %llx\n",
(u_longlong_t)DVA_GET_OFFSET(&found->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&found->svb_dva),
(u_longlong_t)found->svb_allocated_txg,
(u_longlong_t)txg);
}
}
}
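/*
 * Space map entry callback for a single metaslab: report double ALLOCs
 * and double FREEs against mv_allocated, and cross-check every FREE
 * against the leftover livelist ALLOCs recorded for this metaslab.
 */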
static int
metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t txg = sme->sme_txg;
if (sme->sme_type == SM_ALLOC) {
if (range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE ALLOC: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_add(mv->mv_allocated,
offset, size);
}
} else {
if (!range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE FREE: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_remove(mv->mv_allocated,
offset, size);
}
}
if (sme->sme_type != SM_ALLOC) {
/*
* If something is freed in the spacemap, verify that
* it is not listed as allocated in the livelist.
*/
verify_livelist_allocs(mv, txg, offset, size);
}
return (0);
}
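/*
 * Filter log space map entries down to the metaslab being verified
 * (matching vdev and metaslab id, txg not older than the metaslab's
 * unflushed txg) and pass them to metaslab_spacemap_validation_cb().
 */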
static int
spacemap_check_sm_log_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
if (vdev_id != mv->mv_vdid)
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
if (ms->ms_id != mv->mv_msid)
return (0);
if (txg < metaslab_unflushed_txg(ms))
return (0);
ASSERT3U(txg, ==, sme->sme_txg);
return (metaslab_spacemap_validation_cb(sme, mv));
}
static void
spacemap_check_sm_log(spa_t *spa, metaslab_verify_t *mv)
{
iterate_through_spacemap_logs(spa, spacemap_check_sm_log_cb, mv);
}
static void
spacemap_check_ms_sm(space_map_t *sm, metaslab_verify_t *mv)
{
if (sm == NULL)
return;
VERIFY0(space_map_iterate(sm, space_map_length(sm),
metaslab_spacemap_validation_cb, mv));
}
static void iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg);
/*
* Transfer blocks from sv_leftover tree to the mv_livelist_allocs if
* they are part of that metaslab (mv_msid).
*/
static void
mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
{
zfs_btree_index_t where;
sublivelist_verify_block_t *svb;
ASSERT3U(zfs_btree_numnodes(&mv->mv_livelist_allocs), ==, 0);
for (svb = zfs_btree_first(&sv->sv_leftover, &where);
svb != NULL;
svb = zfs_btree_next(&sv->sv_leftover, &where, &where)) {
if (DVA_GET_VDEV(&svb->svb_dva) != mv->mv_vdid)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start &&
(DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_start) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) >= mv->mv_end)
continue;
if ((DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_end) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
zfs_btree_add(&mv->mv_livelist_allocs, svb);
}
for (svb = zfs_btree_first(&mv->mv_livelist_allocs, &where);
svb != NULL;
svb = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
zfs_btree_remove(&sv->sv_leftover, svb);
}
}
/*
* [Livelist Check]
* Iterate through all the sublivelists and:
* - report leftover frees (**)
* - record leftover ALLOCs together with their TXG [see Cross Check]
*
* (**) Note: Double ALLOCs are valid in datasets that have dedup
* enabled. Similarly double FREEs are allowed as well but
* only if they pair up with a corresponding ALLOC entry once
* we are done with our sublivelist iteration.
*
* [Spacemap Check]
* for each metaslab:
* - iterate over spacemap and then the metaslab's entries in the
* spacemap log, then report any double FREEs and ALLOCs (do not
* blow up).
*
* [Cross Check]
* After finishing the Livelist Check phase and while being in the
* Spacemap Check phase, we find all the recorded leftover ALLOCs
* of the livelist check that are part of the metaslab that we are
* currently looking at in the Spacemap Check. We report any entries
* that are marked as ALLOCs in the livelists but have been actually
* freed (and potentially allocated again) after their TXG stamp in
* the spacemaps. Also report any ALLOCs from the livelists that
* belong to indirect vdevs (e.g. their vdev completed removal).
*
* Note that this will miss Log Spacemap entries that cancelled each other
* out before being flushed to the metaslab, so we are not guaranteed
* to match all erroneous ALLOCs.
*/
static void
livelist_metaslab_validate(spa_t *spa)
{
(void) printf("Verifying deleted livelist entries\n");
sublivelist_verify_t sv;
- zfs_btree_create(&sv.sv_leftover, livelist_block_compare,
+ zfs_btree_create(&sv.sv_leftover, livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
iterate_deleted_livelists(spa, livelist_verify, &sv);
(void) printf("Verifying metaslab entries\n");
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
for (uint64_t mid = 0; mid < vd->vdev_ms_count; mid++) {
metaslab_t *m = vd->vdev_ms[mid];
(void) fprintf(stderr,
"\rverifying concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)mid,
(longlong_t)vd->vdev_ms_count);
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, m,
&start, &shift);
metaslab_verify_t mv;
mv.mv_allocated = range_tree_create(NULL,
type, NULL, start, shift);
mv.mv_vdid = vd->vdev_id;
mv.mv_msid = m->ms_id;
mv.mv_start = m->ms_start;
mv.mv_end = m->ms_start + m->ms_size;
zfs_btree_create(&mv.mv_livelist_allocs,
- livelist_block_compare,
+ livelist_block_compare, NULL,
sizeof (sublivelist_verify_block_t));
mv_populate_livelist_allocs(&mv, &sv);
spacemap_check_ms_sm(m->ms_sm, &mv);
spacemap_check_sm_log(spa, &mv);
range_tree_vacate(mv.mv_allocated, NULL, NULL);
range_tree_destroy(mv.mv_allocated);
zfs_btree_clear(&mv.mv_livelist_allocs);
zfs_btree_destroy(&mv.mv_livelist_allocs);
}
}
(void) fprintf(stderr, "\n");
/*
* If there are any segments in the leftover tree after we walked
* through all the metaslabs in the concrete vdevs then this means
* that we have segments in the livelists that belong to indirect
* vdevs and are marked as allocated.
*/
if (zfs_btree_numnodes(&sv.sv_leftover) == 0) {
zfs_btree_destroy(&sv.sv_leftover);
return;
}
(void) printf("ERROR: Found livelist blocks marked as allocated "
"for indirect vdevs:\n");
zfs_btree_index_t *where = NULL;
sublivelist_verify_block_t *svb;
while ((svb = zfs_btree_destroy_nodes(&sv.sv_leftover, &where)) !=
NULL) {
int vdev_id = DVA_GET_VDEV(&svb->svb_dva);
ASSERT3U(vdev_id, <, rvd->vdev_children);
vdev_t *vd = rvd->vdev_child[vdev_id];
ASSERT(!vdev_is_concrete(vd));
(void) printf("<%d:%llx:%llx> TXG %llx\n",
vdev_id, (u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva),
(u_longlong_t)svb->svb_allocated_txg);
}
(void) printf("\n");
zfs_btree_destroy(&sv.sv_leftover);
}
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
usage(void)
{
(void) fprintf(stderr,
"Usage:\t%s [-AbcdDFGhikLMPsvXy] [-e [-V] [-p <path> ...]] "
"[-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[-K <key>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]]\n"
"\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] [-K <key>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]\n"
+ "\t%s -B [-e [-V] [-p <path> ...]] [-I <inflight I/Os>]\n"
+ "\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
+ "\t\t[-K <key>] <poolname>/<objset id> [<backupflags>]\n"
"\t%s [-v] <bookmark>\n"
"\t%s -C [-A] [-U <cache>]\n"
"\t%s -l [-Aqu] <device>\n"
"\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
"[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
"\t%s -O [-K <key>] <dataset> <path>\n"
"\t%s -r [-K <key>] <dataset> <path> <destination>\n"
"\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
"\t%s -E [-A] word0:word1:...:word15\n"
"\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
"<poolname>\n\n",
cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
- cmdname, cmdname, cmdname, cmdname);
+ cmdname, cmdname, cmdname, cmdname, cmdname);
(void) fprintf(stderr, " Dataset name must include at least one "
"separator character '/' or '@'\n");
(void) fprintf(stderr, " If dataset name is specified, only that "
"dataset is dumped\n");
(void) fprintf(stderr, " If object numbers or object number "
"ranges are specified, only those\n"
" objects or ranges are dumped.\n\n");
(void) fprintf(stderr,
" Object ranges take the form <start>:<end>[:<flags>]\n"
" start Starting object number\n"
" end Ending object number, or -1 for no upper bound\n"
" flags Optional flags to select object types:\n"
" A All objects (this is the default)\n"
" d ZFS directories\n"
" f ZFS files \n"
" m SPA space maps\n"
" z ZAPs\n"
" - Negate effect of next flag\n\n");
(void) fprintf(stderr, " Options to control amount of output:\n");
(void) fprintf(stderr, " -b --block-stats "
"block statistics\n");
+ (void) fprintf(stderr, " -B --backup "
+ "backup stream\n");
(void) fprintf(stderr, " -c --checksum "
"checksum all metadata (twice for all data) blocks\n");
(void) fprintf(stderr, " -C --config "
"config (or cachefile if alone)\n");
(void) fprintf(stderr, " -d --datasets "
"dataset(s)\n");
(void) fprintf(stderr, " -D --dedup-stats "
"dedup statistics\n");
(void) fprintf(stderr, " -E --embedded-block-pointer=INTEGER\n"
" decode and display block "
"from an embedded block pointer\n");
(void) fprintf(stderr, " -h --history "
"pool history\n");
(void) fprintf(stderr, " -i --intent-logs "
"intent logs\n");
(void) fprintf(stderr, " -l --label "
"read label contents\n");
(void) fprintf(stderr, " -k --checkpointed-state "
"examine the checkpointed state of the pool\n");
(void) fprintf(stderr, " -L --disable-leak-tracking "
"disable leak tracking (do not load spacemaps)\n");
(void) fprintf(stderr, " -m --metaslabs "
"metaslabs\n");
(void) fprintf(stderr, " -M --metaslab-groups "
"metaslab groups\n");
(void) fprintf(stderr, " -O --object-lookups "
"perform object lookups by path\n");
(void) fprintf(stderr, " -r --copy-object "
"copy an object by path to file\n");
(void) fprintf(stderr, " -R --read-block "
"read and display block from a device\n");
(void) fprintf(stderr, " -s --io-stats "
"report stats on zdb's I/O\n");
(void) fprintf(stderr, " -S --simulate-dedup "
"simulate dedup to measure effect\n");
(void) fprintf(stderr, " -v --verbose "
"verbose (applies to all others)\n");
(void) fprintf(stderr, " -y --livelist "
"perform livelist and metaslab validation on any livelists being "
"deleted\n\n");
(void) fprintf(stderr, " Below options are intended for use "
"with other options:\n");
(void) fprintf(stderr, " -A --ignore-assertions "
"ignore assertions (-A), enable panic recovery (-AA) or both "
"(-AAA)\n");
(void) fprintf(stderr, " -e --exported "
"pool is exported/destroyed/has altroot/not in a cachefile\n");
(void) fprintf(stderr, " -F --automatic-rewind "
"attempt automatic rewind within safe range of transaction "
"groups\n");
(void) fprintf(stderr, " -G --dump-debug-msg "
"dump zfs_dbgmsg buffer before exiting\n");
(void) fprintf(stderr, " -I --inflight=INTEGER "
"specify the maximum number of checksumming I/Os "
"[default is 200]\n");
(void) fprintf(stderr, " -K --key=KEY "
"decryption key for encrypted dataset\n");
(void) fprintf(stderr, " -o --option=\"OPTION=INTEGER\" "
"set global variable to an unsigned 32-bit integer\n");
(void) fprintf(stderr, " -p --path=PATH "
"use one or more with -e to specify path to vdev dir\n");
(void) fprintf(stderr, " -P --parseable "
"print numbers in parseable form\n");
(void) fprintf(stderr, " -q --skip-label "
"don't print label contents\n");
(void) fprintf(stderr, " -t --txg=INTEGER "
"highest txg to use when searching for uberblocks\n");
(void) fprintf(stderr, " -u --uberblock "
"uberblock\n");
(void) fprintf(stderr, " -U --cachefile=PATH "
"use alternate cachefile\n");
(void) fprintf(stderr, " -V --verbatim "
"do verbatim import\n");
(void) fprintf(stderr, " -x --dump-blocks=PATH "
"dump all read blocks into specified directory\n");
(void) fprintf(stderr, " -X --extreme-rewind "
"attempt extreme rewind (does not work with dataset)\n");
(void) fprintf(stderr, " -Y --all-reconstruction "
"attempt all reconstruction combinations for split blocks\n");
(void) fprintf(stderr, " -Z --zstd-headers "
"show ZSTD headers \n");
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
exit(1);
}
static void
dump_debug_buffer(void)
{
if (dump_opt['G']) {
(void) printf("\n");
(void) fflush(stdout);
zfs_dbgmsg_print("zdb");
}
}
/*
* Called for usage errors that are discovered after a call to spa_open(),
* dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
*/
static void
fatal(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) fprintf(stderr, "%s: ", cmdname);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
(void) fprintf(stderr, "\n");
dump_debug_buffer();
exit(1);
}
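/* Object viewer: read a packed nvlist object and print its contents. */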
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) size;
nvlist_t *nv;
size_t nvsize = *(uint64_t *)data;
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
umem_free(packed, nvsize);
dump_nvlist(nv, 8);
nvlist_free(nv);
}
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) size;
spa_history_phys_t *shp = data;
if (shp == NULL)
return;
(void) printf("\t\tpool_create_len = %llu\n",
(u_longlong_t)shp->sh_pool_create_len);
(void) printf("\t\tphys_max_off = %llu\n",
(u_longlong_t)shp->sh_phys_max_off);
(void) printf("\t\tbof = %llu\n",
(u_longlong_t)shp->sh_bof);
(void) printf("\t\teof = %llu\n",
(u_longlong_t)shp->sh_eof);
(void) printf("\t\trecords_lost = %llu\n",
(u_longlong_t)shp->sh_records_lost);
}
static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
else
nicenum(num, buf, buflen);
}
static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;
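/*
 * Print a histogram as rows of star bars, scaled so that the largest
 * bucket spans the full histo_width; leading and trailing empty
 * buckets are skipped.
 */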
static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
int i;
int minidx = size - 1;
int maxidx = 0;
uint64_t max = 0;
for (i = 0; i < size; i++) {
if (histo[i] == 0)
continue;
if (histo[i] > max)
max = histo[i];
if (i > maxidx)
maxidx = i;
if (i < minidx)
minidx = i;
}
if (max < histo_width)
max = histo_width;
for (i = minidx; i <= maxidx; i++) {
(void) printf("\t\t\t%3u: %6llu %s\n",
i + offset, (u_longlong_t)histo[i],
&histo_stars[(max - histo[i]) * histo_width / max]);
}
}
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
int error;
zap_stats_t zs;
error = zap_get_stats(os, object, &zs);
if (error)
return;
if (zs.zs_ptrtbl_len == 0) {
ASSERT(zs.zs_num_blocks == 1);
(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
(u_longlong_t)zs.zs_blocksize,
(u_longlong_t)zs.zs_num_entries);
return;
}
(void) printf("\tFat ZAP stats:\n");
(void) printf("\t\tPointer table:\n");
(void) printf("\t\t\t%llu elements\n",
(u_longlong_t)zs.zs_ptrtbl_len);
(void) printf("\t\t\tzt_blk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_blk);
(void) printf("\t\t\tzt_numblks: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_numblks);
(void) printf("\t\t\tzt_shift: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_shift);
(void) printf("\t\t\tzt_blks_copied: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_blks_copied);
(void) printf("\t\t\tzt_nextblk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_nextblk);
(void) printf("\t\tZAP entries: %llu\n",
(u_longlong_t)zs.zs_num_entries);
(void) printf("\t\tLeaf blocks: %llu\n",
(u_longlong_t)zs.zs_num_leafs);
(void) printf("\t\tTotal blocks: %llu\n",
(u_longlong_t)zs.zs_num_blocks);
(void) printf("\t\tzap_block_type: 0x%llx\n",
(u_longlong_t)zs.zs_block_type);
(void) printf("\t\tzap_magic: 0x%llx\n",
(u_longlong_t)zs.zs_magic);
(void) printf("\t\tzap_salt: 0x%llx\n",
(u_longlong_t)zs.zs_salt);
(void) printf("\t\tLeafs with 2^n pointers:\n");
dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks with n*5 entries:\n");
dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks n/10 full:\n");
dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tEntries with n chunks:\n");
dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBuckets with n entries:\n");
dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
(void) printf("\tUNKNOWN OBJECT TYPE\n");
}
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
uint64_t *arr;
uint64_t oursize;
if (dump_opt['d'] < 6)
return;
if (data == NULL) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(os, object, &doi));
size = doi.doi_max_offset;
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
arr = kmem_alloc(oursize, KM_SLEEP);
int err = dmu_read(os, object, 0, oursize, arr, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(arr, oursize);
return;
}
} else {
/*
* Even though the allocation is already done in this code path,
* we still cap the size to prevent excessive printing.
*/
oursize = MIN(size, 1 << 20);
arr = data;
}
if (size == 0) {
if (data == NULL)
kmem_free(arr, oursize);
(void) printf("\t\t[]\n");
return;
}
(void) printf("\t\t[%0llx", (u_longlong_t)arr[0]);
for (size_t i = 1; i * sizeof (uint64_t) < oursize; i++) {
if (i % 4 != 0)
(void) printf(", %0llx", (u_longlong_t)arr[i]);
else
(void) printf(",\n\t\t%0llx", (u_longlong_t)arr[i]);
}
if (oursize != size)
(void) printf(", ... ");
(void) printf("]\n");
if (data == NULL)
kmem_free(arr, oursize);
}
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
void *prop;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers, prop);
if (attr.za_integer_length == 1) {
if (strcmp(attr.za_name,
DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
strcmp(attr.za_name,
DSL_CRYPTO_KEY_HMAC_KEY) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_IV) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_MAC) == 0 ||
strcmp(attr.za_name, DMU_POOL_CHECKSUM_SALT) == 0) {
uint8_t *u8 = prop;
for (i = 0; i < attr.za_num_integers; i++) {
(void) printf("%02x", u8[i]);
}
} else {
(void) printf("%s", (char *)prop);
}
} else {
for (i = 0; i < attr.za_num_integers; i++) {
switch (attr.za_integer_length) {
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
break;
case 4:
(void) printf("%u ",
((uint32_t *)prop)[i]);
break;
case 8:
(void) printf("%lld ",
(u_longlong_t)((int64_t *)prop)[i]);
break;
}
}
}
(void) printf("\n");
umem_free(prop, attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
bpobj_phys_t *bpop = data;
uint64_t i;
char bytes[32], comp[32], uncomp[32];
/* make sure the output won't get truncated */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (bpop == NULL)
return;
zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
(void) printf("\t\tbytes = %s\n", bytes);
if (size >= BPOBJ_SIZE_V1) {
(void) printf("\t\tcomp = %s\n", comp);
(void) printf("\t\tuncomp = %s\n", uncomp);
}
if (size >= BPOBJ_SIZE_V2) {
(void) printf("\t\tsubobjs = %llu\n",
(u_longlong_t)bpop->bpo_subobjs);
(void) printf("\t\tnum_subobjs = %llu\n",
(u_longlong_t)bpop->bpo_num_subobjs);
}
if (size >= sizeof (*bpop)) {
(void) printf("\t\tnum_freed = %llu\n",
(u_longlong_t)bpop->bpo_num_freed);
}
if (dump_opt['d'] < 5)
return;
for (i = 0; i < bpop->bpo_num_blkptrs; i++) {
char blkbuf[BP_SPRINTF_LEN];
blkptr_t bp;
int err = dmu_read(os, object,
i * sizeof (bp), sizeof (bp), &bp, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
break;
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp,
BP_GET_FREE(&bp));
(void) printf("\t%s\n", blkbuf);
}
}
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dmu_object_info_t doi;
int64_t i;
VERIFY0(dmu_object_info(os, object, &doi));
uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);
int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(subobjs, doi.doi_max_offset);
return;
}
int64_t last_nonzero = -1;
for (i = 0; i < doi.doi_max_offset / 8; i++) {
if (subobjs[i] != 0)
last_nonzero = i;
}
for (i = 0; i <= last_nonzero; i++) {
(void) printf("\t%llu\n", (u_longlong_t)subobjs[i]);
}
kmem_free(subobjs, doi.doi_max_offset);
}
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
dump_zap_stats(os, object);
/* contents are printed elsewhere, properly decoded */
}
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
(void) printf(" %llx : [%d:%d:%d]\n",
(u_longlong_t)attr.za_first_integer,
(int)ATTR_LENGTH(attr.za_first_integer),
(int)ATTR_BSWAP(attr.za_first_integer),
(int)ATTR_NUM(attr.za_first_integer));
}
zap_cursor_fini(&zc);
}
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
uint16_t *layout_attrs;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = [", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
VERIFY(attr.za_integer_length == 2);
layout_attrs = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
VERIFY(zap_lookup(os, object, attr.za_name,
attr.za_integer_length,
attr.za_num_integers, layout_attrs) == 0);
for (i = 0; i != attr.za_num_integers; i++)
(void) printf(" %d ", (int)layout_attrs[i]);
(void) printf("]\n");
umem_free(layout_attrs,
attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
zap_cursor_t zc;
zap_attribute_t attr;
const char *typenames[] = {
/* 0 */ "not specified",
/* 1 */ "FIFO",
/* 2 */ "Character Device",
/* 3 */ "3 (invalid)",
/* 4 */ "Directory",
/* 5 */ "5 (invalid)",
/* 6 */ "Block Device",
/* 7 */ "7 (invalid)",
/* 8 */ "Regular File",
/* 9 */ "9 (invalid)",
/* 10 */ "Symbolic Link",
/* 11 */ "11 (invalid)",
/* 12 */ "Socket",
/* 13 */ "Door",
/* 14 */ "Event Port",
/* 15 */ "15 (invalid)",
};
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = %lld (type: %s)\n",
attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
}
zap_cursor_fini(&zc);
}
static int
get_dtl_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_ops->vdev_op_leaf) {
space_map_t *sm = vd->vdev_dtl_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
return (1);
return (0);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_dtl_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_metaslab_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd) {
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
space_map_t *sm = vd->vdev_ms[m]->ms_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
refcount++;
}
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_metaslab_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_obsolete_refcount(vdev_t *vd)
{
uint64_t obsolete_sm_object;
int refcount = 0;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (vd->vdev_top == vd && obsolete_sm_object != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
obsolete_sm_object, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
refcount++;
}
} else {
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
ASSERT3U(obsolete_sm_object, ==, 0);
}
for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]);
}
return (refcount);
}
static int
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
uint64_t prev_obj =
spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
if (prev_obj != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
return (1);
}
}
return (0);
}
static int
get_checkpoint_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
zap_contains(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
refcount++;
for (uint64_t c = 0; c < vd->vdev_children; c++)
refcount += get_checkpoint_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_log_spacemap_refcount(spa_t *spa)
{
return (avl_numnodes(&spa->spa_sm_logs_by_txg));
}
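/*
 * Compare the SPA_FEATURE_SPACEMAP_HISTOGRAM feature refcount with the
 * number of space maps counted on disk (DTL, metaslab, obsolete,
 * checkpoint and log space maps); returns 2 on mismatch, 0 otherwise.
 */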
static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
uint64_t actual_refcount;
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
&expected_refcount);
actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
actual_refcount += get_log_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
"actual %lld\n",
(longlong_t)expected_refcount,
(longlong_t)actual_refcount);
return (2);
}
return (0);
}
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
"INVALID", "INVALID", "INVALID", "INVALID" };
if (sm == NULL)
return;
(void) printf("space map object %llu:\n",
(longlong_t)sm->sm_object);
(void) printf(" smp_length = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_length);
(void) printf(" smp_alloc = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_alloc);
if (dump_opt['d'] < 6 && dump_opt['m'] < 4)
return;
/*
* Print out the freelist entries in both encoded and decoded form.
*/
uint8_t mapshift = sm->sm_shift;
int64_t alloc = 0;
uint64_t word, entry_id = 0;
for (uint64_t offset = 0; offset < space_map_length(sm);
offset += sizeof (word)) {
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (word), &word, DMU_READ_PREFETCH));
if (sm_entry_is_debug(word)) {
uint64_t de_txg = SM_DEBUG_TXG_DECODE(word);
uint64_t de_sync_pass = SM_DEBUG_SYNCPASS_DECODE(word);
if (de_txg == 0) {
(void) printf(
"\t [%6llu] PADDING\n",
(u_longlong_t)entry_id);
} else {
(void) printf(
"\t [%6llu] %s: txg %llu pass %llu\n",
(u_longlong_t)entry_id,
ddata[SM_DEBUG_ACTION_DECODE(word)],
(u_longlong_t)de_txg,
(u_longlong_t)de_sync_pass);
}
entry_id++;
continue;
}
uint8_t words;
char entry_type;
uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;
if (sm_entry_is_single_word(word)) {
entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
sm->sm_start;
entry_run = SM_RUN_DECODE(word) << mapshift;
words = 1;
} else {
/* it is a two-word entry so we read another word */
ASSERT(sm_entry_is_double_word(word));
uint64_t extra_word;
offset += sizeof (extra_word);
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (extra_word), &extra_word,
DMU_READ_PREFETCH));
ASSERT3U(offset, <=, space_map_length(sm));
entry_run = SM2_RUN_DECODE(word) << mapshift;
entry_vdev = SM2_VDEV_DECODE(word);
entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM2_OFFSET_DECODE(extra_word) <<
mapshift) + sm->sm_start;
words = 2;
}
(void) printf("\t [%6llu] %c range:"
" %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
(u_longlong_t)entry_id,
entry_type, (u_longlong_t)entry_off,
(u_longlong_t)(entry_off + entry_run),
(u_longlong_t)entry_run,
(u_longlong_t)entry_vdev, words);
if (entry_type == 'A')
alloc += entry_run;
else
alloc -= entry_run;
entry_id++;
}
if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%lld) INCONSISTENT "
"with space map summary (%lld)\n",
(longlong_t)space_map_allocated(sm), (longlong_t)alloc);
}
}
static void
dump_metaslab_stats(metaslab_t *msp)
{
char maxbuf[32];
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
/* make sure nicenum has enough space */
_Static_assert(sizeof (maxbuf) >= NN_NUMBUF_SZ, "maxbuf truncated");
zdb_nicenum(metaslab_largest_allocatable(msp), maxbuf, sizeof (maxbuf));
(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
"segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
"freepct", free_pct);
(void) printf("\tIn-memory histogram:\n");
dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
dump_metaslab(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
char freebuf[32];
zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
sizeof (freebuf));
(void) printf(
"\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
(u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
(u_longlong_t)space_map_object(sm), freebuf);
if (dump_opt['m'] > 2 && !dump_opt['L']) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
range_tree_stat_verify(msp->ms_allocatable);
dump_metaslab_stats(msp);
metaslab_unload(msp);
mutex_exit(&msp->ms_lock);
}
if (dump_opt['m'] > 1 && sm != NULL &&
spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
/*
* The space map histogram represents free space in chunks
* of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
*/
(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
(u_longlong_t)msp->ms_fragmentation);
dump_histogram(sm->sm_phys->smp_histogram,
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
if (vd->vdev_ops == &vdev_draid_ops)
ASSERT3U(msp->ms_size, <=, 1ULL << vd->vdev_ms_shift);
else
ASSERT3U(msp->ms_size, ==, 1ULL << vd->vdev_ms_shift);
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
(void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
(u_longlong_t)metaslab_unflushed_txg(msp));
}
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *bias_str = "";
if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
bias_str = VDEV_ALLOC_BIAS_LOG;
} else if (alloc_bias == VDEV_BIAS_SPECIAL) {
bias_str = VDEV_ALLOC_BIAS_SPECIAL;
} else if (alloc_bias == VDEV_BIAS_DEDUP) {
bias_str = VDEV_ALLOC_BIAS_DEDUP;
}
uint64_t ms_flush_data_obj = 0;
if (vd->vdev_top_zap != 0) {
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &ms_flush_data_obj);
if (error != ENOENT) {
ASSERT0(error);
}
}
(void) printf("\tvdev %10llu %s",
(u_longlong_t)vd->vdev_id, bias_str);
if (ms_flush_data_obj != 0) {
(void) printf(" ms_unflushed_phys object %llu",
(u_longlong_t)ms_flush_data_obj);
}
(void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %12s\n",
"---------------", "-------------------",
"---------------", "------------");
}
static void
dump_metaslab_groups(spa_t *spa, boolean_t show_special)
{
vdev_t *rvd = spa->spa_root_vdev;
metaslab_class_t *mc = spa_normal_class(spa);
metaslab_class_t *smc = spa_special_class(spa);
uint64_t fragmentation;
metaslab_class_histogram_verify(mc);
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || (mg->mg_class != mc &&
(!show_special || mg->mg_class != smc)))
continue;
metaslab_group_histogram_verify(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
"fragmentation",
(u_longlong_t)tvd->vdev_id,
(u_longlong_t)tvd->vdev_ms_count);
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
(void) printf("%3s\n", "-");
} else {
(void) printf("%3llu%%\n",
(u_longlong_t)mg->mg_fragmentation);
}
dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
fragmentation = metaslab_class_fragmentation(mc);
if (fragmentation == ZFS_FRAG_INVALID)
(void) printf("\t%3s\n", "-");
else
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
print_vdev_indirect(vdev_t *vd)
{
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
if (vim == NULL) {
ASSERT3P(vib, ==, NULL);
return;
}
ASSERT3U(vdev_indirect_mapping_object(vim), ==,
vic->vic_mapping_object);
ASSERT3U(vdev_indirect_births_object(vib), ==,
vic->vic_births_object);
(void) printf("indirect births obj %llu:\n",
(longlong_t)vic->vic_births_object);
(void) printf(" vib_count = %llu\n",
(longlong_t)vdev_indirect_births_count(vib));
for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
vdev_indirect_birth_entry_phys_t *cur_vibe =
&vib->vib_entries[i];
(void) printf("\toffset %llx -> txg %llu\n",
(longlong_t)cur_vibe->vibe_offset,
(longlong_t)cur_vibe->vibe_phys_birth_txg);
}
(void) printf("\n");
(void) printf("indirect mapping obj %llu:\n",
(longlong_t)vic->vic_mapping_object);
(void) printf(" vim_max_offset = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_max_offset(vim));
(void) printf(" vim_bytes_mapped = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
(void) printf(" vim_count = %llu\n",
(longlong_t)vdev_indirect_mapping_num_entries(vim));
if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
return;
uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
(void) printf("\t<%llx:%llx:%llx> -> "
"<%llx:%llx:%llx> (%x obsolete)\n",
(longlong_t)vd->vdev_id,
(longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
(longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
counts[i]);
}
(void) printf("\n");
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
(void) printf("obsolete space map object %llu:\n",
(u_longlong_t)obsolete_sm_object);
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
obsolete_sm_object);
dump_spacemap(mos, vd->vdev_obsolete_sm);
(void) printf("\n");
}
}
static void
dump_metaslabs(spa_t *spa)
{
vdev_t *vd, *rvd = spa->spa_root_vdev;
uint64_t m, c = 0, children = rvd->vdev_children;
(void) printf("\nMetaslabs:\n");
if (!dump_opt['d'] && zopt_metaslab_args > 0) {
c = zopt_metaslab[0];
if (c >= children)
(void) fatal("bad vdev id: %llu", (u_longlong_t)c);
if (zopt_metaslab_args > 1) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
for (m = 1; m < zopt_metaslab_args; m++) {
if (zopt_metaslab[m] < vd->vdev_ms_count)
dump_metaslab(
vd->vdev_ms[zopt_metaslab[m]]);
else
(void) fprintf(stderr, "bad metaslab "
"number %llu\n",
(u_longlong_t)zopt_metaslab[m]);
}
(void) printf("\n");
return;
}
children = c + 1;
}
for (; c < children; c++) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
print_vdev_indirect(vd);
for (m = 0; m < vd->vdev_ms_count; m++)
dump_metaslab(vd->vdev_ms[m]);
(void) printf("\n");
}
}
static void
dump_log_spacemaps(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
(void) printf("\nLog Space Maps in Pool:\n");
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
(void) printf("Log Spacemap object %llu txg %llu\n",
(u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
dump_spacemap(spa->spa_meta_objset, sm);
space_map_close(sm);
}
(void) printf("\n");
}
static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
const ddt_phys_t *ddp = dde->dde_phys;
const ddt_key_t *ddk = &dde->dde_key;
const char *types[4] = { "ditto", "single", "double", "triple" };
char blkbuf[BP_SPRINTF_LEN];
blkptr_t blk;
int p;
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
(void) printf("index %llx refcnt %llu %s %s\n",
(u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
types[p], blkbuf);
}
}
static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
double rL, rP, rD, D, dedup, compress, copies;
if (dds->dds_blocks == 0)
return;
rL = (double)dds->dds_ref_lsize;
rP = (double)dds->dds_ref_psize;
rD = (double)dds->dds_ref_dsize;
D = (double)dds->dds_dsize;
dedup = rD / D;
compress = rL / rP;
copies = rD / rP;
(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
"dedup * compress / copies = %.2f\n\n",
dedup, compress, copies, dedup * compress / copies);
}
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
char name[DDT_NAMELEN];
ddt_entry_t dde;
uint64_t walk = 0;
dmu_object_info_t doi;
uint64_t count, dspace, mspace;
int error;
error = ddt_object_info(ddt, type, class, &doi);
if (error == ENOENT)
return;
ASSERT(error == 0);
error = ddt_object_count(ddt, type, class, &count);
ASSERT(error == 0);
if (count == 0)
return;
dspace = doi.doi_physical_blocks_512 << 9;
mspace = doi.doi_fill_count * doi.doi_data_block_size;
ddt_object_name(ddt, type, class, name);
(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
name,
(u_longlong_t)count,
(u_longlong_t)(dspace / count),
(u_longlong_t)(mspace / count));
if (dump_opt['D'] < 3)
return;
zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
if (dump_opt['D'] < 4)
return;
if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
return;
(void) printf("%s contents:\n\n", name);
while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
dump_dde(ddt, &dde, walk);
ASSERT3U(error, ==, ENOENT);
(void) printf("\n");
}
static void
dump_all_ddts(spa_t *spa)
{
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
dump_ddt(ddt, type, class);
}
}
}
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_blocks == 0) {
(void) printf("All DDTs are empty\n");
return;
}
(void) printf("\n");
if (dump_opt['D'] > 1) {
(void) printf("DDT histogram (aggregated over all DDTs):\n");
ddt_get_dedup_histogram(spa, &ddh_total);
zpool_dump_ddt(&dds_total, &ddh_total);
}
dump_dedup_ratio(&dds_total);
}
static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
char *prefix = arg;
(void) printf("%s [%llu,%llu) length %llu\n",
prefix,
(u_longlong_t)start,
(u_longlong_t)(start + size),
(u_longlong_t)(size));
}
static void
dump_dtl(vdev_t *vd, int indent)
{
spa_t *spa = vd->vdev_spa;
boolean_t required;
const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
"outage" };
char prefix[256];
spa_vdev_state_enter(spa, SCL_NONE);
required = vdev_dtl_required(vd);
(void) spa_vdev_state_exit(spa, NULL, 0);
if (indent == 0)
(void) printf("\nDirty time logs:\n\n");
(void) printf("\t%*s%s [%s]\n", indent, "",
vd->vdev_path ? vd->vdev_path :
vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
required ? "DTL-required" : "DTL-expendable");
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_t *rt = vd->vdev_dtl[t];
if (range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
range_tree_walk(rt, dump_dtl_seg, prefix);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
dump_dtl(vd->vdev_child[c], indent + 4);
}
static void
dump_history(spa_t *spa)
{
nvlist_t **events = NULL;
char *buf;
uint64_t resid, len, off = 0;
uint_t num = 0;
int error;
char tbuf[30];
if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
__func__);
return;
}
do {
len = SPA_OLD_MAXBLOCKSIZE;
if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
(void) fprintf(stderr, "Unable to read history: "
"error %d\n", error);
free(buf);
return;
}
if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
break;
off -= resid;
} while (len != 0);
(void) printf("\nHistory:\n");
for (unsigned i = 0; i < num; i++) {
boolean_t printed = B_FALSE;
if (nvlist_exists(events[i], ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
} else {
tbuf[0] = '\0';
}
if (nvlist_exists(events[i], ZPOOL_HIST_CMD)) {
(void) printf("%s %s\n", tbuf,
fnvlist_lookup_string(events[i], ZPOOL_HIST_CMD));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_EVENT)) {
uint64_t ievent;
ievent = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_INT_EVENT);
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
goto next;
(void) printf(" %s [internal %s txg:%ju] %s\n",
tbuf,
zfs_history_event_names[ievent],
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_NAME)) {
(void) printf("%s [txg:%ju] %s", tbuf,
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_NAME));
if (nvlist_exists(events[i], ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(events[i],
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(
events[i],
ZPOOL_HIST_DSID));
}
(void) printf(" %s\n", fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_IOCTL)) {
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(events[i],
ZPOOL_HIST_IOCTL));
if (nvlist_exists(events[i], ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(events[i],
ZPOOL_HIST_ERRNO));
}
} else {
goto next;
}
printed = B_TRUE;
next:
if (dump_opt['h'] > 1) {
if (!printed)
(void) printf("unrecognized record:\n");
dump_nvlist(events[i], 2);
}
}
free(buf);
}
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
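/*
 * Translate a bookmark's (level, blkid) into the corresponding byte
 * offset within the object.
 */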
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb)
{
if (dnp == NULL) {
ASSERT(zb->zb_level < 0);
if (zb->zb_object == 0)
return (zb->zb_blkid);
return (zb->zb_blkid * BP_GET_LSIZE(bp));
}
ASSERT(zb->zb_level >= 0);
return ((zb->zb_blkid <<
(zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}
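/*
 * Append ZSTD header information (compressed size, version, level) for
 * a ZSTD-compressed block pointer to blkbuf, reading the raw block
 * without decompressing it when it is not embedded.
 */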
static void
snprintf_zstd_header(spa_t *spa, char *blkbuf, size_t buflen,
const blkptr_t *bp)
{
abd_t *pabd;
void *buf;
zio_t *zio;
zfs_zstdhdr_t zstd_hdr;
int error;
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_ZSTD)
return;
if (BP_IS_HOLE(bp))
return;
if (BP_IS_EMBEDDED(bp)) {
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
decode_embedded_bp_compressed(bp, buf);
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
free(buf);
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:EMBEDDED",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
return;
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
zio = zio_root(spa, NULL, NULL, 0);
/* Decrypt but don't decompress so we can read the compression header */
zio_nowait(zio_read(zio, spa, bp, pabd, BP_GET_PSIZE(bp), NULL, NULL,
ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW_COMPRESS,
NULL));
error = zio_wait(zio);
if (error) {
(void) fprintf(stderr, "read failed: %d\n", error);
return;
}
buf = abd_borrow_buf_copy(pabd, BP_GET_LSIZE(bp));
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:NORMAL",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
abd_return_buf_copy(pabd, buf, BP_GET_LSIZE(bp));
}
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
boolean_t bp_freed)
{
const dva_t *dva = bp->blk_dva;
int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
int i;
if (dump_opt['b'] >= 6) {
snprintf_blkptr(blkbuf, buflen, bp);
if (bp_freed) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
}
return;
}
if (BP_IS_EMBEDDED(bp)) {
(void) sprintf(blkbuf,
"EMBEDDED et=%u %llxL/%llxP B=%llu",
(int)BPE_GET_ETYPE(bp),
(u_longlong_t)BPE_GET_LSIZE(bp),
(u_longlong_t)BPE_GET_PSIZE(bp),
(u_longlong_t)bp->blk_birth);
return;
}
blkbuf[0] = '\0';
for (i = 0; i < ndvas; i++)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), "%llu:%llx:%llx ",
(u_longlong_t)DVA_GET_VDEV(&dva[i]),
(u_longlong_t)DVA_GET_OFFSET(&dva[i]),
(u_longlong_t)DVA_GET_ASIZE(&dva[i]));
if (BP_IS_HOLE(bp)) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL B=%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)bp->blk_birth);
} else {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL/%llxP F=%llu B=%llu/%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)BP_GET_PSIZE(bp),
(u_longlong_t)BP_GET_FILL(bp),
(u_longlong_t)bp->blk_birth,
(u_longlong_t)BP_PHYSICAL_BIRTH(bp));
if (bp_freed)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" cksum=%016llx:%016llx:%016llx:%016llx",
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
}
}
static void
print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
int l;
if (!BP_IS_EMBEDDED(bp)) {
ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
}
(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
ASSERT(zb->zb_level >= 0);
for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
if (l == zb->zb_level) {
(void) printf("L%llx", (u_longlong_t)zb->zb_level);
} else {
(void) printf(" ");
}
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, B_FALSE);
if (dump_opt['Z'] && BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD)
snprintf_zstd_header(spa, blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
int err = 0;
if (bp->blk_birth == 0)
return (0);
print_indirect(spa, bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
uint64_t fill = 0;
ASSERT(!BP_IS_REDACTED(bp));
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
if (err)
return (err);
ASSERT(buf->b_data);
/* recursively visit blocks below this */
cbp = buf->b_data;
for (i = 0; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
err = visit_indirect(spa, dnp, cbp, &czb);
if (err)
break;
fill += BP_GET_FILL(cbp);
}
if (!err)
ASSERT3U(fill, ==, BP_GET_FILL(bp));
arc_buf_destroy(buf, &buf);
}
return (err);
}
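/*
 * Print every indirect and leaf block pointer of the dnode, one line per bp,
 * by recursing through the indirect tree with visit_indirect().
 */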
static void
dump_indirect(dnode_t *dn)
{
dnode_phys_t *dnp = dn->dn_phys;
zbookmark_phys_t czb;
(void) printf("Indirect blocks:\n");
SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
dn->dn_object, dnp->dn_nlevels - 1, 0);
for (int j = 0; j < dnp->dn_nblkptr; j++) {
czb.zb_blkid = j;
(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
&dnp->dn_blkptr[j], &czb);
}
(void) printf("\n");
}
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dir_phys_t *dd = data;
time_t crtime;
char nice[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (nice) >= NN_NUMBUF_SZ, "nice truncated");
if (dd == NULL)
return;
ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
crtime = dd->dd_creation_time;
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\thead_dataset_obj = %llu\n",
(u_longlong_t)dd->dd_head_dataset_obj);
(void) printf("\t\tparent_dir_obj = %llu\n",
(u_longlong_t)dd->dd_parent_obj);
(void) printf("\t\torigin_obj = %llu\n",
(u_longlong_t)dd->dd_origin_obj);
(void) printf("\t\tchild_dir_zapobj = %llu\n",
(u_longlong_t)dd->dd_child_dir_zapobj);
zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
(void) printf("\t\tused_bytes = %s\n", nice);
zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
(void) printf("\t\tcompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
(void) printf("\t\tuncompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
(void) printf("\t\tquota = %s\n", nice);
zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
(void) printf("\t\treserved = %s\n", nice);
(void) printf("\t\tprops_zapobj = %llu\n",
(u_longlong_t)dd->dd_props_zapobj);
(void) printf("\t\tdeleg_zapobj = %llu\n",
(u_longlong_t)dd->dd_deleg_zapobj);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)dd->dd_flags);
#define DO(which) \
zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
sizeof (nice)); \
(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
DO(HEAD);
DO(SNAP);
DO(CHILD);
DO(CHILD_RSRV);
DO(REFRSRV);
#undef DO
(void) printf("\t\tclones = %llu\n",
(u_longlong_t)dd->dd_clones);
}
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object;
dsl_dataset_phys_t *ds = data;
time_t crtime;
char used[32], compressed[32], uncompressed[32], unique[32];
char blkbuf[BP_SPRINTF_LEN];
/* make sure nicenum has enough space */
_Static_assert(sizeof (used) >= NN_NUMBUF_SZ, "used truncated");
_Static_assert(sizeof (compressed) >= NN_NUMBUF_SZ,
"compressed truncated");
_Static_assert(sizeof (uncompressed) >= NN_NUMBUF_SZ,
"uncompressed truncated");
_Static_assert(sizeof (unique) >= NN_NUMBUF_SZ, "unique truncated");
if (ds == NULL)
return;
ASSERT(size == sizeof (*ds));
crtime = ds->ds_creation_time;
zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
sizeof (uncompressed));
zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
(void) printf("\t\tdir_obj = %llu\n",
(u_longlong_t)ds->ds_dir_obj);
(void) printf("\t\tprev_snap_obj = %llu\n",
(u_longlong_t)ds->ds_prev_snap_obj);
(void) printf("\t\tprev_snap_txg = %llu\n",
(u_longlong_t)ds->ds_prev_snap_txg);
(void) printf("\t\tnext_snap_obj = %llu\n",
(u_longlong_t)ds->ds_next_snap_obj);
(void) printf("\t\tsnapnames_zapobj = %llu\n",
(u_longlong_t)ds->ds_snapnames_zapobj);
(void) printf("\t\tnum_children = %llu\n",
(u_longlong_t)ds->ds_num_children);
(void) printf("\t\tuserrefs_obj = %llu\n",
(u_longlong_t)ds->ds_userrefs_obj);
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\tcreation_txg = %llu\n",
(u_longlong_t)ds->ds_creation_txg);
(void) printf("\t\tdeadlist_obj = %llu\n",
(u_longlong_t)ds->ds_deadlist_obj);
(void) printf("\t\tused_bytes = %s\n", used);
(void) printf("\t\tcompressed_bytes = %s\n", compressed);
(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
(void) printf("\t\tunique = %s\n", unique);
(void) printf("\t\tfsid_guid = %llu\n",
(u_longlong_t)ds->ds_fsid_guid);
(void) printf("\t\tguid = %llu\n",
(u_longlong_t)ds->ds_guid);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)ds->ds_flags);
(void) printf("\t\tnext_clones_obj = %llu\n",
(u_longlong_t)ds->ds_next_clones_obj);
(void) printf("\t\tprops_obj = %llu\n",
(u_longlong_t)ds->ds_props_obj);
(void) printf("\t\tbp = %s\n", blkbuf);
}
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
if (bp->blk_birth != 0) {
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("\t%s\n", blkbuf);
}
return (0);
}
static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
char bytes[32];
bptree_phys_t *bt;
dmu_buf_t *db;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
if (dump_opt['d'] < 3)
return;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
(void) printf("\n %s: %llu datasets, %s\n",
name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
dmu_buf_rele(db, FTAG);
if (dump_opt['d'] < 5)
return;
(void) printf("\n");
(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
(void) arg, (void) tx;
char blkbuf[BP_SPRINTF_LEN];
ASSERT(bp->blk_birth != 0);
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, bp_freed);
(void) printf("\t%s\n", blkbuf);
return (0);
}
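/*
 * Print a summary line for a bpobj (and recursively for any subobjs it
 * references); at higher -d verbosity also dump each block pointer it holds.
 */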
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
char bytes[32];
char comp[32];
char uncomp[32];
uint64_t i;
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
if (dump_opt['d'] < 3)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu freed, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
} else {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
}
for (i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
bpobj_close(&subbpo);
}
} else {
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%llu freed, %s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
bytes);
} else {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
bytes);
}
}
if (dump_opt['d'] < 5)
return;
if (indent == 0) {
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
(void) printf("\n");
}
}
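/*
 * Look up and print a single bookmark: its guid, creation txg/time and
 * redaction object, plus (optionally) the redaction progress, snapshot list
 * and the full list of redaction block entries.
 */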
static int
dump_bookmark(dsl_pool_t *dp, char *name, boolean_t print_redact,
boolean_t print_list)
{
int err = 0;
zfs_bookmark_phys_t prop;
objset_t *mos = dp->dp_spa->spa_meta_objset;
err = dsl_bookmark_lookup(dp, name, NULL, &prop);
if (err != 0) {
return (err);
}
(void) printf("\t#%s: ", strchr(name, '#') + 1);
(void) printf("{guid: %llx creation_txg: %llu creation_time: "
"%llu redaction_obj: %llu}\n", (u_longlong_t)prop.zbm_guid,
(u_longlong_t)prop.zbm_creation_txg,
(u_longlong_t)prop.zbm_creation_time,
(u_longlong_t)prop.zbm_redaction_obj);
IMPLY(print_list, print_redact);
if (!print_redact || prop.zbm_redaction_obj == 0)
return (0);
redaction_list_t *rl;
VERIFY0(dsl_redaction_list_hold_obj(dp,
prop.zbm_redaction_obj, FTAG, &rl));
redaction_list_phys_t *rlp = rl->rl_phys;
(void) printf("\tRedacted:\n\t\tProgress: ");
if (rlp->rlp_last_object != UINT64_MAX ||
rlp->rlp_last_blkid != UINT64_MAX) {
(void) printf("%llu %llu (incomplete)\n",
(u_longlong_t)rlp->rlp_last_object,
(u_longlong_t)rlp->rlp_last_blkid);
} else {
(void) printf("complete\n");
}
(void) printf("\t\tSnapshots: [");
for (unsigned int i = 0; i < rlp->rlp_num_snaps; i++) {
if (i > 0)
(void) printf(", ");
(void) printf("%0llu",
(u_longlong_t)rlp->rlp_snaps[i]);
}
(void) printf("]\n\t\tLength: %llu\n",
(u_longlong_t)rlp->rlp_num_entries);
if (!print_list) {
dsl_redaction_list_rele(rl, FTAG);
return (0);
}
if (rlp->rlp_num_entries == 0) {
dsl_redaction_list_rele(rl, FTAG);
(void) printf("\t\tRedaction List: []\n\n");
return (0);
}
redact_block_phys_t *rbp_buf;
uint64_t size;
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, prop.zbm_redaction_obj, &doi));
size = doi.doi_max_offset;
rbp_buf = kmem_alloc(size, KM_SLEEP);
err = dmu_read(mos, prop.zbm_redaction_obj, 0, size,
rbp_buf, 0);
if (err != 0) {
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
return (err);
}
(void) printf("\t\tRedaction List: [{object: %llx, offset: "
"%llx, blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[0].rbp_object,
(u_longlong_t)rbp_buf[0].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[0])),
(u_longlong_t)redact_block_get_count(&rbp_buf[0]));
for (size_t i = 1; i < rlp->rlp_num_entries; i++) {
(void) printf(",\n\t\t{object: %llx, offset: %llx, "
"blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[i].rbp_object,
(u_longlong_t)rbp_buf[i].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[i])),
(u_longlong_t)redact_block_get_count(&rbp_buf[i]));
}
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
(void) printf("]\n\n");
return (0);
}
static void
dump_bookmarks(objset_t *os, int verbosity)
{
zap_cursor_t zc;
zap_attribute_t attr;
dsl_dataset_t *ds = dmu_objset_ds(os);
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
objset_t *mos = os->os_spa->spa_meta_objset;
if (verbosity < 4)
return;
dsl_pool_config_enter(dp, FTAG);
for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
char osname[ZFS_MAX_DATASET_NAME_LEN];
char buf[ZFS_MAX_DATASET_NAME_LEN];
int len;
dmu_objset_name(os, osname);
len = snprintf(buf, sizeof (buf), "%s#%s", osname,
attr.za_name);
VERIFY3S(len, <, ZFS_MAX_DATASET_NAME_LEN);
(void) dump_bookmark(dp, buf, verbosity >= 5, verbosity >= 6);
}
zap_cursor_fini(&zc);
dsl_pool_config_exit(dp, FTAG);
}
static void
bpobj_count_refd(bpobj_t *bpo)
{
mos_obj_refd(bpo->bpo_object);
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
mos_obj_refd(bpo->bpo_phys->bpo_subobjs);
for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
bpobj_count_refd(&subbpo);
bpobj_close(&subbpo);
}
}
}
static int
dsl_deadlist_entry_count_refd(void *arg, dsl_deadlist_entry_t *dle)
{
spa_t *spa = arg;
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dle->dle_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dle->dle_bpobj);
return (0);
}
static int
dsl_deadlist_entry_dump(void *arg, dsl_deadlist_entry_t *dle)
{
ASSERT(arg == NULL);
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf),
"mintxg %llu -> obj %llu",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
dump_full_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
}
return (0);
}
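/*
 * Print summary statistics (used/comp/uncomp bytes and entry count) for a
 * deadlist or livelist, recording its MOS objects as referenced, and dump
 * the individual entries at higher -d verbosity.
 */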
static void
dump_blkptr_list(dsl_deadlist_t *dl, const char *name)
{
char bytes[32];
char comp[32];
char uncomp[32];
char entries[32];
spa_t *spa = dmu_objset_spa(dl->dl_os);
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dl->dl_oldfmt) {
if (dl->dl_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dl->dl_bpobj);
} else {
mos_obj_refd(dl->dl_object);
dsl_deadlist_iterate(dl, dsl_deadlist_entry_count_refd, spa);
}
/* make sure nicenum has enough space */
_Static_assert(sizeof (bytes) >= NN_NUMBUF_SZ, "bytes truncated");
_Static_assert(sizeof (comp) >= NN_NUMBUF_SZ, "comp truncated");
_Static_assert(sizeof (uncomp) >= NN_NUMBUF_SZ, "uncomp truncated");
_Static_assert(sizeof (entries) >= NN_NUMBUF_SZ, "entries truncated");
if (dump_opt['d'] < 3)
return;
if (dl->dl_oldfmt) {
dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
return;
}
zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
zdb_nicenum(avl_numnodes(&dl->dl_tree), entries, sizeof (entries));
(void) printf("\n %s: %s (%s/%s comp), %s entries\n",
name, bytes, comp, uncomp, entries);
if (dump_opt['d'] < 4)
return;
(void) putchar('\n');
dsl_deadlist_iterate(dl, dsl_deadlist_entry_dump, NULL);
}
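/*
 * Cross-check a dsl_dir's livelist against the space actually written since
 * the origin snapshot.  Used and compressed bytes must match exactly; the
 * livelist's uncompressed figure may be smaller since livelists do not track
 * embedded block pointers.  Returns nonzero on a discrepancy.
 */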
static int
verify_dd_livelist(objset_t *os)
{
uint64_t ll_used, used, ll_comp, comp, ll_uncomp, uncomp;
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
ASSERT(!dmu_objset_is_snapshot(os));
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return (0);
/* Iterate through the livelist to check for duplicates */
dsl_deadlist_iterate(&dd->dd_livelist, sublivelist_verify_lightweight,
NULL);
dsl_pool_config_enter(dp, FTAG);
dsl_deadlist_space(&dd->dd_livelist, &ll_used,
&ll_comp, &ll_uncomp);
dsl_dataset_t *origin_ds;
ASSERT(dsl_pool_config_held(dp));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin_ds));
VERIFY0(dsl_dataset_space_written(origin_ds, os->os_dsl_dataset,
&used, &comp, &uncomp));
dsl_dataset_rele(origin_ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
/*
* It's possible that the dataset's uncomp space is larger than the
* livelist's because livelists do not track embedded block pointers.
*/
if (used != ll_used || comp != ll_comp || uncomp < ll_uncomp) {
char nice_used[32], nice_comp[32], nice_uncomp[32];
(void) printf("Discrepancy in space accounting:\n");
zdb_nicenum(used, nice_used, sizeof (nice_used));
zdb_nicenum(comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("dir: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
zdb_nicenum(ll_used, nice_used, sizeof (nice_used));
zdb_nicenum(ll_comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(ll_uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("livelist: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
return (1);
}
return (0);
}
static char *key_material = NULL;
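/*
 * Derive the raw wrapping key from the user-supplied key material according
 * to the dataset's keyformat: hex strings are parsed directly, passphrases
 * are run through PBKDF2-HMAC-SHA1 with the stored salt and iteration count.
 */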
static boolean_t
zdb_derive_key(dsl_dir_t *dd, uint8_t *key_out)
{
uint64_t keyformat, salt, iters;
int i;
unsigned char c;
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), sizeof (uint64_t),
1, &keyformat));
switch (keyformat) {
case ZFS_KEYFORMAT_HEX:
for (i = 0; i < WRAPPING_KEY_LEN * 2; i += 2) {
if (!isxdigit(key_material[i]) ||
!isxdigit(key_material[i+1]))
return (B_FALSE);
if (sscanf(&key_material[i], "%02hhx", &c) != 1)
return (B_FALSE);
key_out[i / 2] = c;
}
break;
case ZFS_KEYFORMAT_PASSPHRASE:
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset,
dd->dd_crypto_obj, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT),
sizeof (uint64_t), 1, &salt));
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset,
dd->dd_crypto_obj, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS),
sizeof (uint64_t), 1, &iters));
if (PKCS5_PBKDF2_HMAC_SHA1(key_material, strlen(key_material),
((uint8_t *)&salt), sizeof (uint64_t), iters,
WRAPPING_KEY_LEN, key_out) != 1)
return (B_FALSE);
break;
default:
fatal("no support for key format %u\n",
(unsigned int) keyformat);
}
return (B_TRUE);
}
static char encroot[ZFS_MAX_DATASET_NAME_LEN];
static boolean_t key_loaded = B_FALSE;
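/*
 * Find the dataset's encryption root, derive its wrapping key and load it
 * into the spa keystore so encrypted objects can be read; zdb_unload_key()
 * reverses this once zdb is done with the objset.
 */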
static void
zdb_load_key(objset_t *os)
{
dsl_pool_t *dp;
dsl_dir_t *dd, *rdd;
uint8_t key[WRAPPING_KEY_LEN];
uint64_t rddobj;
int err;
dp = spa_get_dsl(os->os_spa);
dd = os->os_dsl_dataset->ds_dir;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_ROOT_DDOBJ, sizeof (uint64_t), 1, &rddobj));
VERIFY0(dsl_dir_hold_obj(dd->dd_pool, rddobj, NULL, FTAG, &rdd));
dsl_dir_name(rdd, encroot);
dsl_dir_rele(rdd, FTAG);
if (!zdb_derive_key(dd, key))
fatal("couldn't derive encryption key");
dsl_pool_config_exit(dp, FTAG);
ASSERT3U(dsl_dataset_get_keystatus(dd), ==, ZFS_KEYSTATUS_UNAVAILABLE);
dsl_crypto_params_t *dcp;
nvlist_t *crypto_args;
crypto_args = fnvlist_alloc();
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)key, WRAPPING_KEY_LEN);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
NULL, crypto_args, &dcp));
err = spa_keystore_load_wkey(encroot, dcp, B_FALSE);
dsl_crypto_params_free(dcp, (err != 0));
fnvlist_free(crypto_args);
if (err != 0)
fatal(
"couldn't load encryption key for %s: %s",
encroot, err == ZFS_ERR_CRYPTO_NOTSUP ?
"crypto params not supported" : strerror(err));
ASSERT3U(dsl_dataset_get_keystatus(dd), ==, ZFS_KEYSTATUS_AVAILABLE);
printf("Unlocked encryption root: %s\n", encroot);
key_loaded = B_TRUE;
}
static void
zdb_unload_key(void)
{
if (!key_loaded)
return;
VERIFY0(spa_keystore_unload_wkey(encroot));
key_loaded = B_FALSE;
}
static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;
static int
open_objset(const char *path, const void *tag, objset_t **osp)
{
int err;
uint64_t sa_attrs = 0;
uint64_t version = 0;
VERIFY3P(sa_os, ==, NULL);
/*
* We can't own an objset if it's redacted. Therefore, we do this
* dance: hold the objset, then acquire a long hold on its dataset, then
* release the pool (which is held as part of holding the objset).
*/
if (dump_opt['K']) {
/* decryption requested, try to load keys */
err = dmu_objset_hold(path, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset "
"'%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
/* succeeds or dies */
zdb_load_key(*osp);
/* release it all */
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele(dmu_objset_ds(*osp), tag);
}
int ds_hold_flags = key_loaded ? DS_HOLD_FLAG_DECRYPT : 0;
err = dmu_objset_hold_flags(path, ds_hold_flags, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset '%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
if (dmu_objset_type(*osp) == DMU_OST_ZFS &&
(key_loaded || !(*osp)->os_encrypted)) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &version);
if (version >= ZPL_VERSION_SA) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
8, 1, &sa_attrs);
}
err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
&sa_attr_table);
if (err != 0) {
(void) fprintf(stderr, "sa_setup failed: %s\n",
strerror(err));
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele_flags(dmu_objset_ds(*osp),
ds_hold_flags, tag);
*osp = NULL;
}
}
sa_os = *osp;
return (err);
}
static void
close_objset(objset_t *os, const void *tag)
{
VERIFY3P(os, ==, sa_os);
if (os->os_sa != NULL)
sa_tear_down(os);
dsl_dataset_long_rele(dmu_objset_ds(os), tag);
dsl_dataset_rele_flags(dmu_objset_ds(os),
key_loaded ? DS_HOLD_FLAG_DECRYPT : 0, tag);
sa_attr_table = NULL;
sa_os = NULL;
zdb_unload_key();
}
static void
fuid_table_destroy(void)
{
if (fuid_table_loaded) {
zfs_fuid_table_destroy(&idx_tree, &domain_tree);
fuid_table_loaded = B_FALSE;
}
}
/*
* Print uid or gid information.
* For a normal POSIX id, just the id is printed in decimal.
* For CIFS files with a FUID, the fuid is printed in hex followed by
* the domain-rid string.
*/
static void
print_idstr(uint64_t id, const char *id_type)
{
if (FUID_INDEX(id)) {
const char *domain =
zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
(void) printf("\t%s %llx [%s-%d]\n", id_type,
(u_longlong_t)id, domain, (int)FUID_RID(id));
} else {
(void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
}
}
static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
uint32_t uid_idx, gid_idx;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
/* Load domain table, if not already loaded */
if (!fuid_table_loaded && (uid_idx || gid_idx)) {
uint64_t fuid_obj;
/* first find the fuid object. It lives in the master node */
VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
8, 1, &fuid_obj) == 0);
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
(void) zfs_fuid_table_load(os, fuid_obj,
&idx_tree, &domain_tree);
fuid_table_loaded = B_TRUE;
}
print_idstr(uid, "uid");
print_idstr(gid, "gid");
}
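/*
 * Unpack the znode's DXATTR system attribute and print each SA-based xattr,
 * showing values verbatim when fully printable and as octal escapes otherwise.
 */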
static void
dump_znode_sa_xattr(sa_handle_t *hdl)
{
nvlist_t *sa_xattr;
nvpair_t *elem = NULL;
int sa_xattr_size = 0;
int sa_xattr_entries = 0;
int error;
char *sa_xattr_packed;
error = sa_size(hdl, sa_attr_table[ZPL_DXATTR], &sa_xattr_size);
if (error || sa_xattr_size == 0)
return;
sa_xattr_packed = malloc(sa_xattr_size);
if (sa_xattr_packed == NULL)
return;
error = sa_lookup(hdl, sa_attr_table[ZPL_DXATTR],
sa_xattr_packed, sa_xattr_size);
if (error) {
free(sa_xattr_packed);
return;
}
error = nvlist_unpack(sa_xattr_packed, sa_xattr_size, &sa_xattr, 0);
if (error) {
free(sa_xattr_packed);
return;
}
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL)
sa_xattr_entries++;
(void) printf("\tSA xattrs: %d bytes, %d entries\n\n",
sa_xattr_size, sa_xattr_entries);
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL) {
boolean_t can_print = !dump_opt['P'];
uchar_t *value;
uint_t cnt, idx;
(void) printf("\t\t%s = ", nvpair_name(elem));
nvpair_value_byte_array(elem, &value, &cnt);
for (idx = 0; idx < cnt; ++idx) {
if (!isprint(value[idx])) {
can_print = B_FALSE;
break;
}
}
for (idx = 0; idx < cnt; ++idx) {
if (can_print)
(void) putchar(value[idx]);
else
(void) printf("\\%3.3o", value[idx]);
}
(void) putchar('\n');
}
nvlist_free(sa_xattr);
free(sa_xattr_packed);
}
static void
dump_znode_symlink(sa_handle_t *hdl)
{
int sa_symlink_size = 0;
char linktarget[MAXPATHLEN];
int error;
error = sa_size(hdl, sa_attr_table[ZPL_SYMLINK], &sa_symlink_size);
if (error || sa_symlink_size == 0) {
return;
}
if (sa_symlink_size >= sizeof (linktarget)) {
(void) printf("symlink size %d is too large\n",
sa_symlink_size);
return;
}
linktarget[sa_symlink_size] = '\0';
if (sa_lookup(hdl, sa_attr_table[ZPL_SYMLINK],
&linktarget, sa_symlink_size) == 0)
(void) printf("\ttarget %s\n", linktarget);
}
static void
dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) data, (void) size;
char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
sa_handle_t *hdl;
uint64_t xattr, rdev, gen;
uint64_t uid, gid, mode, fsize, parent, links;
uint64_t pflags;
uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
time_t z_crtime, z_atime, z_mtime, z_ctime;
sa_bulk_attr_t bulk[12];
int idx = 0;
int error;
VERIFY3P(os, ==, sa_os);
if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
(void) printf("Failed to get handle for SA znode\n");
return;
}
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
&links, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
&fsize, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
acctm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
modtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
crtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
chgtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
&pflags, 8);
if (sa_bulk_lookup(hdl, bulk, idx)) {
(void) sa_handle_destroy(hdl);
return;
}
z_crtime = (time_t)crtm[0];
z_atime = (time_t)acctm[0];
z_mtime = (time_t)modtm[0];
z_ctime = (time_t)chgtm[0];
if (dump_opt['d'] > 4) {
error = zfs_obj_to_path(os, object, path, sizeof (path));
if (error == ESTALE) {
(void) snprintf(path, sizeof (path), "on delete queue");
} else if (error != 0) {
leaked_objects++;
(void) snprintf(path, sizeof (path),
"path not found, possibly leaked");
}
(void) printf("\tpath %s\n", path);
}
if (S_ISLNK(mode))
dump_znode_symlink(hdl);
dump_uidgid(os, uid, gid);
(void) printf("\tatime %s", ctime(&z_atime));
(void) printf("\tmtime %s", ctime(&z_mtime));
(void) printf("\tctime %s", ctime(&z_ctime));
(void) printf("\tcrtime %s", ctime(&z_crtime));
(void) printf("\tgen %llu\n", (u_longlong_t)gen);
(void) printf("\tmode %llo\n", (u_longlong_t)mode);
(void) printf("\tsize %llu\n", (u_longlong_t)fsize);
(void) printf("\tparent %llu\n", (u_longlong_t)parent);
(void) printf("\tlinks %llu\n", (u_longlong_t)links);
(void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
uint64_t projid;
if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
sizeof (uint64_t)) == 0)
(void) printf("\tprojid %llu\n", (u_longlong_t)projid);
}
if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
sizeof (uint64_t)) == 0)
(void) printf("\txattr %llu\n", (u_longlong_t)xattr);
if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
sizeof (uint64_t)) == 0)
(void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
dump_znode_sa_xattr(hdl);
sa_handle_destroy(hdl);
}
static void
dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static void
dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) os, (void) object, (void) data, (void) size;
}
static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_none, /* unallocated */
dump_zap, /* object directory */
dump_uint64, /* object array */
dump_none, /* packed nvlist */
dump_packed_nvlist, /* packed nvlist size */
dump_none, /* bpobj */
dump_bpobj, /* bpobj header */
dump_none, /* SPA space map header */
dump_none, /* SPA space map */
dump_none, /* ZIL intent log */
dump_dnode, /* DMU dnode */
dump_dmu_objset, /* DMU objset */
dump_dsl_dir, /* DSL directory */
dump_zap, /* DSL directory child map */
dump_zap, /* DSL dataset snap map */
dump_zap, /* DSL props */
dump_dsl_dataset, /* DSL dataset */
dump_znode, /* ZFS znode */
dump_acl, /* ZFS V0 ACL */
dump_uint8, /* ZFS plain file */
dump_zpldir, /* ZFS directory */
dump_zap, /* ZFS master node */
dump_zap, /* ZFS delete queue */
dump_uint8, /* zvol object */
dump_zap, /* zvol prop */
dump_uint8, /* other uint8[] */
dump_uint64, /* other uint64[] */
dump_zap, /* other ZAP */
dump_zap, /* persistent error log */
dump_uint8, /* SPA history */
dump_history_offsets, /* SPA history offsets */
dump_zap, /* Pool properties */
dump_zap, /* DSL permissions */
dump_acl, /* ZFS ACL */
dump_uint8, /* ZFS SYSACL */
dump_none, /* FUID nvlist */
dump_packed_nvlist, /* FUID nvlist size */
dump_zap, /* DSL dataset next clones */
dump_zap, /* DSL scrub queue */
dump_zap, /* ZFS user/group/project used */
dump_zap, /* ZFS user/group/project quota */
dump_zap, /* snapshot refcount tags */
dump_ddt_zap, /* DDT ZAP object */
dump_zap, /* DDT statistics */
dump_znode, /* SA object */
dump_zap, /* SA Master Node */
dump_sa_attrs, /* SA attribute registration */
dump_sa_layouts, /* SA attribute layouts */
dump_zap, /* DSL scrub translations */
dump_none, /* fake dedup BP */
dump_zap, /* deadlist */
dump_none, /* deadlist hdr */
dump_zap, /* dsl clones */
dump_bpobj_subobjs, /* bpobj subobjs */
dump_unknown, /* Unknown type, must be last */
};
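/*
 * Decide whether an object of the given DMU type should be shown for the
 * type flags supplied with an object range (directory, plain file, space
 * map, ZAP), including ranges that combine the all-types flag with negated
 * flags.
 */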
static boolean_t
match_object_type(dmu_object_type_t obj_type, uint64_t flags)
{
boolean_t match = B_TRUE;
switch (obj_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (!(flags & ZOR_FLAG_DIRECTORY))
match = B_FALSE;
break;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (!(flags & ZOR_FLAG_PLAIN_FILE))
match = B_FALSE;
break;
case DMU_OT_SPACE_MAP:
if (!(flags & ZOR_FLAG_SPACE_MAP))
match = B_FALSE;
break;
default:
if (strcmp(zdb_ot_name(obj_type), "zap") == 0) {
if (!(flags & ZOR_FLAG_ZAP))
match = B_FALSE;
break;
}
/*
* If all bits except some of the supported flags are
* set, the user combined the all-types flag (A) with
* a negated flag to exclude some types (e.g. A-f to
* show all object types except plain files).
*/
if ((flags | ZOR_SUPPORTED_FLAGS) != ZOR_FLAG_ALL_TYPES)
match = B_FALSE;
break;
}
return (match);
}
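/*
 * Print a one-line summary of a single object and, as verbosity increases,
 * its bonus buffer, dnode flags, indirect block tree and allocated segments.
 * Encrypted bonus buffers are not dumped unless the key has been loaded; in
 * that case the dnode is held instead of the bonus buffer.
 */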
static void
dump_object(objset_t *os, uint64_t object, int verbosity,
boolean_t *print_header, uint64_t *dnode_slots_used, uint64_t flags)
{
dmu_buf_t *db = NULL;
dmu_object_info_t doi;
dnode_t *dn;
boolean_t dnode_held = B_FALSE;
void *bonus = NULL;
size_t bsize = 0;
char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
char bonus_size[32];
char aux[50];
int error;
/* make sure nicenum has enough space */
_Static_assert(sizeof (iblk) >= NN_NUMBUF_SZ, "iblk truncated");
_Static_assert(sizeof (dblk) >= NN_NUMBUF_SZ, "dblk truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ, "lsize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ, "asize truncated");
_Static_assert(sizeof (bonus_size) >= NN_NUMBUF_SZ,
"bonus_size truncated");
if (*print_header) {
(void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
"Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
"lsize", "%full", "type");
*print_header = 0;
}
if (object == 0) {
dn = DMU_META_DNODE(os);
dmu_object_info_from_dnode(dn, &doi);
} else {
/*
* Encrypted datasets will have sensitive bonus buffers
* encrypted. Therefore we cannot hold the bonus buffer and
* must hold the dnode itself instead.
*/
error = dmu_object_info(os, object, &doi);
if (error)
fatal("dmu_object_info() failed, errno %u", error);
if (!key_loaded && os->os_encrypted &&
DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
error = dnode_hold(os, object, FTAG, &dn);
if (error)
fatal("dnode_hold() failed, errno %u", error);
dnode_held = B_TRUE;
} else {
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error)
fatal("dmu_bonus_hold(%llu) failed, errno %u",
object, error);
bonus = db->db_data;
bsize = db->db_size;
dn = DB_DNODE((dmu_buf_impl_t *)db);
}
}
/*
* Default to showing all object types if no flags were specified.
*/
if (flags != 0 && flags != ZOR_FLAG_ALL_TYPES &&
!match_object_type(doi.doi_type, flags))
goto out;
if (dnode_slots_used)
*dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
(void) snprintf(fill, sizeof (fill), "%6.2f", 100.0 *
doi.doi_fill_count * doi.doi_data_block_size / (object == 0 ?
DNODES_PER_BLOCK : 1) / doi.doi_max_offset);
aux[0] = '\0';
if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
}
if (doi.doi_compress == ZIO_COMPRESS_INHERIT &&
ZIO_COMPRESS_HASLEVEL(os->os_compress) && verbosity >= 6) {
const char *compname = NULL;
if (zfs_prop_index_to_string(ZFS_PROP_COMPRESSION,
ZIO_COMPRESS_RAW(os->os_compress, os->os_complevel),
&compname) == 0) {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux), " (Z=inherit=%s)",
compname);
} else {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux),
" (Z=inherit=%s-unknown)",
ZDB_COMPRESS_NAME(os->os_compress));
}
} else if (doi.doi_compress == ZIO_COMPRESS_INHERIT && verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=inherit=%s)", ZDB_COMPRESS_NAME(os->os_compress));
} else if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
}
(void) printf("%10lld %3u %5s %5s %5s %6s %5s %6s %s%s\n",
(u_longlong_t)object, doi.doi_indirection, iblk, dblk,
asize, dnsize, lsize, fill, zdb_ot_name(doi.doi_type), aux);
if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
(void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
"", "", "", "", "", "", bonus_size, "bonus",
zdb_ot_name(doi.doi_bonus_type));
}
if (verbosity >= 4) {
(void) printf("\tdnode flags: %s%s%s%s\n",
(dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
"USED_BYTES " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
"USERUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
"USEROBJUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
"SPILL_BLKPTR" : "");
(void) printf("\tdnode maxblkid: %llu\n",
(longlong_t)dn->dn_phys->dn_maxblkid);
if (!dnode_held) {
object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
object, bonus, bsize);
} else {
(void) printf("\t\t(bonus encrypted)\n");
}
if (key_loaded ||
(!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type))) {
object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
NULL, 0);
} else {
(void) printf("\t\t(object encrypted)\n");
}
*print_header = B_TRUE;
}
if (verbosity >= 5) {
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
DN_SPILL_BLKPTR(dn->dn_phys), B_FALSE);
(void) printf("\nSpill block: %s\n", blkbuf);
}
dump_indirect(dn);
}
if (verbosity >= 5) {
/*
* Report the list of segments that comprise the object.
*/
uint64_t start = 0;
uint64_t end;
uint64_t blkfill = 1;
int minlvl = 1;
if (dn->dn_type == DMU_OT_DNODE) {
minlvl = 0;
blkfill = DNODES_PER_BLOCK;
}
for (;;) {
char segsize[32];
/* make sure nicenum has enough space */
_Static_assert(sizeof (segsize) >= NN_NUMBUF_SZ,
"segsize truncated");
error = dnode_next_offset(dn,
0, &start, minlvl, blkfill, 0);
if (error)
break;
end = start;
error = dnode_next_offset(dn,
DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
zdb_nicenum(end - start, segsize, sizeof (segsize));
(void) printf("\t\tsegment [%016llx, %016llx)"
" size %5s\n", (u_longlong_t)start,
(u_longlong_t)end, segsize);
if (error)
break;
start = end;
}
}
out:
if (db != NULL)
dmu_buf_rele(db, FTAG);
if (dnode_held)
dnode_rele(dn, FTAG);
}
static void
count_dir_mos_objects(dsl_dir_t *dd)
{
mos_obj_refd(dd->dd_object);
mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_clones);
/*
* The dd_crypto_obj can be referenced by multiple dsl_dir's.
* Ignore the references after the first one.
*/
mos_obj_refd_multiple(dd->dd_crypto_obj);
}
static void
count_ds_mos_objects(dsl_dataset_t *ds)
{
mos_obj_refd(ds->ds_object);
mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
mos_obj_refd(ds->ds_bookmarks_obj);
if (!dsl_dataset_is_snapshot(ds)) {
count_dir_mos_objects(ds->ds_dir);
}
}
static const char *const objset_types[DMU_OST_NUMTYPES] = {
"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
/*
* Parse a string denoting a range of object IDs of the form
* <start>[:<end>[:flags]], and store the results in zor.
* Return 0 on success. On error, return 1 and update the msg
* pointer to point to a descriptive error message.
*/
static int
parse_object_range(char *range, zopt_object_range_t *zor, const char **msg)
{
uint64_t flags = 0;
char *p, *s, *dup, *flagstr, *tmp = NULL;
size_t len;
int i;
int rc = 0;
if (strchr(range, ':') == NULL) {
zor->zor_obj_start = strtoull(range, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in object ID";
rc = 1;
}
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = zor->zor_obj_start;
return (rc);
}
if (strchr(range, ':') == range) {
*msg = "Invalid leading colon";
rc = 1;
return (rc);
}
len = strlen(range);
if (range[len - 1] == ':') {
*msg = "Invalid trailing colon";
rc = 1;
return (rc);
}
dup = strdup(range);
s = strtok_r(dup, ":", &tmp);
zor->zor_obj_start = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in start object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
zor->zor_obj_end = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in end object ID";
rc = 1;
goto out;
}
if (zor->zor_obj_start > zor->zor_obj_end) {
*msg = "Start object ID may not exceed end object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
if (s == NULL) {
zor->zor_flags = ZOR_FLAG_ALL_TYPES;
goto out;
} else if (strtok_r(NULL, ":", &tmp) != NULL) {
*msg = "Invalid colon-delimited field after flags";
rc = 1;
goto out;
}
flagstr = s;
for (i = 0; flagstr[i]; i++) {
int bit;
boolean_t negation = (flagstr[i] == '-');
if (negation) {
i++;
if (flagstr[i] == '\0') {
*msg = "Invalid trailing negation operator";
rc = 1;
goto out;
}
}
bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
*msg = "Invalid flag";
rc = 1;
goto out;
}
if (negation)
flags &= ~bit;
else
flags |= bit;
}
zor->zor_flags = flags;
zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = ZDB_MAP_OBJECT_ID(zor->zor_obj_end);
out:
free(dup);
return (rc);
}
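/*
 * Print the dataset header line and, depending on verbosity and options,
 * its intent log, deadlists, livelist and bookmarks, followed by either the
 * object ranges requested on the command line or every object in the objset
 * together with dnode slot usage statistics.
 */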
static void
dump_objset(objset_t *os)
{
dmu_objset_stats_t dds = { 0 };
uint64_t object, object_count;
uint64_t refdbytes, usedobjs, scratch;
char numbuf[32];
char blkbuf[BP_SPRINTF_LEN + 20];
char osname[ZFS_MAX_DATASET_NAME_LEN];
const char *type = "UNKNOWN";
int verbosity = dump_opt['d'];
boolean_t print_header;
unsigned i;
int error;
uint64_t total_slots_used = 0;
uint64_t max_slot_used = 0;
uint64_t dnode_slots;
uint64_t obj_start;
uint64_t obj_end;
uint64_t flags;
/* make sure nicenum has enough space */
_Static_assert(sizeof (numbuf) >= NN_NUMBUF_SZ, "numbuf truncated");
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
print_header = B_TRUE;
if (dds.dds_type < DMU_OST_NUMTYPES)
type = objset_types[dds.dds_type];
if (dds.dds_type == DMU_OST_META) {
dds.dds_creation_txg = TXG_INITIAL;
usedobjs = BP_GET_FILL(os->os_rootbp);
refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
dd_used_bytes;
} else {
dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
}
ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
if (verbosity >= 4) {
(void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
(void) snprintf_blkptr(blkbuf + strlen(blkbuf),
sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
} else {
blkbuf[0] = '\0';
}
dmu_objset_name(os, osname);
(void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
"%s, %llu objects%s%s\n",
osname, type, (u_longlong_t)dmu_objset_id(os),
(u_longlong_t)dds.dds_creation_txg,
numbuf, (u_longlong_t)usedobjs, blkbuf,
(dds.dds_inconsistent) ? " (inconsistent)" : "");
for (i = 0; i < zopt_object_args; i++) {
obj_start = zopt_object_ranges[i].zor_obj_start;
obj_end = zopt_object_ranges[i].zor_obj_end;
flags = zopt_object_ranges[i].zor_flags;
object = obj_start;
if (object == 0 || obj_start == obj_end)
dump_object(os, object, verbosity, &print_header, NULL,
flags);
else
object--;
while ((dmu_object_next(os, &object, B_FALSE, 0) == 0) &&
object <= obj_end) {
dump_object(os, object, verbosity, &print_header, NULL,
flags);
}
}
if (zopt_object_args > 0) {
(void) printf("\n");
return;
}
if (dump_opt['i'] != 0 || verbosity >= 2)
dump_intent_log(dmu_objset_zil(os));
if (dmu_objset_ds(os) != NULL) {
dsl_dataset_t *ds = dmu_objset_ds(os);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
dump_blkptr_list(&ds->ds_dir->dd_livelist, "Livelist");
if (verify_dd_livelist(os) != 0)
fatal("livelist is incorrect");
}
if (dsl_dataset_remap_deadlist_exists(ds)) {
(void) printf("ds_remap_deadlist:\n");
dump_blkptr_list(&ds->ds_remap_deadlist, "Deadlist");
}
count_ds_mos_objects(ds);
}
if (dmu_objset_ds(os) != NULL)
dump_bookmarks(os, verbosity);
if (verbosity < 2)
return;
if (BP_IS_HOLE(os->os_rootbp))
return;
dump_object(os, 0, verbosity, &print_header, NULL, 0);
object_count = 0;
if (DMU_USERUSED_DNODE(os) != NULL &&
DMU_USERUSED_DNODE(os)->dn_type != 0) {
dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
NULL, 0);
dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
NULL, 0);
}
if (DMU_PROJECTUSED_DNODE(os) != NULL &&
DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
&print_header, NULL, 0);
object = 0;
while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
dump_object(os, object, verbosity, &print_header, &dnode_slots,
0);
object_count++;
total_slots_used += dnode_slots;
max_slot_used = object + dnode_slots - 1;
}
(void) printf("\n");
(void) printf(" Dnode slots:\n");
(void) printf("\tTotal used: %10llu\n",
(u_longlong_t)total_slots_used);
(void) printf("\tMax used: %10llu\n",
(u_longlong_t)max_slot_used);
(void) printf("\tPercent empty: %10lf\n",
(double)(max_slot_used - total_slots_used)*100 /
(double)max_slot_used);
(void) printf("\n");
if (error != ESRCH) {
(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
abort();
}
ASSERT3U(object_count, ==, usedobjs);
if (leaked_objects != 0) {
(void) printf("%d potentially leaked objects detected\n",
leaked_objects);
leaked_objects = 0;
}
}
static void
dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
{
time_t timestamp = ub->ub_timestamp;
(void) printf("%s", header ? header : "");
(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
(void) printf("\ttimestamp = %llu UTC = %s",
(u_longlong_t)ub->ub_timestamp, ctime(&timestamp));
(void) printf("\tmmp_magic = %016llx\n",
(u_longlong_t)ub->ub_mmp_magic);
if (MMP_VALID(ub)) {
(void) printf("\tmmp_delay = %0llu\n",
(u_longlong_t)ub->ub_mmp_delay);
if (MMP_SEQ_VALID(ub))
(void) printf("\tmmp_seq = %u\n",
(unsigned int) MMP_SEQ(ub));
if (MMP_FAIL_INT_VALID(ub))
(void) printf("\tmmp_fail = %u\n",
(unsigned int) MMP_FAIL_INT(ub));
if (MMP_INTERVAL_VALID(ub))
(void) printf("\tmmp_write = %u\n",
(unsigned int) MMP_INTERVAL(ub));
/* After MMP_* to make summarize_uberblock_mmp cleaner */
(void) printf("\tmmp_valid = %x\n",
(unsigned int) ub->ub_mmp_config & 0xFF);
}
if (dump_opt['u'] >= 4) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
(void) printf("\trootbp = %s\n", blkbuf);
}
(void) printf("\tcheckpoint_txg = %llu\n",
(u_longlong_t)ub->ub_checkpoint_txg);
(void) printf("%s", footer ? footer : "");
}
static void
dump_config(spa_t *spa)
{
dmu_buf_t *db;
size_t nvsize = 0;
int error = 0;
error = dmu_bonus_hold(spa->spa_meta_objset,
spa->spa_config_object, FTAG, &db);
if (error == 0) {
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
(void) printf("\nMOS Configuration:\n");
dump_packed_nvlist(spa->spa_meta_objset,
spa->spa_config_object, (void *)&nvsize, 1);
} else {
(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
(u_longlong_t)spa->spa_config_object, error);
}
}
static void
dump_cachefile(const char *cachefile)
{
int fd;
struct stat64 statbuf;
char *buf;
nvlist_t *config;
if ((fd = open64(cachefile, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if (fstat64(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if ((buf = malloc(statbuf.st_size)) == NULL) {
(void) fprintf(stderr, "failed to allocate %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) fprintf(stderr, "failed to read %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
(void) fprintf(stderr, "failed to unpack nvlist\n");
exit(1);
}
free(buf);
dump_nvlist(config, 0);
nvlist_free(config);
}
/*
* ZFS label nvlist stats
*/
typedef struct zdb_nvl_stats {
int zns_list_count;
int zns_leaf_count;
size_t zns_leaf_largest;
size_t zns_leaf_total;
nvlist_t *zns_string;
nvlist_t *zns_uint64;
nvlist_t *zns_boolean;
} zdb_nvl_stats_t;
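/*
 * Recursively walk a label config nvlist, copying its pairs into per-type
 * nvlists and tracking per-leaf-vdev ("children") sizes so that
 * dump_nvlist_stats() can report how the label nvlist space is used.
 */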
static void
collect_nvlist_stats(nvlist_t *nvl, zdb_nvl_stats_t *stats)
{
nvlist_t *list, **array;
nvpair_t *nvp = NULL;
const char *name;
uint_t i, items;
stats->zns_list_count++;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
name = nvpair_name(nvp);
switch (nvpair_type(nvp)) {
case DATA_TYPE_STRING:
fnvlist_add_string(stats->zns_string, name,
fnvpair_value_string(nvp));
break;
case DATA_TYPE_UINT64:
fnvlist_add_uint64(stats->zns_uint64, name,
fnvpair_value_uint64(nvp));
break;
case DATA_TYPE_BOOLEAN:
fnvlist_add_boolean(stats->zns_boolean, name);
break;
case DATA_TYPE_NVLIST:
if (nvpair_value_nvlist(nvp, &list) == 0)
collect_nvlist_stats(list, stats);
break;
case DATA_TYPE_NVLIST_ARRAY:
if (nvpair_value_nvlist_array(nvp, &array, &items) != 0)
break;
for (i = 0; i < items; i++) {
collect_nvlist_stats(array[i], stats);
/* collect stats on leaf vdev */
if (strcmp(name, "children") == 0) {
size_t size;
(void) nvlist_size(array[i], &size,
NV_ENCODE_XDR);
stats->zns_leaf_total += size;
if (size > stats->zns_leaf_largest)
stats->zns_leaf_largest = size;
stats->zns_leaf_count++;
}
}
break;
default:
(void) printf("skip type %d!\n", (int)nvpair_type(nvp));
}
}
}
static void
dump_nvlist_stats(nvlist_t *nvl, size_t cap)
{
zdb_nvl_stats_t stats = { 0 };
size_t size, sum = 0, total;
size_t noise;
/* requires nvlist with non-unique names for stat collection */
VERIFY0(nvlist_alloc(&stats.zns_string, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_uint64, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_boolean, 0, 0));
VERIFY0(nvlist_size(stats.zns_boolean, &noise, NV_ENCODE_XDR));
(void) printf("\n\nZFS Label NVList Config Stats:\n");
VERIFY0(nvlist_size(nvl, &total, NV_ENCODE_XDR));
(void) printf(" %d bytes used, %d bytes free (using %4.1f%%)\n\n",
(int)total, (int)(cap - total), 100.0 * total / cap);
collect_nvlist_stats(nvl, &stats);
VERIFY0(nvlist_size(stats.zns_uint64, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "integers:",
(int)fnvlist_num_pairs(stats.zns_uint64),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_string, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "strings:",
(int)fnvlist_num_pairs(stats.zns_string),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_boolean, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "booleans:",
(int)fnvlist_num_pairs(stats.zns_boolean),
(int)size, 100.0 * size / total);
size = total - sum; /* treat remainder as nvlist overhead */
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n\n", "nvlists:",
stats.zns_list_count, (int)size, 100.0 * size / total);
if (stats.zns_leaf_count > 0) {
size_t average = stats.zns_leaf_total / stats.zns_leaf_count;
(void) printf("%12s %4d %6d bytes average\n", "leaf vdevs:",
stats.zns_leaf_count, (int)average);
(void) printf("%24d bytes largest\n",
(int)stats.zns_leaf_largest);
if (dump_opt['l'] >= 3 && average > 0)
(void) printf(" space for %d additional leaf vdevs\n",
(int)((cap - total) / average));
}
(void) printf("\n");
nvlist_free(stats.zns_string);
nvlist_free(stats.zns_uint64);
nvlist_free(stats.zns_boolean);
}
typedef struct cksum_record {
zio_cksum_t cksum;
boolean_t labels[VDEV_LABELS];
avl_node_t link;
} cksum_record_t;
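/*
 * AVL helpers that track which of the four vdev labels carried each distinct
 * config or uberblock checksum, so identical copies can be reported together.
 */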
static int
cksum_record_compare(const void *x1, const void *x2)
{
const cksum_record_t *l = (cksum_record_t *)x1;
const cksum_record_t *r = (cksum_record_t *)x2;
int arraysize = ARRAY_SIZE(l->cksum.zc_word);
int difference = 0;
for (int i = 0; i < arraysize; i++) {
difference = TREE_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
if (difference)
break;
}
return (difference);
}
static cksum_record_t *
cksum_record_alloc(zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
rec->cksum = *cksum;
rec->labels[l] = B_TRUE;
return (rec);
}
static cksum_record_t *
cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
{
cksum_record_t lookup = { .cksum = *cksum };
avl_index_t where;
return (avl_find(tree, &lookup, &where));
}
static cksum_record_t *
cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = cksum_record_lookup(tree, cksum);
if (rec) {
rec->labels[l] = B_TRUE;
} else {
rec = cksum_record_alloc(cksum, l);
avl_add(tree, rec);
}
return (rec);
}
static int
first_label(cksum_record_t *rec)
{
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i])
return (i);
return (-1);
}
static void
print_label_numbers(const char *prefix, const cksum_record_t *rec)
{
fputs(prefix, stdout);
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i] == B_TRUE)
printf("%d ", i);
putchar('\n');
}
#define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
typedef struct zdb_label {
vdev_label_t label;
uint64_t label_offset;
nvlist_t *config_nv;
cksum_record_t *config;
cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
boolean_t header_printed;
boolean_t read_failed;
boolean_t cksum_valid;
} zdb_label_t;
static void
print_label_header(zdb_label_t *label, int l)
{
if (dump_opt['q'])
return;
if (label->header_printed == B_TRUE)
return;
(void) printf("------------------------------------\n");
(void) printf("LABEL %d %s\n", l,
label->cksum_valid ? "" : "(Bad label cksum)");
(void) printf("------------------------------------\n");
label->header_printed = B_TRUE;
}
static void
print_l2arc_header(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device header\n");
(void) printf("------------------------------------\n");
}
static void
print_l2arc_log_blocks(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device log blocks\n");
(void) printf("------------------------------------\n");
}
static void
dump_l2arc_log_entries(uint64_t log_entries,
l2arc_log_ent_phys_t *le, uint64_t i)
{
for (int j = 0; j < log_entries; j++) {
dva_t dva = le[j].le_dva;
(void) printf("lb[%4llu]\tle[%4d]\tDVA asize: %llu, "
"vdev: %llu, offset: %llu\n",
(u_longlong_t)i, j + 1,
(u_longlong_t)DVA_GET_ASIZE(&dva),
(u_longlong_t)DVA_GET_VDEV(&dva),
(u_longlong_t)DVA_GET_OFFSET(&dva));
(void) printf("|\t\t\t\tbirth: %llu\n",
(u_longlong_t)le[j].le_birth);
(void) printf("|\t\t\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tpsize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tcompr: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&le[j])->le_prop));
(void) printf("|\t\t\t\tcomplevel: %llu\n",
(u_longlong_t)(&le[j])->le_complevel);
(void) printf("|\t\t\t\ttype: %llu\n",
(u_longlong_t)L2BLK_GET_TYPE((&le[j])->le_prop));
(void) printf("|\t\t\t\tprotected: %llu\n",
(u_longlong_t)L2BLK_GET_PROTECTED((&le[j])->le_prop));
(void) printf("|\t\t\t\tprefetch: %llu\n",
(u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop));
(void) printf("|\t\t\t\taddress: %llu\n",
(u_longlong_t)le[j].le_daddr);
(void) printf("|\t\t\t\tARC state: %llu\n",
(u_longlong_t)L2BLK_GET_STATE((&le[j])->le_prop));
(void) printf("|\n");
}
(void) printf("\n");
}
static void
dump_l2arc_log_blkptr(const l2arc_log_blkptr_t *lbps)
{
(void) printf("|\t\tdaddr: %llu\n", (u_longlong_t)lbps->lbp_daddr);
(void) printf("|\t\tpayload_asize: %llu\n",
(u_longlong_t)lbps->lbp_payload_asize);
(void) printf("|\t\tpayload_start: %llu\n",
(u_longlong_t)lbps->lbp_payload_start);
(void) printf("|\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE(lbps->lbp_prop));
(void) printf("|\t\tasize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE(lbps->lbp_prop));
(void) printf("|\t\tcompralgo: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS(lbps->lbp_prop));
(void) printf("|\t\tcksumalgo: %llu\n",
(u_longlong_t)L2BLK_GET_CHECKSUM(lbps->lbp_prop));
(void) printf("|\n\n");
}
static void
dump_l2arc_log_blocks(int fd, const l2arc_dev_hdr_phys_t *l2dhdr,
l2arc_dev_hdr_phys_t *rebuild)
{
l2arc_log_blk_phys_t this_lb;
uint64_t asize;
l2arc_log_blkptr_t lbps[2];
abd_t *abd;
zio_cksum_t cksum;
int failed = 0;
l2arc_dev_t dev;
if (!dump_opt['q'])
print_l2arc_log_blocks();
memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
dev.l2ad_evict = l2dhdr->dh_evict;
dev.l2ad_start = l2dhdr->dh_start;
dev.l2ad_end = l2dhdr->dh_end;
if (l2dhdr->dh_start_lbps[0].lbp_daddr == 0) {
/* no log blocks to read */
if (!dump_opt['q']) {
(void) printf("No log blocks to read\n");
(void) printf("\n");
}
return;
} else {
dev.l2ad_hand = lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
}
dev.l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
for (;;) {
if (!l2arc_log_blkptr_valid(&dev, &lbps[0]))
break;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
if (pread64(fd, &this_lb, asize, lbps[0].lbp_daddr) != asize) {
if (!dump_opt['q']) {
(void) printf("Error while reading next log "
"block\n\n");
}
break;
}
fletcher_4_native_varsize(&this_lb, asize, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, lbps[0].lbp_cksum)) {
failed++;
if (!dump_opt['q']) {
(void) printf("Invalid cksum\n");
dump_l2arc_log_blkptr(&lbps[0]);
}
break;
}
switch (L2BLK_GET_COMPRESS((&lbps[0])->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
default:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, &this_lb, 0, asize);
if (zio_decompress_data(L2BLK_GET_COMPRESS(
(&lbps[0])->lbp_prop), abd, &this_lb,
asize, sizeof (this_lb), NULL) != 0) {
(void) printf("L2ARC block decompression "
"failed\n");
abd_free(abd);
goto out;
}
abd_free(abd);
break;
}
if (this_lb.lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(&this_lb, sizeof (this_lb));
if (this_lb.lb_magic != L2ARC_LOG_BLK_MAGIC) {
if (!dump_opt['q'])
(void) printf("Invalid log block magic\n\n");
break;
}
rebuild->dh_lb_count++;
rebuild->dh_lb_asize += asize;
if (dump_opt['l'] > 1 && !dump_opt['q']) {
(void) printf("lb[%4llu]\tmagic: %llu\n",
(u_longlong_t)rebuild->dh_lb_count,
(u_longlong_t)this_lb.lb_magic);
dump_l2arc_log_blkptr(&lbps[0]);
}
if (dump_opt['l'] > 2 && !dump_opt['q'])
dump_l2arc_log_entries(l2dhdr->dh_log_entries,
this_lb.lb_entries,
rebuild->dh_lb_count);
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev.l2ad_evict) &&
!dev.l2ad_first)
break;
lbps[0] = lbps[1];
lbps[1] = this_lb.lb_prev_lbp;
}
out:
if (!dump_opt['q']) {
(void) printf("log_blk_count:\t %llu with valid cksum\n",
(u_longlong_t)rebuild->dh_lb_count);
(void) printf("\t\t %d with invalid cksum\n", failed);
(void) printf("log_blk_asize:\t %llu\n\n",
(u_longlong_t)rebuild->dh_lb_asize);
}
}
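/*
* Read the L2ARC device header at offset VDEV_LABEL_START_SIZE,
* byteswapping it if it was written with the opposite endianness,
* print its fields, and cross-check its log-block accounting against
* an actual walk of the log blocks.  Returns 1 only if the header
* claims more log blocks or space than the walk could find.
*/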
static int
dump_l2arc_header(int fd)
{
l2arc_dev_hdr_phys_t l2dhdr = {0}, rebuild = {0};
int error = B_FALSE;
if (pread64(fd, &l2dhdr, sizeof (l2dhdr),
VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) {
error = B_TRUE;
} else {
if (l2dhdr.dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(&l2dhdr, sizeof (l2dhdr));
if (l2dhdr.dh_magic != L2ARC_DEV_HDR_MAGIC)
error = B_TRUE;
}
if (error) {
(void) printf("L2ARC device header not found\n\n");
/* Do not return an error here for backward compatibility */
return (0);
} else if (!dump_opt['q']) {
print_l2arc_header();
(void) printf(" magic: %llu\n",
(u_longlong_t)l2dhdr.dh_magic);
(void) printf(" version: %llu\n",
(u_longlong_t)l2dhdr.dh_version);
(void) printf(" pool_guid: %llu\n",
(u_longlong_t)l2dhdr.dh_spa_guid);
(void) printf(" flags: %llu\n",
(u_longlong_t)l2dhdr.dh_flags);
(void) printf(" start_lbps[0]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[0].lbp_daddr);
(void) printf(" start_lbps[1]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[1].lbp_daddr);
(void) printf(" log_blk_ent: %llu\n",
(u_longlong_t)l2dhdr.dh_log_entries);
(void) printf(" start: %llu\n",
(u_longlong_t)l2dhdr.dh_start);
(void) printf(" end: %llu\n",
(u_longlong_t)l2dhdr.dh_end);
(void) printf(" evict: %llu\n",
(u_longlong_t)l2dhdr.dh_evict);
(void) printf(" lb_asize_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_asize);
(void) printf(" lb_count_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_count);
(void) printf(" trim_action_time: %llu\n",
(u_longlong_t)l2dhdr.dh_trim_action_time);
(void) printf(" trim_state: %llu\n\n",
(u_longlong_t)l2dhdr.dh_trim_state);
}
dump_l2arc_log_blocks(fd, &l2dhdr, &rebuild);
/*
* The total aligned size of log blocks and the number of log blocks
* reported in the device header may be less than what
* dump_l2arc_log_blocks(), which emulates l2arc_rebuild(), reports.
* This happens because dump_l2arc_log_blocks() lacks the memory
* pressure valve that l2arc_rebuild() has. Thus, on a system with low
* memory, l2arc_rebuild() may exit prematurely and dh_lb_asize and
* dh_lb_count will be lower to begin with than what exists on the
* device. This is normal and zdb should not exit with an error. The
* opposite case should never happen though: the values reported in the
* header should never be higher than what dump_l2arc_log_blocks() and
* l2arc_rebuild() report. If this happens there is a leak in the
* accounting of log blocks.
*/
if (l2dhdr.dh_lb_asize > rebuild.dh_lb_asize ||
l2dhdr.dh_lb_count > rebuild.dh_lb_count)
return (1);
return (0);
}
static void
dump_config_from_label(zdb_label_t *label, size_t buflen, int l)
{
if (dump_opt['q'])
return;
if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
return;
print_label_header(label, l);
dump_nvlist(label->config_nv, 4);
print_label_numbers(" labels = ", label->config);
if (dump_opt['l'] >= 2)
dump_nvlist_stats(label->config_nv, buflen);
}
#define ZDB_MAX_UB_HEADER_SIZE 32
static void
dump_label_uberblocks(zdb_label_t *label, uint64_t ashift, int label_num)
{
vdev_t vd;
char header[ZDB_MAX_UB_HEADER_SIZE];
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)&label->label + uoff);
cksum_record_t *rec = label->uberblocks[i];
if (rec == NULL) {
if (dump_opt['u'] >= 2) {
print_label_header(label, label_num);
(void) printf(" Uberblock[%d] invalid\n", i);
}
continue;
}
if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
continue;
if ((dump_opt['u'] < 4) &&
(ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
(i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
continue;
print_label_header(label, label_num);
(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
" Uberblock[%d]\n", i);
dump_uberblock(ub, header, "");
print_label_numbers(" labels = ", rec);
}
}
static char curpath[PATH_MAX];
/*
* Iterate through the path components, recursively passing
* current one's obj and remaining path until we find the obj
* for the last one.
*/
static int
dump_path_impl(objset_t *os, uint64_t obj, char *name, uint64_t *retobj)
{
int err;
boolean_t header = B_TRUE;
uint64_t child_obj;
char *s;
dmu_buf_t *db;
dmu_object_info_t doi;
if ((s = strchr(name, '/')) != NULL)
*s = '\0';
err = zap_lookup(os, obj, name, 8, 1, &child_obj);
(void) strlcat(curpath, name, sizeof (curpath));
if (err != 0) {
(void) fprintf(stderr, "failed to lookup %s: %s\n",
curpath, strerror(err));
return (err);
}
child_obj = ZFS_DIRENT_OBJ(child_obj);
err = sa_buf_hold(os, child_obj, FTAG, &db);
if (err != 0) {
(void) fprintf(stderr,
"failed to get SA dbuf for obj %llu: %s\n",
(u_longlong_t)child_obj, strerror(err));
return (EINVAL);
}
dmu_object_info_from_db(db, &doi);
sa_buf_rele(db, FTAG);
if (doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) {
(void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
doi.doi_bonus_type, (u_longlong_t)child_obj);
return (EINVAL);
}
if (dump_opt['v'] > 6) {
(void) printf("obj=%llu %s type=%d bonustype=%d\n",
(u_longlong_t)child_obj, curpath, doi.doi_type,
doi.doi_bonus_type);
}
(void) strlcat(curpath, "/", sizeof (curpath));
switch (doi.doi_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (s != NULL && *(s + 1) != '\0')
return (dump_path_impl(os, child_obj, s + 1, retobj));
zfs_fallthrough;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (retobj != NULL) {
*retobj = child_obj;
} else {
dump_object(os, child_obj, dump_opt['v'], &header,
NULL, 0);
}
return (0);
default:
(void) fprintf(stderr, "object %llu has non-file/directory "
"type %d\n", (u_longlong_t)obj, doi.doi_type);
break;
}
return (EINVAL);
}
/*
* Dump the blocks for the object specified by path inside the dataset.
*/
static int
dump_path(char *ds, char *path, uint64_t *retobj)
{
int err;
objset_t *os;
uint64_t root_obj;
err = open_objset(ds, FTAG, &os);
if (err != 0)
return (err);
err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
if (err != 0) {
(void) fprintf(stderr, "can't lookup root znode: %s\n",
strerror(err));
close_objset(os, FTAG);
return (EINVAL);
}
(void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
err = dump_path_impl(os, root_obj, path, retobj);
close_objset(os, FTAG);
return (err);
}
+static int
+dump_backup_bytes(objset_t *os, void *buf, int len, void *arg)
+{
+ const char *p = (const char *)buf;
+ ssize_t nwritten;
+
+ (void) os;
+ (void) arg;
+
+ /* Write the data out, handling short writes and signals. */
+ while ((nwritten = write(STDOUT_FILENO, p, len)) < len) {
+ if (nwritten < 0) {
+ if (errno == EINTR)
+ continue;
+ return (errno);
+ }
+ p += nwritten;
+ len -= nwritten;
+ }
+
+ return (0);
+}
+
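+/*
+ * Write a send stream for the given objset to stdout: parse the send
+ * flags (e = embedded data, L = large blocks, c = compressed, w = raw),
+ * refuse to write a stream to a terminal, and let dmu_send_obj() feed
+ * the data through dump_backup_bytes().
+ */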
+static void
+dump_backup(const char *pool, uint64_t objset_id, const char *flagstr)
+{
+ boolean_t embed = B_FALSE;
+ boolean_t large_block = B_FALSE;
+ boolean_t compress = B_FALSE;
+ boolean_t raw = B_FALSE;
+
+ const char *c;
+ for (c = flagstr; c != NULL && *c != '\0'; c++) {
+ switch (*c) {
+ case 'e':
+ embed = B_TRUE;
+ break;
+ case 'L':
+ large_block = B_TRUE;
+ break;
+ case 'c':
+ compress = B_TRUE;
+ break;
+ case 'w':
+ raw = B_TRUE;
+ break;
+ default:
+ fprintf(stderr, "dump_backup: invalid flag "
+ "'%c'\n", *c);
+ return;
+ }
+ }
+
+ if (isatty(STDOUT_FILENO)) {
+ fprintf(stderr, "dump_backup: stream cannot be written "
+ "to a terminal\n");
+ return;
+ }
+
+ offset_t off = 0;
+ dmu_send_outparams_t out = {
+ .dso_outfunc = dump_backup_bytes,
+ .dso_dryrun = B_FALSE,
+ };
+
+ int err = dmu_send_obj(pool, objset_id, /* fromsnap */0, embed,
+ large_block, compress, raw, /* saved */ B_FALSE, STDOUT_FILENO,
+ &off, &out);
+ if (err != 0) {
+ fprintf(stderr, "dump_backup: dmu_send_obj: %s\n",
+ strerror(err));
+ return;
+ }
+}
+
static int
zdb_copy_object(objset_t *os, uint64_t srcobj, char *destfile)
{
int err = 0;
uint64_t size, readsize, oursize, offset;
ssize_t writesize;
sa_handle_t *hdl;
(void) printf("Copying object %" PRIu64 " to file %s\n", srcobj,
destfile);
VERIFY3P(os, ==, sa_os);
if ((err = sa_handle_get(os, srcobj, NULL, SA_HDL_PRIVATE, &hdl))) {
(void) printf("Failed to get handle for SA znode\n");
return (err);
}
if ((err = sa_lookup(hdl, sa_attr_table[ZPL_SIZE], &size, 8))) {
(void) sa_handle_destroy(hdl);
return (err);
}
(void) sa_handle_destroy(hdl);
(void) printf("Object %" PRIu64 " is %" PRIu64 " bytes\n", srcobj,
size);
if (size == 0) {
return (EINVAL);
}
int fd = open(destfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (fd == -1)
return (errno);
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
offset = 0;
char *buf = kmem_alloc(oursize, KM_NOSLEEP);
if (buf == NULL) {
(void) close(fd);
return (ENOMEM);
}
while (offset < size) {
readsize = MIN(size - offset, 1 << 20);
err = dmu_read(os, srcobj, offset, readsize, buf, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(buf, oursize);
(void) close(fd);
return (err);
}
if (dump_opt['v'] > 3) {
(void) printf("Read offset=%" PRIu64 " size=%" PRIu64
" error=%d\n", offset, readsize, err);
}
writesize = write(fd, buf, readsize);
if (writesize < 0) {
err = errno;
break;
} else if (writesize != readsize) {
/* Incomplete write */
(void) fprintf(stderr, "Short write, only wrote %llu of"
" %" PRIu64 " bytes, exiting...\n",
(u_longlong_t)writesize, readsize);
break;
}
offset += readsize;
}
(void) close(fd);
if (buf != NULL)
kmem_free(buf, oursize);
return (err);
}
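/*
* Recompute the embedded checksum over the vl_vdev_phys region of a
* label, using the label's on-disk offset as the checksum verifier,
* and compare it against the checksum stored in the trailing
* zio_eck_t.  Labels written with the opposite byte order are handled.
*/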
static boolean_t
label_cksum_valid(vdev_label_t *label, uint64_t offset)
{
zio_checksum_info_t *ci = &zio_checksum_table[ZIO_CHECKSUM_LABEL];
zio_cksum_t expected_cksum;
zio_cksum_t actual_cksum;
zio_cksum_t verifier;
zio_eck_t *eck;
int byteswap;
void *data = (char *)label + offsetof(vdev_label_t, vl_vdev_phys);
eck = (zio_eck_t *)((char *)(data) + VDEV_PHYS_SIZE) - 1;
offset += offsetof(vdev_label_t, vl_vdev_phys);
ZIO_SET_CHECKSUM(&verifier, offset, 0, 0, 0);
byteswap = (eck->zec_magic == BSWAP_64(ZEC_MAGIC));
if (byteswap)
byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
expected_cksum = eck->zec_cksum;
eck->zec_cksum = verifier;
abd_t *abd = abd_get_from_buf(data, VDEV_PHYS_SIZE);
ci->ci_func[byteswap](abd, VDEV_PHYS_SIZE, NULL, &actual_cksum);
abd_free(abd);
if (byteswap)
byteswap_uint64_array(&expected_cksum, sizeof (zio_cksum_t));
if (ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (B_TRUE);
return (B_FALSE);
}
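/*
* Read all VDEV_LABELS labels from the given device, verify their
* checksums, and collect their configs and uberblocks in AVL trees
* keyed by checksum so that duplicates are only printed once.  For
* cache devices the L2ARC header is dumped as well.  Returns 0 on
* success, 1 if any error was encountered, and 2 if no valid config
* could be unpacked from any label.
*/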
static int
dump_label(const char *dev)
{
char path[MAXPATHLEN];
zdb_label_t labels[VDEV_LABELS] = {{{{0}}}};
uint64_t psize, ashift, l2cache;
struct stat64 statbuf;
boolean_t config_found = B_FALSE;
boolean_t error = B_FALSE;
boolean_t read_l2arc_header = B_FALSE;
avl_tree_t config_tree;
avl_tree_t uberblock_tree;
void *node, *cookie;
int fd;
/*
* Check if we were given an absolute path and use it as is.
* Otherwise, if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(path, dev, sizeof (path));
if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
int error;
error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(path)) {
if (zfs_append_partition(path, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat64(path, &statbuf) != 0)) {
(void) printf("failed to find device %s, try "
"specifying absolute path instead\n", dev);
return (1);
}
}
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
exit(1);
}
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
(void) close(fd);
exit(1);
}
if (S_ISBLK(statbuf.st_mode) && zfs_dev_flush(fd) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
psize = statbuf.st_size;
psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
ashift = SPA_MINBLOCKSHIFT;
/*
* 1. Read the label from disk
* 2. Verify label cksum
* 3. Unpack the configuration and insert in config tree.
* 4. Traverse all uberblocks and insert in uberblock tree.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
char *buf = label->label.vl_vdev_phys.vp_nvlist;
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
nvlist_t *config;
cksum_record_t *rec;
zio_cksum_t cksum;
vdev_t vd;
label->label_offset = vdev_label_offset(psize, l, 0);
if (pread64(fd, &label->label, sizeof (label->label),
label->label_offset) != sizeof (label->label)) {
if (!dump_opt['q'])
(void) printf("failed to read label %d\n", l);
label->read_failed = B_TRUE;
error = B_TRUE;
continue;
}
label->read_failed = B_FALSE;
label->cksum_valid = label_cksum_valid(&label->label,
label->label_offset);
if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
nvlist_t *vdev_tree = NULL;
size_t size;
if ((nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
(nvlist_lookup_uint64(vdev_tree,
ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
ashift = SPA_MINBLOCKSHIFT;
if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
size = buflen;
/* If the device is a cache device, read the L2ARC header later. */
if (!read_l2arc_header) {
if (nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
l2cache == POOL_STATE_L2CACHE) {
read_l2arc_header = B_TRUE;
}
}
fletcher_4_native_varsize(buf, size, &cksum);
rec = cksum_record_insert(&config_tree, &cksum, l);
label->config = rec;
label->config_nv = config;
config_found = B_TRUE;
} else {
error = B_TRUE;
}
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)label + uoff);
if (uberblock_verify(ub))
continue;
fletcher_4_native_varsize(ub, sizeof (*ub), &cksum);
rec = cksum_record_insert(&uberblock_tree, &cksum, l);
label->uberblocks[i] = rec;
}
}
/*
* Dump the label and uberblocks.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
if (label->read_failed == B_TRUE)
continue;
if (label->config_nv) {
dump_config_from_label(label, buflen, l);
} else {
if (!dump_opt['q'])
(void) printf("failed to unpack label %d\n", l);
}
if (dump_opt['u'])
dump_label_uberblocks(label, ashift, l);
nvlist_free(label->config_nv);
}
/*
* Dump the L2ARC header, if present.
*/
if (read_l2arc_header)
error |= dump_l2arc_header(fd);
cookie = NULL;
while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
cookie = NULL;
while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
avl_destroy(&config_tree);
avl_destroy(&uberblock_tree);
(void) close(fd);
return (config_found == B_FALSE ? 2 :
(error == B_TRUE ? 1 : 0));
}
static uint64_t dataset_feature_count[SPA_FEATURES];
static uint64_t global_feature_count[SPA_FEATURES];
static uint64_t remap_deadlist_count = 0;
static int
dump_one_objset(const char *dsname, void *arg)
{
(void) arg;
int error;
objset_t *os;
spa_feature_t f;
error = open_objset(dsname, FTAG, &os);
if (error != 0)
return (0);
for (f = 0; f < SPA_FEATURES; f++) {
if (!dsl_dataset_feature_is_active(dmu_objset_ds(os), f))
continue;
ASSERT(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET);
dataset_feature_count[f]++;
}
if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
remap_deadlist_count++;
}
for (dsl_bookmark_node_t *dbn =
avl_first(&dmu_objset_ds(os)->ds_bookmarks); dbn != NULL;
dbn = AVL_NEXT(&dmu_objset_ds(os)->ds_bookmarks, dbn)) {
mos_obj_refd(dbn->dbn_phys.zbm_redaction_obj);
if (dbn->dbn_phys.zbm_redaction_obj != 0)
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS]++;
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN]++;
}
if (dsl_deadlist_is_open(&dmu_objset_ds(os)->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
global_feature_count[SPA_FEATURE_LIVELIST]++;
}
dump_objset(os);
close_objset(os, FTAG);
fuid_table_destroy();
return (0);
}
/*
* Block statistics.
*/
#define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
typedef struct zdb_blkstats {
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_count;
uint64_t zb_gangs;
uint64_t zb_ditto_samevdev;
uint64_t zb_ditto_same_ms;
uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
} zdb_blkstats_t;
/*
* Extended object types to report deferred frees and dedup auto-ditto blocks.
*/
#define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
#define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
static const char *zdb_ot_extname[] = {
"deferred free",
"dedup ditto",
"other",
"Total",
};
#define ZB_TOTAL DN_MAX_LEVELS
#define SPA_MAX_FOR_16M (SPA_MAXBLOCKSHIFT+1)
typedef struct zdb_cb {
zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
uint64_t zcb_removing_size;
uint64_t zcb_checkpoint_size;
uint64_t zcb_dedup_asize;
uint64_t zcb_dedup_blocks;
uint64_t zcb_psize_count[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_count[SPA_MAX_FOR_16M];
uint64_t zcb_asize_count[SPA_MAX_FOR_16M];
uint64_t zcb_psize_len[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_len[SPA_MAX_FOR_16M];
uint64_t zcb_asize_len[SPA_MAX_FOR_16M];
uint64_t zcb_psize_total;
uint64_t zcb_lsize_total;
uint64_t zcb_asize_total;
uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
[BPE_PAYLOAD_SIZE + 1];
uint64_t zcb_start;
hrtime_t zcb_lastprint;
uint64_t zcb_totalasize;
uint64_t zcb_errors[256];
int zcb_readfails;
int zcb_haderrors;
spa_t *zcb_spa;
uint32_t **zcb_vd_obsolete_counts;
} zdb_cb_t;
/* test if two DVA offsets from the same vdev are within the same metaslab */
static boolean_t
same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2)
{
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t ms_shift = vd->vdev_ms_shift;
return ((off1 >> ms_shift) == (off2 >> ms_shift));
}
/*
* Used to simplify reporting of the histogram data.
*/
typedef struct one_histo {
const char *name;
uint64_t *count;
uint64_t *len;
uint64_t cumulative;
} one_histo_t;
/*
* The number of separate histograms processed for psize, lsize and asize.
*/
#define NUM_HISTO 3
/*
* This routine produces a fixed-column-width output of three
* histograms, showing for each power-of-two block size from 512 bytes
* up to SPA_MAXBLOCKSIZE (16M) the count, length and cumulative
* length of the psize, lsize and asize blocks.
*
* All three types of blocks are listed on a single line.
*
* By default the table is printed in nicenumber format (e.g. 123K) but
* if the '-P' parameter is specified then the full raw number (parseable)
* is printed out.
*/
static void
dump_size_histograms(zdb_cb_t *zcb)
{
/*
* A temporary buffer that allows us to convert a number into
* a string using zdb_nicenum() so that either raw or human
* readable numbers can be output.
*/
char numbuf[32];
/*
* Define titles which are used in the headers of the tables
* printed by this routine.
*/
const char blocksize_title1[] = "block";
const char blocksize_title2[] = "size";
const char count_title[] = "Count";
const char length_title[] = "Size";
const char cumulative_title[] = "Cum.";
/*
* Setup the histogram arrays (psize, lsize, and asize).
*/
one_histo_t parm_histo[NUM_HISTO];
parm_histo[0].name = "psize";
parm_histo[0].count = zcb->zcb_psize_count;
parm_histo[0].len = zcb->zcb_psize_len;
parm_histo[0].cumulative = 0;
parm_histo[1].name = "lsize";
parm_histo[1].count = zcb->zcb_lsize_count;
parm_histo[1].len = zcb->zcb_lsize_len;
parm_histo[1].cumulative = 0;
parm_histo[2].name = "asize";
parm_histo[2].count = zcb->zcb_asize_count;
parm_histo[2].len = zcb->zcb_asize_len;
parm_histo[2].cumulative = 0;
(void) printf("\nBlock Size Histogram\n");
/*
* Print the first line titles
*/
if (dump_opt['P'])
(void) printf("\n%s\t", blocksize_title1);
else
(void) printf("\n%7s ", blocksize_title1);
for (int j = 0; j < NUM_HISTO; j++) {
if (dump_opt['P']) {
if (j < NUM_HISTO - 1) {
(void) printf("%s\t\t\t", parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf(" %s", parm_histo[j].name);
}
} else {
if (j < NUM_HISTO - 1) {
/* Left aligned strings in the output */
(void) printf("%-7s ",
parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf("%s", parm_histo[j].name);
}
}
}
(void) printf("\n");
/*
* Print the second line titles
*/
if (dump_opt['P']) {
(void) printf("%s\t", blocksize_title2);
} else {
(void) printf("%7s ", blocksize_title2);
}
for (int i = 0; i < NUM_HISTO; i++) {
if (dump_opt['P']) {
(void) printf("%s\t%s\t%s\t",
count_title, length_title, cumulative_title);
} else {
(void) printf("%7s%7s%7s",
count_title, length_title, cumulative_title);
}
}
(void) printf("\n");
/*
* Print the rows
*/
for (int i = SPA_MINBLOCKSHIFT; i < SPA_MAX_FOR_16M; i++) {
/*
* Print the first column showing the blocksize
*/
zdb_nicenum((1ULL << i), numbuf, sizeof (numbuf));
if (dump_opt['P']) {
printf("%s", numbuf);
} else {
printf("%7s:", numbuf);
}
/*
* Print the remaining set of 3 columns per size:
* for psize, lsize and asize
*/
for (int j = 0; j < NUM_HISTO; j++) {
parm_histo[j].cumulative += parm_histo[j].len[i];
zdb_nicenum(parm_histo[j].count[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].len[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].cumulative,
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
}
(void) printf("\n");
}
}
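/*
* Account for one block pointer in the traversal statistics: update
* the per-level and per-type totals and the psize histogram, note
* same-vdev/same-metaslab dittoed copies, and (unless -L is given)
* decrement in-core DDT refcounts for dedup'd blocks and claim the
* block so leak detection can later flag unclaimed space.
*/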
static void
zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
dmu_object_type_t type)
{
uint64_t refcnt = 0;
int i;
ASSERT(type < ZDB_OT_TOTAL);
if (zilog && zil_bp_tree_add(zilog, bp) != 0)
return;
spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER);
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
int t = (i & 1) ? type : ZDB_OT_TOTAL;
int equal;
zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_count++;
/*
* The histogram is only big enough to record blocks up to
* SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
* "other", bucket.
*/
unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
zb->zb_psize_histogram[idx]++;
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) {
zb->zb_ditto_samevdev++;
if (same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
}
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal != 0) {
zb->zb_ditto_samevdev++;
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
}
break;
}
}
spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG);
if (BP_IS_EMBEDDED(bp)) {
zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
[BPE_GET_PSIZE(bp)]++;
return;
}
/*
* The binning histogram bins by powers of two up to
* SPA_MAXBLOCKSIZE rather than creating bins for
* every possible blocksize found in the pool.
*/
int bin = highbit64(BP_GET_PSIZE(bp)) - 1;
zcb->zcb_psize_count[bin]++;
zcb->zcb_psize_len[bin] += BP_GET_PSIZE(bp);
zcb->zcb_psize_total += BP_GET_PSIZE(bp);
bin = highbit64(BP_GET_LSIZE(bp)) - 1;
zcb->zcb_lsize_count[bin]++;
zcb->zcb_lsize_len[bin] += BP_GET_LSIZE(bp);
zcb->zcb_lsize_total += BP_GET_LSIZE(bp);
bin = highbit64(BP_GET_ASIZE(bp)) - 1;
zcb->zcb_asize_count[bin]++;
zcb->zcb_asize_len[bin] += BP_GET_ASIZE(bp);
zcb->zcb_asize_total += BP_GET_ASIZE(bp);
if (dump_opt['L'])
return;
if (BP_GET_DEDUP(bp)) {
ddt_t *ddt;
ddt_entry_t *dde;
ddt = ddt_select(zcb->zcb_spa, bp);
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_FALSE);
if (dde == NULL) {
refcnt = 0;
} else {
ddt_phys_t *ddp = ddt_phys_select(dde, bp);
ddt_phys_decref(ddp);
refcnt = ddp->ddp_refcnt;
if (ddt_phys_total_refcnt(dde) == 0)
ddt_remove(ddt, dde);
}
ddt_exit(ddt);
}
VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
refcnt ? 0 : spa_min_claim_txg(zcb->zcb_spa),
bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
}
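/*
* Completion callback for the verification reads issued by
* zdb_blkptr_cb(): drop the in-flight byte accounting, wake up any
* throttled readers, and record read errors that were not speculative.
*/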
static void
zdb_blkptr_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
int ioerr = zio->io_error;
zdb_cb_t *zcb = zio->io_private;
zbookmark_phys_t *zb = &zio->io_bookmark;
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
char blkbuf[BP_SPRINTF_LEN];
zcb->zcb_haderrors = 1;
zcb->zcb_errors[ioerr]++;
if (dump_opt['b'] >= 2)
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
else
blkbuf[0] = '\0';
(void) printf("zdb_blkptr_cb: "
"Got error %d reading "
"<%llu, %llu, %lld, %llx> %s -- skipping\n",
ioerr,
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level,
(u_longlong_t)zb->zb_blkid,
blkbuf);
}
mutex_exit(&spa->spa_scrub_lock);
abd_free(zio->io_abd);
}
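/*
* traverse_pool() callback: count every block pointer visited and,
* when checksum verification is requested (-c for metadata, -cc for
* everything), issue an asynchronous scrub read completed by
* zdb_blkptr_done().  Also emits a periodic progress line on stderr.
*/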
static int
zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zdb_cb_t *zcb = arg;
dmu_object_type_t type;
boolean_t is_metadata;
if (zb->zb_level == ZB_DNODE_LEVEL)
return (0);
if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("objset %llu object %llu "
"level %lld offset 0x%llx %s\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(u_longlong_t)blkid2offset(dnp, bp, zb),
blkbuf);
}
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
type = BP_GET_TYPE(bp);
zdb_count_block(zcb, zilog, bp,
(type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
if (!BP_IS_EMBEDDED(bp) &&
(dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
size_t size = BP_GET_PSIZE(bp);
abd_t *abd = abd_alloc(size, B_FALSE);
int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
flags |= ZIO_FLAG_SPECULATIVE;
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes > max_inflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(NULL, spa, bp, abd, size,
zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
}
zcb->zcb_readfails = 0;
/* only call gethrtime() every 100 blocks */
static int iters;
if (++iters > 100)
iters = 0;
else
return (0);
if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
uint64_t now = gethrtime();
char buf[10];
uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
uint64_t kb_per_sec =
1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
uint64_t sec_remaining =
(zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
/* make sure nicenum has enough space */
_Static_assert(sizeof (buf) >= NN_NUMBUF_SZ, "buf truncated");
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"\r%5s completed (%4"PRIu64"MB/s) "
"estimated time remaining: "
"%"PRIu64"hr %02"PRIu64"min %02"PRIu64"sec ",
buf, kb_per_sec / 1024,
sec_remaining / 60 / 60,
sec_remaining / 60 % 60,
sec_remaining % 60);
zcb->zcb_lastprint = now;
}
return (0);
}
static void
zdb_leak(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
(u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
}
static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
static int
load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
spa_vdev_removal_t *svr = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
/* skip vdevs we don't care about */
if (sme->sme_vdev != svr->svr_vdev_id)
return (0);
vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
range_tree_add(svr->svr_allocd_segs, offset, size);
else
range_tree_remove(svr->svr_allocd_segs, offset, size);
return (0);
}
static void
claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset, (void) arg;
/*
* This callback was called through a remap from
* a device being removed. Therefore, the vdev that
* this callback is applied to is a concrete
* vdev.
*/
ASSERT(vdev_is_concrete(vd));
VERIFY0(metaslab_claim_impl(vd, offset, size,
spa_min_claim_txg(vd->vdev_spa)));
}
static void
claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
claim_segment_impl_cb, NULL);
}
/*
* After accounting for all allocated blocks that are directly referenced,
* we might have missed a reference to a block from a partially complete
* (and thus unused) indirect mapping object. We perform a secondary pass
* through the metaslabs we have already mapped and claim the destination
* blocks.
*/
static void
zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return;
if (spa->spa_vdev_removal == NULL)
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT0(range_tree_space(svr->svr_allocd_segs));
range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
ASSERT0(range_tree_space(allocs));
if (msp->ms_sm != NULL)
VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
}
range_tree_destroy(allocs);
iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
/*
* Clear everything past what has been synced,
* because we have not allocated mappings for
* it yet.
*/
range_tree_clear(svr->svr_allocd_segs,
vdev_indirect_mapping_max_offset(vim),
vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static int
increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
spa_t *spa = zcb->zcb_spa;
vdev_t *vd;
const dva_t *dva = &bp->blk_dva[0];
ASSERT(!bp_freed);
ASSERT(!dump_opt['L']);
ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
vdev_indirect_mapping_increment_obsolete_count(
vd->vdev_indirect_mapping,
DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
return (0);
}
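/*
* Build the per-entry obsolete reference counts for an indirect vdev
* by combining the mapping's counts object with the vdev's obsolete
* space map and, if a condense of this vdev is in progress, the
* previous obsolete space map as well.
*/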
static uint32_t *
zdb_load_obsolete_counts(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint64_t obsolete_sm_object;
uint32_t *counts;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
EQUIV(obsolete_sm_object != 0, vd->vdev_obsolete_sm != NULL);
counts = vdev_indirect_mapping_load_obsolete_counts(vim);
if (vd->vdev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
vd->vdev_obsolete_sm);
}
if (scip->scip_vdev == vd->vdev_id &&
scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
prev_obsolete_sm);
space_map_close(prev_obsolete_sm);
}
return (counts);
}
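/*
* Walk the on-disk DDT and load an in-core copy of every duplicated
* entry, charging the extra references to zcb_dedup_asize and
* zcb_dedup_blocks.  As traversal later claims each dedup'd block the
* refcounts are dropped again; anything left over indicates a leak.
*/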
static void
zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
ddt_bookmark_t ddb = {0};
ddt_entry_t dde;
int error;
int p;
ASSERT(!dump_opt['L']);
while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
blkptr_t blk;
ddt_phys_t *ddp = dde.dde_phys;
if (ddb.ddb_class == DDT_CLASS_UNIQUE)
return;
ASSERT(ddt_phys_total_refcnt(&dde) > 1);
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddb.ddb_checksum,
&dde.dde_key, ddp, &blk);
if (p == DDT_PHYS_DITTO) {
zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
} else {
zcb->zcb_dedup_asize +=
BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
zcb->zcb_dedup_blocks++;
}
}
ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
ddt_enter(ddt);
VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
ddt_exit(ddt);
}
ASSERT(error == ENOENT);
}
typedef struct checkpoint_sm_exclude_entry_arg {
vdev_t *cseea_vd;
uint64_t cseea_checkpoint_size;
} checkpoint_sm_exclude_entry_arg_t;
static int
checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
{
checkpoint_sm_exclude_entry_arg_t *cseea = arg;
vdev_t *vd = cseea->cseea_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
/*
* Since the vdev_checkpoint_sm exists in the vdev level
* and the ms_sm space maps exist in the metaslab level,
* an entry in the checkpoint space map could theoretically
* cross the boundaries of the metaslab to which it belongs.
*
* In reality, because of the way that we populate and
* manipulate the checkpoint's space maps currently,
* there shouldn't be any entries that cross metaslabs.
* Hence the assertion below.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* By removing the entry from the allocated segments we
* also verify that the entry is there to begin with.
*/
mutex_enter(&ms->ms_lock);
range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
cseea->cseea_checkpoint_size += sme->sme_run;
return (0);
}
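/*
* Open the vdev's checkpoint space map, if it has one, and remove each
* of its entries from the corresponding ms_allocatable tree so that
* blocks held only by the checkpoint are not reported as leaked.  The
* excluded space is accumulated in zcb_checkpoint_size.
*/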
static void
zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
{
spa_t *spa = vd->vdev_spa;
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
/*
* If there is no vdev_top_zap, we are in a pool whose
* version predates the pool checkpoint feature.
*/
if (vd->vdev_top_zap == 0)
return;
/*
* If there is no reference of the vdev_checkpoint_sm in
* the vdev_top_zap, then one of the following scenarios
* is true:
*
* 1] There is no checkpoint
* 2] There is a checkpoint, but no checkpointed blocks
* have been freed yet
* 3] The current vdev is indirect
*
* In these cases we return immediately.
*/
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
return;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1,
&checkpoint_sm_obj));
checkpoint_sm_exclude_entry_arg_t cseea;
cseea.cseea_vd = vd;
cseea.cseea_checkpoint_size = 0;
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
checkpoint_sm_exclude_entry_cb, &cseea));
space_map_close(checkpoint_sm);
zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size;
}
static void
zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id);
zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb);
}
}
static int
count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
int64_t *ualloc_space = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
*ualloc_space += sme->sme_run;
else
*ualloc_space -= sme->sme_run;
return (0);
}
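/*
* Sum the net allocated space recorded in the spacemap log that has
* not yet been flushed into the per-metaslab space maps.
*/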
static int64_t
get_unflushed_alloc_space(spa_t *spa)
{
if (dump_opt['L'])
return (0);
int64_t ualloc_space = 0;
iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
&ualloc_space);
return (ualloc_space);
}
static int
load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
{
maptype_t *uic_maptype = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (*uic_maptype == sme->sme_type)
range_tree_add(ms->ms_allocatable, offset, size);
else
range_tree_remove(ms->ms_allocatable, offset, size);
return (0);
}
static void
load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
{
iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
}
static void
load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
ASSERT3U(i, ==, vd->vdev_id);
if (vd->vdev_ops == &vdev_indirect_ops)
continue;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
(void) fprintf(stderr,
"\rloading concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)msp->ms_id,
(longlong_t)vd->vdev_ms_count);
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
msp->ms_allocatable, maptype));
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
}
load_unflushed_to_ms_allocatables(spa, maptype);
}
/*
* vim_idxp is an in-out parameter which (for indirect vdevs) is the
* index in vim_entries that has the first entry in this metaslab.
* On return, it will be set to the first entry after this metaslab.
*/
static void
load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
uint64_t *vim_idxp)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
(*vim_idxp)++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[*vim_idxp];
uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
ASSERT3U(ent_offset, >=, msp->ms_start);
if (ent_offset >= msp->ms_start + msp->ms_size)
break;
/*
* Mappings do not cross metaslab boundaries,
* because we create them by walking the metaslabs.
*/
ASSERT3U(ent_offset + ent_len, <=,
msp->ms_start + msp->ms_size);
range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
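/*
* For every indirect vdev, load its obsolete counts and populate
* per-metaslab ms_allocatable trees from the indirect mapping so that
* the mapped destinations can be claimed during traversal.
*/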
static void
zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
ASSERT3U(c, ==, vd->vdev_id);
if (vd->vdev_ops != &vdev_indirect_ops)
continue;
/*
* Note: we don't check for mapping leaks on
* removing vdevs because their ms_allocatable's
* are used to look for leaks in allocated space.
*/
zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd);
/*
* Normally, indirect vdevs don't have any
* metaslabs. We want to set them up for
* zio_claim().
*/
vdev_metaslab_group_create(vd);
VERIFY0(vdev_metaslab_init(vd, 0));
vdev_indirect_mapping_t *vim __maybe_unused =
vd->vdev_indirect_mapping;
uint64_t vim_idx = 0;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
(void) fprintf(stderr,
"\rloading indirect vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vd->vdev_ms[m]->ms_id,
(longlong_t)vd->vdev_ms_count);
load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m],
&vim_idx);
}
ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim));
}
}
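/*
* Prepare the pool for leak detection: repurpose every metaslab's
* ms_allocatable tree to hold allocated (rather than free) segments,
* set up indirect vdevs so their mappings can be claimed, exclude
* checkpointed space, and prime the obsolete counts and the in-core
* DDT state.
*/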
static void
zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
zcb->zcb_spa = spa;
if (dump_opt['L'])
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_allocatable. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
spa->spa_embedded_log_class->mc_ops = &zdb_metaslab_ops;
zcb->zcb_vd_obsolete_counts =
umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
UMEM_NOFAIL);
/*
* For leak detection, we overload the ms_allocatable trees
* to contain allocated segments instead of free segments.
* As a result, we can't use the normal metaslab_load/unload
* interfaces.
*/
zdb_leak_init_prepare_indirect_vdevs(spa, zcb);
load_concrete_ms_allocatable_trees(spa, SM_ALLOC);
/*
* On load_concrete_ms_allocatable_trees() we loaded all the
* allocated entries from the ms_sm to the ms_allocatable for
* each metaslab. If the pool has a checkpoint or is in the
* middle of discarding a checkpoint, some of these blocks
* may have been freed but their ms_sm may not have been
* updated because they are referenced by the checkpoint. In
* order to avoid false-positives during leak-detection, we
* go through the vdev's checkpoint space map and exclude all
* its entries from their relevant ms_allocatable.
*
* We also aggregate the space held by the checkpoint and add
* it to zcb_checkpoint_size.
*
* Note that at this point we are also verifying that all the
* entries on the checkpoint_sm are marked as allocated in
* the ms_sm of their relevant metaslab.
* [see comment in checkpoint_sm_exclude_entry_cb()]
*/
zdb_leak_init_exclude_checkpoint(spa, zcb);
ASSERT3U(zcb->zcb_checkpoint_size, ==, spa_get_checkpoint_space(spa));
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
(void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
increment_indirect_mapping_cb, zcb, NULL);
}
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
zdb_ddt_leak_init(spa, zcb);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static boolean_t
zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
{
boolean_t leaks = B_FALSE;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t total_leaked = 0;
boolean_t are_precise = B_FALSE;
ASSERT(vim != NULL);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
uint64_t obsolete_bytes = 0;
uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
/*
* This is not very efficient but it's easy to
* verify correctness.
*/
for (uint64_t inner_offset = 0;
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
inner_offset += 1ULL << vd->vdev_ashift) {
if (range_tree_contains(msp->ms_allocatable,
offset + inner_offset, 1ULL << vd->vdev_ashift)) {
obsolete_bytes += 1ULL << vd->vdev_ashift;
}
}
int64_t bytes_leaked = obsolete_bytes -
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (bytes_leaked != 0 && (are_precise || dump_opt['d'] >= 5)) {
(void) printf("obsolete indirect mapping count "
"mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(u_longlong_t)bytes_leaked);
}
total_leaked += ABS(bytes_leaked);
}
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (!are_precise && total_leaked > 0) {
int pct_leaked = total_leaked * 100 /
vdev_indirect_mapping_bytes_mapped(vim);
(void) printf("cannot verify obsolete indirect mapping "
"counts of vdev %llu because precise feature was not "
"enabled when it was removed: %d%% (%llx bytes) of mapping "
"unreferenced\n",
(u_longlong_t)vd->vdev_id, pct_leaked,
(u_longlong_t)total_leaked);
} else if (total_leaked > 0) {
(void) printf("obsolete indirect mapping count mismatch "
"for vdev %llu -- %llx total bytes mismatched\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)total_leaked);
leaks |= B_TRUE;
}
vdev_indirect_mapping_free_obsolete_counts(vim,
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
return (leaks);
}
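/*
* After traversal has claimed every reachable block, report whatever
* is still left in the repurposed ms_allocatable trees as leaked
* space and verify the obsolete counts of indirect vdevs.
*/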
static boolean_t
zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return (B_FALSE);
boolean_t leaks = B_FALSE;
vdev_t *rvd = spa->spa_root_vdev;
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
}
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, (msp->ms_group->mg_class ==
spa_embedded_log_class(spa)) ?
vd->vdev_log_mg : vd->vdev_mg);
/*
* ms_allocatable has been overloaded
* to contain allocated segments. Now that
* we finished traversing all blocks, any
* block that remains in the ms_allocatable
* represents an allocated block that we
* did not claim during the traversal.
* Claimed blocks would have been removed
* from the ms_allocatable. For indirect
* vdevs, space remaining in the tree
* represents parts of the mapping that are
* not referenced, which is not a bug.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
range_tree_vacate(msp->ms_allocatable,
NULL, NULL);
} else {
range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
}
}
umem_free(zcb->zcb_vd_obsolete_counts,
rvd->vdev_children * sizeof (uint32_t *));
zcb->zcb_vd_obsolete_counts = NULL;
return (leaks);
}
static int
count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
(void) tx;
zdb_cb_t *zcb = arg;
if (dump_opt['b'] >= 5) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("[%s] %s\n",
"deferred free", blkbuf);
}
zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
return (0);
}
/*
* Iterate over livelists which have been destroyed by the user but
* are still present in the MOS, waiting to be freed.
*/
static void
iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg)
{
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
ASSERT0(err);
zap_cursor_t zc;
zap_attribute_t attr;
dsl_deadlist_t ll;
/* NULL out os prior to dsl_deadlist_open in case it's garbage */
ll.dl_os = NULL;
for (zap_cursor_init(&zc, mos, zap_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
(void) zap_cursor_advance(&zc)) {
dsl_deadlist_open(&ll, mos, attr.za_first_integer);
func(&ll, arg);
dsl_deadlist_close(&ll);
}
zap_cursor_fini(&zc);
}
static int
bpobj_count_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (count_block_cb(arg, bp, tx));
}
static int
livelist_entry_count_blocks_cb(void *args, dsl_deadlist_entry_t *dle)
{
zdb_cb_t *zbc = args;
bplist_t blks;
bplist_create(&blks);
/* determine which blocks have been alloc'd but not freed */
VERIFY0(dsl_process_sub_livelist(&dle->dle_bpobj, &blks, NULL, NULL));
/* count those blocks */
(void) bplist_iterate(&blks, count_block_cb, zbc, NULL);
bplist_destroy(&blks);
return (0);
}
static void
livelist_count_blocks(dsl_deadlist_t *ll, void *arg)
{
dsl_deadlist_iterate(ll, livelist_entry_count_blocks_cb, arg);
}
/*
* Count the blocks in the livelists that have been destroyed by the user
* but haven't yet been freed.
*/
static void
deleted_livelists_count_blocks(spa_t *spa, zdb_cb_t *zbc)
{
iterate_deleted_livelists(spa, livelist_count_blocks, zbc);
}
static void
dump_livelist_cb(dsl_deadlist_t *ll, void *arg)
{
ASSERT3P(arg, ==, NULL);
global_feature_count[SPA_FEATURE_LIVELIST]++;
dump_blkptr_list(ll, "Deleted Livelist");
dsl_deadlist_iterate(ll, sublivelist_verify_lightweight, NULL);
}
/*
* Print out, register object references to, and increment feature counts for
* livelists that have been destroyed by the user but haven't yet been freed.
*/
static void
deleted_livelists_dump_mos(spa_t *spa)
{
uint64_t zap_obj;
objset_t *mos = spa->spa_meta_objset;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
mos_obj_refd(zap_obj);
iterate_deleted_livelists(spa, dump_livelist_cb, NULL);
}
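/*
* Traverse every block in the pool, accumulating per-type statistics,
* optionally verifying checksums (-c) and checking for leaked or
* double-allocated space (unless -L), then print the summary tables.
*/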
static int
dump_block_stats(spa_t *spa)
{
zdb_cb_t *zcb;
zdb_blkstats_t *zb, *tzb;
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
int e, c, err;
bp_embedded_type_t i;
zcb = umem_zalloc(sizeof (zdb_cb_t), UMEM_NOFAIL);
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
dump_opt['c'] ? "checksums " : "",
(dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
!dump_opt['L'] ? "nothing leaked " : "");
/*
* When leak detection is enabled we load all space maps as SM_ALLOC
* maps, then traverse the pool claiming each block we discover. If
* the pool is perfectly consistent, the segment trees will be empty
* when we're done. Anything left over is a leak; any block we can't
* claim (because it's not part of any space map) is a double
* allocation, reference to a freed block, or an unclaimed log block.
*
* When leak detection is disabled (-L option) we still traverse the
* pool claiming each block we discover, but we skip opening any space
* maps.
*/
zdb_leak_init(spa, zcb);
/*
* If there's a deferred-free bplist, process that first.
*/
(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
bpobj_count_block_cb, zcb, NULL);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
bpobj_count_block_cb, zcb, NULL);
}
zdb_claim_removing(spa, zcb);
if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
zcb, NULL));
}
deleted_livelists_count_blocks(spa, zcb);
if (dump_opt['c'] > 1)
flags |= TRAVERSE_PREFETCH_DATA;
zcb->zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb->zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
zcb->zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
zcb->zcb_totalasize +=
metaslab_class_get_alloc(spa_embedded_log_class(spa));
zcb->zcb_start = zcb->zcb_lastprint = gethrtime();
err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, zcb);
/*
* If we've traversed the data blocks then we need to wait for those
* I/Os to complete. We leverage "The Godfather" zio to wait on
* all async I/Os to complete.
*/
if (dump_opt['c']) {
for (c = 0; c < max_ncpus; c++) {
(void) zio_wait(spa->spa_async_zio_root[c]);
spa->spa_async_zio_root[c] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
}
ASSERT0(spa->spa_load_verify_bytes);
/*
* Done after zio_wait() since zcb_haderrors is modified in
* zdb_blkptr_done()
*/
zcb->zcb_haderrors |= err;
if (zcb->zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
for (e = 0; e < 256; e++) {
if (zcb->zcb_errors[e] != 0) {
(void) printf("\t%5d %llu\n",
e, (u_longlong_t)zcb->zcb_errors[e]);
}
}
}
/*
* Report any leaked segments.
*/
leaks |= zdb_leak_fini(spa, zcb);
tzb = &zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
norm_space = metaslab_class_get_space(spa_normal_class(spa));
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_embedded_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
metaslab_class_get_alloc(spa_dedup_class(spa)) +
get_unflushed_alloc_space(spa);
total_found = tzb->zb_asize - zcb->zcb_dedup_asize +
zcb->zcb_removing_size + zcb->zcb_checkpoint_size;
if (total_found == total_alloc && !dump_opt['L']) {
(void) printf("\n\tNo leaks (block sum matches space"
" maps exactly)\n");
} else if (!dump_opt['L']) {
(void) printf("block traversal size %llu != alloc %llu "
"(%s %lld)\n",
(u_longlong_t)total_found,
(u_longlong_t)total_alloc,
(dump_opt['L']) ? "unreachable" : "leaked",
(longlong_t)(total_alloc - total_found));
leaks = B_TRUE;
}
if (tzb->zb_count == 0) {
umem_free(zcb, sizeof (zdb_cb_t));
return (2);
}
(void) printf("\n");
(void) printf("\t%-16s %14llu\n", "bp count:",
(u_longlong_t)tzb->zb_count);
(void) printf("\t%-16s %14llu\n", "ganged count:",
(longlong_t)tzb->zb_gangs);
(void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:",
(u_longlong_t)tzb->zb_lsize,
(u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp physical:", (u_longlong_t)tzb->zb_psize,
(u_longlong_t)(tzb->zb_psize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_psize);
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp allocated:", (u_longlong_t)tzb->zb_asize,
(u_longlong_t)(tzb->zb_asize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_asize);
(void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n",
"bp deduped:", (u_longlong_t)zcb->zcb_dedup_asize,
(u_longlong_t)zcb->zcb_dedup_blocks,
(double)zcb->zcb_dedup_asize / tzb->zb_asize + 1.0);
(void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
(u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
if (spa_special_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_special_class(spa));
uint64_t space = metaslab_class_get_space(
spa_special_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Special class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_dedup_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_dedup_class(spa));
uint64_t space = metaslab_class_get_space(
spa_dedup_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Dedup class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_embedded_log_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_embedded_log_class(spa));
uint64_t space = metaslab_class_get_space(
spa_embedded_log_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Embedded log class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
if (zcb->zcb_embedded_blocks[i] == 0)
continue;
(void) printf("\n");
(void) printf("\tadditional, non-pointer bps of type %u: "
"%10llu\n",
i, (u_longlong_t)zcb->zcb_embedded_blocks[i]);
if (dump_opt['b'] >= 3) {
(void) printf("\t number of (compressed) bytes: "
"number of bps\n");
dump_histogram(zcb->zcb_embedded_histogram[i],
sizeof (zcb->zcb_embedded_histogram[i]) /
sizeof (zcb->zcb_embedded_histogram[i][0]), 0);
}
}
if (tzb->zb_ditto_samevdev != 0) {
(void) printf("\tDittoed blocks on same vdev: %llu\n",
(longlong_t)tzb->zb_ditto_samevdev);
}
if (tzb->zb_ditto_same_ms != 0) {
(void) printf("\tDittoed blocks in same metaslab: %llu\n",
(longlong_t)tzb->zb_ditto_same_ms);
}
for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
if (vim == NULL) {
continue;
}
char mem[32];
zdb_nicenum(vdev_indirect_mapping_num_entries(vim),
mem, vdev_indirect_mapping_size(vim));
(void) printf("\tindirect vdev id %llu has %llu segments "
"(%s in memory)\n",
(longlong_t)vd->vdev_id,
(longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
}
if (dump_opt['b'] >= 2) {
int l, t, level;
char csize[32], lsize[32], psize[32], asize[32];
char avg[32], gang[32];
(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
"\t avg\t comp\t%%Total\tType\n");
zfs_blkstat_t *mdstats = umem_zalloc(sizeof (zfs_blkstat_t),
UMEM_NOFAIL);
for (t = 0; t <= ZDB_OT_TOTAL; t++) {
const char *typename;
/* make sure nicenum has enough space */
_Static_assert(sizeof (csize) >= NN_NUMBUF_SZ,
"csize truncated");
_Static_assert(sizeof (lsize) >= NN_NUMBUF_SZ,
"lsize truncated");
_Static_assert(sizeof (psize) >= NN_NUMBUF_SZ,
"psize truncated");
_Static_assert(sizeof (asize) >= NN_NUMBUF_SZ,
"asize truncated");
_Static_assert(sizeof (avg) >= NN_NUMBUF_SZ,
"avg truncated");
_Static_assert(sizeof (gang) >= NN_NUMBUF_SZ,
"gang truncated");
if (t < DMU_OT_NUMTYPES)
typename = dmu_ot[t].ot_name;
else
typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
if (zcb->zcb_type[ZB_TOTAL][t].zb_asize == 0) {
(void) printf("%6s\t%5s\t%5s\t%5s"
"\t%5s\t%5s\t%6s\t%s\n",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
typename);
continue;
}
for (l = ZB_TOTAL - 1; l >= -1; l--) {
level = (l == -1 ? ZB_TOTAL : l);
zb = &zcb->zcb_type[level][t];
if (zb->zb_asize == 0)
continue;
if (level != ZB_TOTAL && t < DMU_OT_NUMTYPES &&
(level > 0 || DMU_OT_IS_METADATA(t))) {
mdstats->zb_count += zb->zb_count;
mdstats->zb_lsize += zb->zb_lsize;
mdstats->zb_psize += zb->zb_psize;
mdstats->zb_asize += zb->zb_asize;
mdstats->zb_gangs += zb->zb_gangs;
}
if (dump_opt['b'] < 3 && level != ZB_TOTAL)
continue;
if (level == 0 && zb->zb_asize ==
zcb->zcb_type[ZB_TOTAL][t].zb_asize)
continue;
zdb_nicenum(zb->zb_count, csize,
sizeof (csize));
zdb_nicenum(zb->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(zb->zb_psize, psize,
sizeof (psize));
zdb_nicenum(zb->zb_asize, asize,
sizeof (asize));
zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
sizeof (avg));
zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)zb->zb_lsize / zb->zb_psize,
100.0 * zb->zb_asize / tzb->zb_asize);
if (level == ZB_TOTAL)
(void) printf("%s\n", typename);
else
(void) printf(" L%d %s\n",
level, typename);
if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
(void) printf("\t number of ganged "
"blocks: %s\n", gang);
}
if (dump_opt['b'] >= 4) {
(void) printf("psize "
"(in 512-byte sectors): "
"number of blocks\n");
dump_histogram(zb->zb_psize_histogram,
PSIZE_HISTO_SIZE, 0);
}
}
}
zdb_nicenum(mdstats->zb_count, csize,
sizeof (csize));
zdb_nicenum(mdstats->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(mdstats->zb_psize, psize,
sizeof (psize));
zdb_nicenum(mdstats->zb_asize, asize,
sizeof (asize));
zdb_nicenum(mdstats->zb_asize / mdstats->zb_count, avg,
sizeof (avg));
zdb_nicenum(mdstats->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)mdstats->zb_lsize / mdstats->zb_psize,
100.0 * mdstats->zb_asize / tzb->zb_asize);
(void) printf("%s\n", "Metadata Total");
/* Output a table summarizing block sizes in the pool */
if (dump_opt['b'] >= 2) {
dump_size_histograms(zcb);
}
umem_free(mdstats, sizeof (zfs_blkstat_t));
}
(void) printf("\n");
if (leaks) {
umem_free(zcb, sizeof (zdb_cb_t));
return (2);
}
if (zcb->zcb_haderrors) {
umem_free(zcb, sizeof (zdb_cb_t));
return (3);
}
umem_free(zcb, sizeof (zdb_cb_t));
return (0);
}
typedef struct zdb_ddt_entry {
ddt_key_t zdde_key;
uint64_t zdde_ref_blocks;
uint64_t zdde_ref_lsize;
uint64_t zdde_ref_psize;
uint64_t zdde_ref_dsize;
avl_node_t zdde_node;
} zdb_ddt_entry_t;
static int
zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
(void) zilog, (void) dnp;
avl_tree_t *t = arg;
avl_index_t where;
zdb_ddt_entry_t *zdde, zdde_search;
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp))
return (0);
if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
(void) printf("traversing objset %llu, %llu objects, "
"%lu blocks so far\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)BP_GET_FILL(bp),
avl_numnodes(t));
}
if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
return (0);
ddt_key_fill(&zdde_search.zdde_key, bp);
zdde = avl_find(t, &zdde_search, &where);
if (zdde == NULL) {
zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
zdde->zdde_key = zdde_search.zdde_key;
avl_insert(t, zdde, where);
}
zdde->zdde_ref_blocks += 1;
zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
return (0);
}
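/*
* Traverse every block in the pool, accumulating reference counts in an
* AVL tree keyed by DDT key, to simulate the dedup table and savings the
* pool would see if dedup were enabled everywhere.
*/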
static void
dump_simulated_ddt(spa_t *spa)
{
avl_tree_t t;
void *cookie = NULL;
zdb_ddt_entry_t *zdde;
ddt_histogram_t ddh_total = {{{0}}};
ddt_stat_t dds_total = {0};
avl_create(&t, ddt_entry_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);
spa_config_exit(spa, SCL_CONFIG, FTAG);
while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
ddt_stat_t dds;
uint64_t refcnt = zdde->zdde_ref_blocks;
ASSERT(refcnt != 0);
dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
dds.dds_psize = zdde->zdde_ref_psize / refcnt;
dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
dds.dds_ref_blocks = zdde->zdde_ref_blocks;
dds.dds_ref_lsize = zdde->zdde_ref_lsize;
dds.dds_ref_psize = zdde->zdde_ref_psize;
dds.dds_ref_dsize = zdde->zdde_ref_dsize;
ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
&dds, 0);
umem_free(zdde, sizeof (*zdde));
}
avl_destroy(&t);
ddt_histogram_stat(&dds_total, &ddh_total);
(void) printf("Simulated DDT histogram:\n");
zpool_dump_ddt(&dds_total, &ddh_total);
dump_dedup_ratio(&dds_total);
}
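/*
* Cross-check the device_removal and obsolete_counts feature refcounts
* against the number of MOS objects that should be contributing to them.
* Returns nonzero if either count does not match.
*/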
static int
verify_device_removal_feature_counts(spa_t *spa)
{
uint64_t dr_feature_refcount = 0;
uint64_t oc_feature_refcount = 0;
uint64_t indirect_vdev_count = 0;
uint64_t precise_vdev_count = 0;
uint64_t obsolete_counts_object_count = 0;
uint64_t obsolete_sm_count = 0;
uint64_t obsolete_counts_count = 0;
uint64_t scip_count = 0;
uint64_t obsolete_bpobj_count = 0;
int ret = 0;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
if (scip->scip_next_mapping_object != 0) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
(void) printf("Condensing indirect vdev %llu: new mapping "
"object %llu, prev obsolete sm %llu\n",
(u_longlong_t)scip->scip_vdev,
(u_longlong_t)scip->scip_next_mapping_object,
(u_longlong_t)scip->scip_prev_obsolete_sm_object);
if (scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm,
spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object,
0, vd->vdev_asize, 0));
dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
(void) printf("\n");
space_map_close(prev_obsolete_sm);
}
scip_count += 2;
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (vic->vic_mapping_object != 0) {
ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
vd->vdev_removing);
indirect_vdev_count++;
if (vd->vdev_indirect_mapping->vim_havecounts) {
obsolete_counts_count++;
}
}
boolean_t are_precise;
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (are_precise) {
ASSERT(vic->vic_mapping_object != 0);
precise_vdev_count++;
}
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vic->vic_mapping_object != 0);
obsolete_sm_count++;
}
}
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
&dr_feature_refcount);
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
&oc_feature_refcount);
if (dr_feature_refcount != indirect_vdev_count) {
ret = 1;
(void) printf("Number of indirect vdevs (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)indirect_vdev_count,
(u_longlong_t)dr_feature_refcount);
} else {
(void) printf("Verified device_removal feature refcount " \
"of %llu is correct\n",
(u_longlong_t)dr_feature_refcount);
}
if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ) == 0) {
obsolete_bpobj_count++;
}
obsolete_counts_object_count = precise_vdev_count;
obsolete_counts_object_count += obsolete_sm_count;
obsolete_counts_object_count += obsolete_counts_count;
obsolete_counts_object_count += scip_count;
obsolete_counts_object_count += obsolete_bpobj_count;
obsolete_counts_object_count += remap_deadlist_count;
if (oc_feature_refcount != obsolete_counts_object_count) {
ret = 1;
(void) printf("Number of obsolete counts objects (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)obsolete_counts_object_count,
(u_longlong_t)oc_feature_refcount);
(void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
"ob:%llu rd:%llu\n",
(u_longlong_t)precise_vdev_count,
(u_longlong_t)obsolete_sm_count,
(u_longlong_t)obsolete_counts_count,
(u_longlong_t)scip_count,
(u_longlong_t)obsolete_bpobj_count,
(u_longlong_t)remap_deadlist_count);
} else {
(void) printf("Verified indirect_refcount feature refcount " \
"of %llu is correct\n",
(u_longlong_t)oc_feature_refcount);
}
return (ret);
}
static void
zdb_set_skip_mmp(char *target)
{
spa_t *spa;
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
}
#define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
/*
* Import the checkpointed state of the pool specified by the target
* parameter as readonly. The function also accepts a pool config
* as an optional parameter; otherwise it attempts to infer the config
* from the name of the target pool.
*
* Note that the checkpointed state's pool name will be the name of
* the original pool with the above suffix appended to it. In addition,
* if the target is not a pool name (e.g. a path to a dataset) then
* the new_path parameter is populated with the updated path to
* reflect the fact that we are looking into the checkpointed state.
*
* The function returns a newly-allocated copy of the name of the
* pool containing the checkpointed state. When this copy is no
* longer needed it should be freed with free(3C). Same thing
* applies to the new_path parameter if allocated.
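*
* For example (illustrative only): importing the checkpoint of a pool
* named "tank" yields a pool named "tank_CHECKPOINTED_UNIVERSE", and a
* target of "tank/fs" results in new_path being set to
* "tank_CHECKPOINTED_UNIVERSE/fs".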
*/
static char *
import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
{
int error = 0;
char *poolname, *bogus_name = NULL;
boolean_t freecfg = B_FALSE;
/* If the target is not a pool, then extract the pool name */
char *path_start = strchr(target, '/');
if (path_start != NULL) {
size_t poolname_len = path_start - target;
poolname = strndup(target, poolname_len);
} else {
poolname = target;
}
if (cfg == NULL) {
zdb_set_skip_mmp(poolname);
error = spa_get_stats(poolname, &cfg, NULL, 0);
if (error != 0) {
fatal("Tried to read config of pool \"%s\" but "
"spa_get_stats() failed with error %d\n",
poolname, error);
}
freecfg = B_TRUE;
}
if (asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX) == -1) {
if (target != poolname)
free(poolname);
return (NULL);
}
fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);
error = spa_import(bogus_name, cfg, NULL,
ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
ZFS_IMPORT_SKIP_MMP);
if (freecfg)
nvlist_free(cfg);
if (error != 0) {
fatal("Tried to import pool \"%s\" but spa_import() failed "
"with error %d\n", bogus_name, error);
}
if (new_path != NULL && path_start != NULL) {
if (asprintf(new_path, "%s%s", bogus_name, path_start) == -1) {
free(bogus_name);
if (path_start != NULL)
free(poolname);
return (NULL);
}
}
if (target != poolname)
free(poolname);
return (bogus_name);
}
typedef struct verify_checkpoint_sm_entry_cb_arg {
vdev_t *vcsec_vd;
/* the following fields are only used for printing progress */
uint64_t vcsec_entryid;
uint64_t vcsec_num_entries;
} verify_checkpoint_sm_entry_cb_arg_t;
#define ENTRIES_PER_PROGRESS_UPDATE 10000
static int
verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
{
verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
vdev_t *vd = vcsec->vcsec_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
(void) fprintf(stderr,
"\rverifying vdev %llu, space map entry %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vcsec->vcsec_entryid,
(longlong_t)vcsec->vcsec_num_entries);
}
vcsec->vcsec_entryid++;
/*
* See comment in checkpoint_sm_exclude_entry_cb()
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* The entries in the vdev_checkpoint_sm should be marked as
* allocated in the checkpointed state of the pool; therefore
* their respective ms_allocatable trees should not contain them.
*/
mutex_enter(&ms->ms_lock);
range_tree_verify_not_present(ms->ms_allocatable,
sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
return (0);
}
/*
* Verify that all segments in the vdev_checkpoint_sm are allocated
* according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
* ms_allocatable).
*
* Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
* each vdev in the current state of the pool to the metaslab space maps
* (ms_sm) of the checkpointed state of the pool.
*
* Note that the function changes the state of the ms_allocatable
* trees of the current spa_t. The entries of these ms_allocatable
* trees are cleared out and then repopulated with the free
* entries of their respective ms_sm space maps.
*/
static void
verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);
for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
vdev_t *current_vd = current_rvd->vdev_child[c];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* Since we don't allow device removal in a pool
* that has a checkpoint, we expect that all removed
* vdevs were removed from the pool before the
* checkpoint.
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
/*
* If the checkpoint space map doesn't exist, then nothing
* here is checkpointed so there's nothing to verify.
*/
if (current_vd->vdev_top_zap == 0 ||
zap_contains(spa_meta_objset(current),
current_vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(current),
current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
checkpoint_sm_obj, 0, current_vd->vdev_asize,
current_vd->vdev_ashift));
verify_checkpoint_sm_entry_cb_arg_t vcsec;
vcsec.vcsec_vd = ckpoint_vd;
vcsec.vcsec_entryid = 0;
vcsec.vcsec_num_entries =
space_map_length(checkpoint_sm) / sizeof (uint64_t);
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
verify_checkpoint_sm_entry_cb, &vcsec));
if (dump_opt['m'] > 3)
dump_spacemap(current->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
/*
* If we've added vdevs since we took the checkpoint, ensure
* that their checkpoint space maps are empty.
*/
if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
for (uint64_t c = ckpoint_rvd->vdev_children;
c < current_rvd->vdev_children; c++) {
vdev_t *current_vd = current_rvd->vdev_child[c];
VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
/*
* Verifies that all space that's allocated in the checkpoint is
* still allocated in the current version, by checking that everything
* in the checkpoint's ms_allocatable (which is actually allocated, not
* allocatable/free) is not present in the current state's ms_allocatable.
*
* Note that the function changes the state of the ms_allocatable
* trees of both spas when called. The entries of all ms_allocatable
* trees are cleared out and then repopulated from their respective
* ms_sm space maps. In the checkpointed state we load the allocated
* entries, and in the current state we load the free entries.
*/
static void
verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC);
load_concrete_ms_allocatable_trees(current, SM_FREE);
for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i];
vdev_t *current_vd = current_rvd->vdev_child[i];
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* See comment in verify_checkpoint_vdev_spacemaps()
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) {
metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m];
metaslab_t *current_msp = current_vd->vdev_ms[m];
(void) fprintf(stderr,
"\rverifying vdev %llu of %llu, "
"metaslab %llu of %llu ...",
(longlong_t)current_vd->vdev_id,
(longlong_t)current_rvd->vdev_children,
(longlong_t)current_vd->vdev_ms[m]->ms_id,
(longlong_t)current_vd->vdev_ms_count);
/*
* We walk through the ms_allocatable trees that
* are loaded with the allocated blocks from the
* ms_sm spacemaps of the checkpoint. For each
* one of these ranges we ensure that none of them
* exists in the ms_allocatable trees of the
* current state which are loaded with the ranges
* that are currently free.
*
* This way we ensure that none of the blocks that
* are part of the checkpoint were freed by mistake.
*/
range_tree_walk(ckpoint_msp->ms_allocatable,
(range_tree_func_t *)range_tree_verify_not_present,
current_msp->ms_allocatable);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
static void
verify_checkpoint_blocks(spa_t *spa)
{
ASSERT(!dump_opt['L']);
spa_t *checkpoint_spa;
char *checkpoint_pool;
int error = 0;
/*
* We import the checkpointed state of the pool (under a different
* name) so we can verify it against the current state
* of the pool.
*/
checkpoint_pool = import_checkpointed_state(spa->spa_name, NULL,
NULL);
ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);
error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but spa_open() failed with "
"error %d\n", checkpoint_pool, error);
}
/*
* Ensure that ranges in the checkpoint space maps of each vdev
* are allocated according to the checkpointed state's metaslab
* space maps.
*/
verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa);
/*
* Ensure that allocated ranges in the checkpoint's metaslab
* space maps remain allocated in the metaslab space maps of
* the current state.
*/
verify_checkpoint_ms_spacemaps(checkpoint_spa, spa);
/*
* Once we are done, we get rid of the checkpointed state.
*/
spa_close(checkpoint_spa, FTAG);
free(checkpoint_pool);
}
static void
dump_leftover_checkpoint_blocks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (vd->vdev_top_zap == 0)
continue;
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
dump_spacemap(spa->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
}
static int
verify_checkpoint(spa_t *spa)
{
uberblock_t checkpoint;
int error;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (0);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT && !dump_opt['L']) {
/*
* If the feature is active but the uberblock is missing
* then we must be in the middle of discarding the
* checkpoint.
*/
(void) printf("\nPartially discarded checkpoint "
"state found:\n");
if (dump_opt['m'] > 3)
dump_leftover_checkpoint_blocks(spa);
return (0);
} else if (error != 0) {
(void) printf("lookup error %d when looking for "
"checkpointed uberblock in MOS\n", error);
return (error);
}
dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n");
if (checkpoint.ub_checkpoint_txg == 0) {
(void) printf("\nub_checkpoint_txg not set in checkpointed "
"uberblock\n");
error = 3;
}
if (error == 0 && !dump_opt['L'])
verify_checkpoint_blocks(spa);
return (error);
}
static void
mos_leaks_cb(void *arg, uint64_t start, uint64_t size)
{
(void) arg;
for (uint64_t i = start; i < size; i++) {
(void) printf("MOS object %llu referenced but not allocated\n",
(u_longlong_t)i);
}
}
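/*
* Note a MOS object as referenced so it is not later reported as leaked
* by dump_mos_leaks().
*/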
static void
mos_obj_refd(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL)
range_tree_add(mos_refd_objs, obj, 1);
}
/*
* Call this on a MOS object that may already have been referenced.
*/
static void
mos_obj_refd_multiple(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL &&
!range_tree_contains(mos_refd_objs, obj, 1))
range_tree_add(mos_refd_objs, obj, 1);
}
static void
mos_leak_vdev_top_zap(vdev_t *vd)
{
uint64_t ms_flush_data_obj;
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(ms_flush_data_obj);
}
static void
mos_leak_vdev(vdev_t *vd)
{
mos_obj_refd(vd->vdev_dtl_object);
mos_obj_refd(vd->vdev_ms_array);
mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
mos_obj_refd(vd->vdev_leaf_zap);
if (vd->vdev_checkpoint_sm != NULL)
mos_obj_refd(vd->vdev_checkpoint_sm->sm_object);
if (vd->vdev_indirect_mapping != NULL) {
mos_obj_refd(vd->vdev_indirect_mapping->
vim_phys->vimp_counts_object);
}
if (vd->vdev_obsolete_sm != NULL)
mos_obj_refd(vd->vdev_obsolete_sm->sm_object);
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
mos_obj_refd(space_map_object(ms->ms_sm));
}
if (vd->vdev_root_zap != 0)
mos_obj_refd(vd->vdev_root_zap);
if (vd->vdev_top_zap != 0) {
mos_obj_refd(vd->vdev_top_zap);
mos_leak_vdev_top_zap(vd);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
mos_leak_vdev(vd->vdev_child[c]);
}
}
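/*
* Mark the log space map ZAP and every on-disk log space map object as
* referenced.
*/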
static void
mos_leak_log_spacemaps(spa_t *spa)
{
uint64_t spacemap_zap;
int error = zap_lookup(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(spacemap_zap);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
mos_obj_refd(sls->sls_sm_obj);
}
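/*
* Mark every object referenced from the given error log ZAP as
* referenced.
*/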
static void
errorlog_count_refd(objset_t *mos, uint64_t errlog)
{
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, mos, errlog);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
mos_obj_refd(za.za_first_integer);
}
zap_cursor_fini(&zc);
}
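/*
* Walk every known consumer of MOS objects, marking each referenced
* object, then compare against the set of allocated MOS objects and
* report anything leaked (allocated but unreferenced) or referenced but
* not allocated.
*/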
static int
dump_mos_leaks(spa_t *spa)
{
int rv = 0;
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
/* Visit and mark all referenced objects in the MOS */
mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT);
mos_obj_refd(spa->spa_pool_props_object);
mos_obj_refd(spa->spa_config_object);
mos_obj_refd(spa->spa_ddt_stat_object);
mos_obj_refd(spa->spa_feat_desc_obj);
mos_obj_refd(spa->spa_feat_enabled_txg_obj);
mos_obj_refd(spa->spa_feat_for_read_obj);
mos_obj_refd(spa->spa_feat_for_write_obj);
mos_obj_refd(spa->spa_history);
mos_obj_refd(spa->spa_errlog_last);
mos_obj_refd(spa->spa_errlog_scrub);
if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
errorlog_count_refd(mos, spa->spa_errlog_last);
errorlog_count_refd(mos, spa->spa_errlog_scrub);
}
mos_obj_refd(spa->spa_all_vdev_zaps);
mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj);
bpobj_count_refd(&spa->spa_deferred_bpobj);
mos_obj_refd(dp->dp_empty_bpobj);
bpobj_count_refd(&dp->dp_obsolete_bpobj);
bpobj_count_refd(&dp->dp_free_bpobj);
mos_obj_refd(spa->spa_l2cache.sav_object);
mos_obj_refd(spa->spa_spares.sav_object);
if (spa->spa_syncing_log_sm != NULL)
mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
mos_leak_log_spacemaps(spa);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_next_mapping_object);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_prev_obsolete_sm_object);
if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) {
vdev_indirect_mapping_t *vim =
vdev_indirect_mapping_open(mos,
spa->spa_condensing_indirect_phys.scip_next_mapping_object);
mos_obj_refd(vim->vim_phys->vimp_counts_object);
vdev_indirect_mapping_close(vim);
}
deleted_livelists_dump_mos(spa);
if (dp->dp_origin_snap != NULL) {
dsl_dataset_t *ds;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj,
FTAG, &ds));
count_ds_mos_objects(ds);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
dsl_dataset_rele(ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
count_ds_mos_objects(dp->dp_origin_snap);
dump_blkptr_list(&dp->dp_origin_snap->ds_deadlist, "Deadlist");
}
count_dir_mos_objects(dp->dp_mos_dir);
if (dp->dp_free_dir != NULL)
count_dir_mos_objects(dp->dp_free_dir);
if (dp->dp_leak_dir != NULL)
count_dir_mos_objects(dp->dp_leak_dir);
mos_leak_vdev(spa->spa_root_vdev);
for (uint64_t class = 0; class < DDT_CLASSES; class++) {
for (uint64_t type = 0; type < DDT_TYPES; type++) {
for (uint64_t cksum = 0;
cksum < ZIO_CHECKSUM_FUNCTIONS; cksum++) {
ddt_t *ddt = spa->spa_ddt[cksum];
mos_obj_refd(ddt->ddt_object[type][class]);
}
}
}
/*
* Visit all allocated objects and make sure they are referenced.
*/
uint64_t object = 0;
while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
if (range_tree_contains(mos_refd_objs, object, 1)) {
range_tree_remove(mos_refd_objs, object, 1);
} else {
dmu_object_info_t doi;
const char *name;
VERIFY0(dmu_object_info(mos, object, &doi));
if (doi.doi_type & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(doi.doi_type);
name = dmu_ot_byteswap[bswap].ob_name;
} else {
name = dmu_ot[doi.doi_type].ot_name;
}
(void) printf("MOS object %llu (%s) leaked\n",
(u_longlong_t)object, name);
rv = 2;
}
}
(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
if (!range_tree_is_empty(mos_refd_objs))
rv = 2;
range_tree_vacate(mos_refd_objs, NULL, NULL);
range_tree_destroy(mos_refd_objs);
return (rv);
}
typedef struct log_sm_obsolete_stats_arg {
uint64_t lsos_current_txg;
uint64_t lsos_total_entries;
uint64_t lsos_valid_entries;
uint64_t lsos_sm_entries;
uint64_t lsos_valid_sm_entries;
} log_sm_obsolete_stats_arg_t;
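/*
* iterate_through_spacemap_logs() callback: count how many log space map
* entries are still valid (not yet made obsolete by a metaslab flush),
* both per txg and in total.
*/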
static int
log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
log_sm_obsolete_stats_arg_t *lsos = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
if (lsos->lsos_current_txg == 0) {
/* this is the first log */
lsos->lsos_current_txg = txg;
} else if (lsos->lsos_current_txg < txg) {
/* we just changed log - print stats and reset */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos->lsos_valid_sm_entries,
(u_longlong_t)lsos->lsos_sm_entries,
(u_longlong_t)lsos->lsos_current_txg);
lsos->lsos_valid_sm_entries = 0;
lsos->lsos_sm_entries = 0;
lsos->lsos_current_txg = txg;
}
ASSERT3U(lsos->lsos_current_txg, ==, txg);
lsos->lsos_sm_entries++;
lsos->lsos_total_entries++;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
lsos->lsos_valid_sm_entries++;
lsos->lsos_valid_entries++;
return (0);
}
static void
dump_log_spacemap_obsolete_stats(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
log_sm_obsolete_stats_arg_t lsos = {0};
(void) printf("Log Space Map Obsolete Entry Statistics:\n");
iterate_through_spacemap_logs(spa,
log_spacemap_obsolete_stats_cb, &lsos);
/* print stats for latest log */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos.lsos_valid_sm_entries,
(u_longlong_t)lsos.lsos_sm_entries,
(u_longlong_t)lsos.lsos_current_txg);
(void) printf("%-8llu valid entries out of %-8llu - total\n\n",
(u_longlong_t)lsos.lsos_valid_entries,
(u_longlong_t)lsos.lsos_total_entries);
}
static void
dump_zpool(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
int rc = 0;
if (dump_opt['y']) {
livelist_metaslab_validate(spa);
}
if (dump_opt['S']) {
dump_simulated_ddt(spa);
return;
}
if (!dump_opt['e'] && dump_opt['C'] > 1) {
(void) printf("\nCached configuration:\n");
dump_nvlist(spa->spa_config, 8);
}
if (dump_opt['C'])
dump_config(spa);
if (dump_opt['u'])
dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
if (dump_opt['D'])
dump_all_ddts(spa);
if (dump_opt['d'] > 2 || dump_opt['m'])
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa, dump_opt['M'] > 1);
if (dump_opt['d'] > 2 || dump_opt['m']) {
dump_log_spacemaps(spa);
dump_log_spacemap_obsolete_stats(spa);
}
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
dump_objset(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
dsl_pool_t *dp = spa->spa_dsl_pool;
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_full_bpobj(&dp->dp_free_bpobj,
"Pool snapshot frees", 0);
}
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
dump_full_bpobj(&dp->dp_obsolete_bpobj,
"Pool obsolete blocks", 0);
}
if (spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dump_bptree(spa->spa_meta_objset,
dp->dp_bptree_obj,
"Pool dataset frees");
}
dump_dtl(spa->spa_root_vdev, 0);
}
for (spa_feature_t f = 0; f < SPA_FEATURES; f++)
global_feature_count[f] = UINT64_MAX;
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS] = 0;
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN] = 0;
global_feature_count[SPA_FEATURE_LIVELIST] = 0;
(void) dmu_objset_find(spa_name(spa), dump_one_objset,
NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
if (rc == 0 && !dump_opt['L'])
rc = dump_mos_leaks(spa);
for (f = 0; f < SPA_FEATURES; f++) {
uint64_t refcount;
uint64_t *arr;
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
if (global_feature_count[f] == UINT64_MAX)
continue;
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(global_feature_count[f]);
continue;
}
arr = global_feature_count;
} else {
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(dataset_feature_count[f]);
continue;
}
arr = dataset_feature_count;
}
if (feature_get_refcount(spa, &spa_feature_table[f],
&refcount) == ENOTSUP)
continue;
if (arr[f] != refcount) {
(void) printf("%s feature refcount mismatch: "
"%lld consumers != %lld refcount\n",
spa_feature_table[f].fi_uname,
(longlong_t)arr[f], (longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified %s feature refcount "
"of %llu is correct\n",
spa_feature_table[f].fi_uname,
(longlong_t)refcount);
}
}
if (rc == 0)
rc = verify_device_removal_feature_counts(spa);
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
rc = dump_block_stats(spa);
if (rc == 0)
rc = verify_spacemap_refcounts(spa);
if (dump_opt['s'])
show_pool_stats(spa);
if (dump_opt['h'])
dump_history(spa);
if (rc == 0)
rc = verify_checkpoint(spa);
if (rc != 0) {
dump_debug_buffer();
exit(rc);
}
}
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static int flagbits[256];
static char flagbitstr[16];
static void
zdb_print_blkptr(const blkptr_t *bp, int flags)
{
char blkbuf[BP_SPRINTF_LEN];
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
int i;
for (i = 0; i < nbps; i++)
zdb_print_blkptr(&bp[i], flags);
}
static void
zdb_dump_gbh(void *buf, int flags)
{
zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}
static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array(buf, size);
VERIFY(write(fileno(stdout), buf, size) == size);
}
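/*
* Print a hex dump of the buffer: two 64-bit words per line followed by
* the ASCII rendering of those 16 bytes.
*/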
static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
uint64_t *d = (uint64_t *)buf;
unsigned nwords = size / sizeof (uint64_t);
int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
unsigned i, j;
const char *hdr;
char *c;
if (do_bswap)
hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
else
hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
#ifdef _LITTLE_ENDIAN
/* correct the endianness */
do_bswap = !do_bswap;
#endif
for (i = 0; i < nwords; i += 2) {
(void) printf("%06llx: %016llx %016llx ",
(u_longlong_t)(i * sizeof (uint64_t)),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
c = (char *)&d[i];
for (j = 0; j < 2 * sizeof (uint64_t); j++)
(void) printf("%c", isprint(c[j]) ? c[j] : '.');
(void) printf("\n");
}
}
/*
* There are two acceptable formats:
* leaf_name - For example: c1t0d0 or /tmp/ztest.0a
* child[.child]* - For example: 0.1.1
*
* The second form can be used to specify arbitrary vdevs anywhere
* in the hierarchy. For example, in a pool with a mirror of
* RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1.
*/
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
char *s, *p, *q;
unsigned i;
if (vdev == NULL)
return (NULL);
/* First, assume the x.x.x.x format */
i = strtoul(path, &s, 10);
if (s == path || (s && *s != '.' && *s != '\0'))
goto name;
if (i >= vdev->vdev_children)
return (NULL);
vdev = vdev->vdev_child[i];
if (s && *s == '\0')
return (vdev);
return (zdb_vdev_lookup(vdev, s+1));
name:
for (i = 0; i < vdev->vdev_children; i++) {
vdev_t *vc = vdev->vdev_child[i];
if (vc->vdev_path == NULL) {
vc = zdb_vdev_lookup(vc, path);
if (vc == NULL)
continue;
else
return (vc);
}
p = strrchr(vc->vdev_path, '/');
p = p ? p + 1 : vc->vdev_path;
q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
if (strcmp(vc->vdev_path, path) == 0)
return (vc);
if (strcmp(p, path) == 0)
return (vc);
if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
return (vc);
}
return (NULL);
}
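/*
* Look up the dataset name for the given objset ID and write it to
* outstr. Returns 0 on success, or an error that is also reported on
* stderr.
*/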
static int
name_from_objset_id(spa_t *spa, uint64_t objset_id, char *outstr)
{
dsl_dataset_t *ds;
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
int error = dsl_dataset_hold_obj(spa->spa_dsl_pool, objset_id,
NULL, &ds);
if (error != 0) {
(void) fprintf(stderr, "failed to hold objset %llu: %s\n",
(u_longlong_t)objset_id, strerror(error));
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (error);
}
dsl_dataset_name(ds, outstr);
dsl_dataset_rele(ds, NULL);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (0);
}
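/*
* Parse the "[lsize/]psize" portion of a block descriptor. Sizes are
* hex; when only one size is given it is used for both lsize and psize.
* Returns B_TRUE only if lsize >= psize > 0.
*/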
static boolean_t
zdb_parse_block_sizes(char *sizes, uint64_t *lsize, uint64_t *psize)
{
char *s0, *s1, *tmp = NULL;
if (sizes == NULL)
return (B_FALSE);
s0 = strtok_r(sizes, "/", &tmp);
if (s0 == NULL)
return (B_FALSE);
s1 = strtok_r(NULL, "/", &tmp);
*lsize = strtoull(s0, NULL, 16);
*psize = s1 ? strtoull(s1, NULL, 16) : *lsize;
return (*lsize >= *psize && *psize > 0);
}
#define ZIO_COMPRESS_MASK(alg) (1ULL << (ZIO_COMPRESS_##alg))
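/*
* Try to decompress the physical data in pabd into lbuf, attempting each
* plausible compression algorithm at each candidate logical size.
* Returns B_TRUE if every attempt failed.
*/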
static boolean_t
zdb_decompress_block(abd_t *pabd, void *buf, void *lbuf, uint64_t lsize,
uint64_t psize, int flags)
{
(void) buf;
boolean_t exceeded = B_FALSE;
/*
* We don't know how the data was compressed, so just try
* every decompress function at every inflated blocksize.
*/
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
int cfuncs[ZIO_COMPRESS_FUNCTIONS] = { 0 };
int *cfuncp = cfuncs;
uint64_t maxlsize = SPA_MAXBLOCKSIZE;
uint64_t mask = ZIO_COMPRESS_MASK(ON) | ZIO_COMPRESS_MASK(OFF) |
ZIO_COMPRESS_MASK(INHERIT) | ZIO_COMPRESS_MASK(EMPTY) |
(getenv("ZDB_NO_ZLE") ? ZIO_COMPRESS_MASK(ZLE) : 0);
*cfuncp++ = ZIO_COMPRESS_LZ4;
*cfuncp++ = ZIO_COMPRESS_LZJB;
mask |= ZIO_COMPRESS_MASK(LZ4) | ZIO_COMPRESS_MASK(LZJB);
for (int c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++)
if (((1ULL << c) & mask) == 0)
*cfuncp++ = c;
/*
* On the one hand, with SPA_MAXBLOCKSIZE at 16MB, this
* could take a while and we should let the user know
* we are not stuck. On the other hand, printing progress
* info gets old after a while. The user can specify the 'v' flag
* to see the progress.
*/
if (lsize == psize)
lsize += SPA_MINBLOCKSIZE;
else
maxlsize = lsize;
for (; lsize <= maxlsize; lsize += SPA_MINBLOCKSIZE) {
for (cfuncp = cfuncs; *cfuncp; cfuncp++) {
if (flags & ZDB_FLAG_VERBOSE) {
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize,
(u_longlong_t)lsize,
zio_compress_table[*cfuncp].\
ci_name);
}
/*
* We randomize lbuf2, and decompress to both
* lbuf and lbuf2. This way, we will know if the
* decompression filled exactly lsize bytes.
*/
VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
if (zio_decompress_data(*cfuncp, pabd,
lbuf, psize, lsize, NULL) == 0 &&
zio_decompress_data(*cfuncp, pabd,
lbuf2, psize, lsize, NULL) == 0 &&
memcmp(lbuf, lbuf2, lsize) == 0)
break;
}
if (*cfuncp != 0)
break;
}
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
if (lsize > maxlsize) {
exceeded = B_TRUE;
}
if (*cfuncp == ZIO_COMPRESS_ZLE) {
printf("\nZLE decompression was selected. If you "
"suspect the results are wrong,\ntry avoiding ZLE "
"by setting and exporting ZDB_NO_ZLE=\"true\"\n");
}
return (exceeded);
}
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
*
* pool:vdev_specifier:offset:[lsize/]psize[:flags]
*
* pool - The name of the pool you wish to read from
* vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
* offset - offset, in hex, in bytes
* size - Amount of data to read, in hex, in bytes
* flags - A string of characters specifying options
* b: Decode a blkptr at given offset within block
* c: Calculate and display checksums
* d: Decompress data before dumping
* e: Byteswap data before dumping
* g: Display data as a gang block header
* i: Display as an indirect block
* r: Dump raw data to stdout
* v: Verbose
*
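* For example (illustrative only), the descriptor "tank:0:2000:200:d"
* reads 0x200 bytes at offset 0x2000 from vdev 0 of pool "tank" and
* decompresses them before dumping.
*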
*/
static void
zdb_read_block(char *thing, spa_t *spa)
{
blkptr_t blk, *bp = &blk;
dva_t *dva = bp->blk_dva;
int flags = 0;
uint64_t offset = 0, psize = 0, lsize = 0, blkptr_offset = 0;
zio_t *zio;
vdev_t *vd;
abd_t *pabd;
void *lbuf, *buf;
char *s, *p, *dup, *flagstr, *sizes, *tmp = NULL;
const char *vdev, *errmsg = NULL;
int i, error;
boolean_t borrowed = B_FALSE, found = B_FALSE;
dup = strdup(thing);
s = strtok_r(dup, ":", &tmp);
vdev = s ?: "";
s = strtok_r(NULL, ":", &tmp);
offset = strtoull(s ? s : "", NULL, 16);
sizes = strtok_r(NULL, ":", &tmp);
s = strtok_r(NULL, ":", &tmp);
flagstr = strdup(s ?: "");
if (!zdb_parse_block_sizes(sizes, &lsize, &psize))
errmsg = "invalid size(s)";
if (!IS_P2ALIGNED(psize, DEV_BSIZE) || !IS_P2ALIGNED(lsize, DEV_BSIZE))
errmsg = "size must be a multiple of sector size";
if (!IS_P2ALIGNED(offset, DEV_BSIZE))
errmsg = "offset must be a multiple of sector size";
if (errmsg) {
(void) printf("Invalid block specifier: %s - %s\n",
thing, errmsg);
goto done;
}
tmp = NULL;
for (s = strtok_r(flagstr, ":", &tmp);
s != NULL;
s = strtok_r(NULL, ":", &tmp)) {
for (i = 0; i < strlen(flagstr); i++) {
int bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
(void) printf("***Ignoring flag: %c\n",
(uchar_t)flagstr[i]);
continue;
}
found = B_TRUE;
flags |= bit;
p = &flagstr[i + 1];
if (*p != ':' && *p != '\0') {
int j = 0, nextbit = flagbits[(uchar_t)*p];
char *end, offstr[8] = { 0 };
if ((bit == ZDB_FLAG_PRINT_BLKPTR) &&
(nextbit == 0)) {
/* look ahead to isolate the offset */
while (nextbit == 0 &&
strchr(flagbitstr, *p) == NULL) {
offstr[j] = *p;
j++;
if (i + j > strlen(flagstr))
break;
p++;
nextbit = flagbits[(uchar_t)*p];
}
blkptr_offset = strtoull(offstr, &end,
16);
i += j;
} else if (nextbit == 0) {
(void) printf("***Ignoring flag arg:"
" '%c'\n", (uchar_t)*p);
}
}
}
}
if (blkptr_offset % sizeof (blkptr_t)) {
printf("Block pointer offset 0x%llx "
"must be divisible by 0x%x\n",
(longlong_t)blkptr_offset, (int)sizeof (blkptr_t));
goto done;
}
if (found == B_FALSE && strlen(flagstr) > 0) {
printf("Invalid flag arg: '%s'\n", flagstr);
goto done;
}
vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
if (vd == NULL) {
(void) printf("***Invalid vdev: %s\n", vdev);
goto done;
} else {
if (vd->vdev_path)
(void) fprintf(stderr, "Found vdev: %s\n",
vd->vdev_path);
else
(void) fprintf(stderr, "Found vdev type: %s\n",
vd->vdev_ops->vdev_op_type);
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
BP_ZERO(bp);
DVA_SET_VDEV(&dva[0], vd->vdev_id);
DVA_SET_OFFSET(&dva[0], offset);
DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(bp, lsize);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(bp, DMU_OT_NONE);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
zio = zio_root(spa, NULL, NULL, 0);
if (vd == vd->vdev_top) {
/*
* Treat this as a normal block read.
*/
zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
} else {
/*
* Treat this as a vdev child I/O.
*/
zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
- ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_PROPAGATE |
- ZIO_FLAG_DONT_RETRY | ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
- ZIO_FLAG_OPTIONAL, NULL, NULL));
+ ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL,
+ NULL, NULL));
}
error = zio_wait(zio);
spa_config_exit(spa, SCL_STATE, FTAG);
if (error) {
(void) printf("Read of %s failed, error: %d\n", thing, error);
goto out;
}
uint64_t orig_lsize = lsize;
buf = lbuf;
if (flags & ZDB_FLAG_DECOMPRESS) {
boolean_t failed = zdb_decompress_block(pabd, buf, lbuf,
lsize, psize, flags);
if (failed) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
} else {
buf = abd_borrow_buf_copy(pabd, lsize);
borrowed = B_TRUE;
}
/*
* Try to detect an invalid block pointer. If it looks invalid, try
* decompressing.
*/
if ((flags & ZDB_FLAG_PRINT_BLKPTR || flags & ZDB_FLAG_INDIRECT) &&
!(flags & ZDB_FLAG_DECOMPRESS)) {
const blkptr_t *b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (zfs_blkptr_verify(spa, b,
BLK_CONFIG_NEEDED, BLK_VERIFY_ONLY) == B_FALSE) {
abd_return_buf_copy(pabd, buf, lsize);
borrowed = B_FALSE;
buf = lbuf;
boolean_t failed = zdb_decompress_block(pabd, buf,
lbuf, lsize, psize, flags);
b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (failed || zfs_blkptr_verify(spa, b,
BLK_CONFIG_NEEDED, BLK_VERIFY_LOG) == B_FALSE) {
printf("invalid block pointer at this DVA\n");
goto out;
}
}
}
if (flags & ZDB_FLAG_PRINT_BLKPTR)
zdb_print_blkptr((blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
else if (flags & ZDB_FLAG_RAW)
zdb_dump_block_raw(buf, lsize, flags);
else if (flags & ZDB_FLAG_INDIRECT)
zdb_dump_indirect((blkptr_t *)buf,
orig_lsize / sizeof (blkptr_t), flags);
else if (flags & ZDB_FLAG_GBH)
zdb_dump_gbh(buf, flags);
else
zdb_dump_block(thing, buf, lsize, flags);
/*
* If :c was specified, iterate through the checksum table to
* calculate and display each checksum for our specified
* DVA and length.
*/
if ((flags & ZDB_FLAG_CHECKSUM) && !(flags & ZDB_FLAG_RAW) &&
!(flags & ZDB_FLAG_GBH)) {
zio_t *czio;
(void) printf("\n");
for (enum zio_checksum ck = ZIO_CHECKSUM_LABEL;
ck < ZIO_CHECKSUM_FUNCTIONS; ck++) {
if ((zio_checksum_table[ck].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED) ||
ck == ZIO_CHECKSUM_NOPARITY) {
continue;
}
BP_SET_CHECKSUM(bp, ck);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
czio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
czio->io_bp = bp;
if (vd == vd->vdev_top) {
zio_nowait(zio_read(czio, spa, bp, pabd, psize,
NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_DONT_RETRY, NULL));
} else {
zio_nowait(zio_vdev_child_io(czio, bp, vd,
offset, pabd, psize, ZIO_TYPE_READ,
ZIO_PRIORITY_SYNC_READ,
- ZIO_FLAG_DONT_CACHE |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(czio);
if (error == 0 || error == ECKSUM) {
zio_t *ck_zio = zio_root(spa, NULL, NULL, 0);
ck_zio->io_offset =
DVA_GET_OFFSET(&bp->blk_dva[0]);
ck_zio->io_bp = bp;
zio_checksum_compute(ck_zio, ck, pabd, lsize);
printf(
"%12s\t"
"cksum=%016llx:%016llx:%016llx:%016llx\n",
zio_checksum_table[ck].ci_name,
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
zio_wait(ck_zio);
} else {
printf("error %d reading block\n", error);
}
spa_config_exit(spa, SCL_STATE, FTAG);
}
}
if (borrowed)
abd_return_buf_copy(pabd, buf, lsize);
out:
abd_free(pabd);
umem_free(lbuf, SPA_MAXBLOCKSIZE);
done:
free(flagstr);
free(dup);
}
static void
zdb_embedded_block(char *thing)
{
blkptr_t bp = {{{{0}}}};
unsigned long long *words = (void *)&bp;
char *buf;
int err;
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
words + 4, words + 5, words + 6, words + 7,
words + 8, words + 9, words + 10, words + 11,
words + 12, words + 13, words + 14, words + 15);
if (err != 16) {
(void) fprintf(stderr, "invalid input format\n");
exit(1);
}
ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
if (err != 0) {
(void) fprintf(stderr, "decode failed: %u\n", err);
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
free(buf);
}
/* check for valid hex or decimal numeric string */
static boolean_t
zdb_numeric(char *str)
{
int i = 0;
if (strlen(str) == 0)
return (B_FALSE);
if (strncmp(str, "0x", 2) == 0 || strncmp(str, "0X", 2) == 0)
i = 2;
for (; i < strlen(str); i++) {
if (!isxdigit(str[i]))
return (B_FALSE);
}
return (B_TRUE);
}
int
main(int argc, char **argv)
{
int c;
spa_t *spa = NULL;
objset_t *os = NULL;
int dump_all = 1;
int verbose = 0;
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
char *target, *target_pool, dsname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int64_t objset_id = -1;
uint64_t object;
int flags = ZFS_IMPORT_MISSING_LOG;
int rewind = ZPOOL_NEVER_REWIND;
char *spa_config_path_env, *objset_str;
boolean_t target_is_spa = B_TRUE, dataset_lookup = B_FALSE;
nvlist_t *cfg = NULL;
dprintf_setup(&argc, argv);
/*
* If there is an environment variable SPA_CONFIG_PATH, it overrides
* the default spa_config_path setting. If the -U flag is specified, it
* overrides this environment variable setting once again.
*/
spa_config_path_env = getenv("SPA_CONFIG_PATH");
if (spa_config_path_env != NULL)
spa_config_path = spa_config_path_env;
/*
* For performance reasons, we set this tunable down. We do so before
* the arg parsing section so that the user can override this value if
* they choose.
*/
zfs_btree_verify_intensity = 3;
struct option long_options[] = {
{"ignore-assertions", no_argument, NULL, 'A'},
{"block-stats", no_argument, NULL, 'b'},
+ {"backup", no_argument, NULL, 'B'},
{"checksum", no_argument, NULL, 'c'},
{"config", no_argument, NULL, 'C'},
{"datasets", no_argument, NULL, 'd'},
{"dedup-stats", no_argument, NULL, 'D'},
{"exported", no_argument, NULL, 'e'},
{"embedded-block-pointer", no_argument, NULL, 'E'},
{"automatic-rewind", no_argument, NULL, 'F'},
{"dump-debug-msg", no_argument, NULL, 'G'},
{"history", no_argument, NULL, 'h'},
{"intent-logs", no_argument, NULL, 'i'},
{"inflight", required_argument, NULL, 'I'},
{"checkpointed-state", no_argument, NULL, 'k'},
{"key", required_argument, NULL, 'K'},
{"label", no_argument, NULL, 'l'},
{"disable-leak-tracking", no_argument, NULL, 'L'},
{"metaslabs", no_argument, NULL, 'm'},
{"metaslab-groups", no_argument, NULL, 'M'},
{"numeric", no_argument, NULL, 'N'},
{"option", required_argument, NULL, 'o'},
{"object-lookups", no_argument, NULL, 'O'},
{"path", required_argument, NULL, 'p'},
{"parseable", no_argument, NULL, 'P'},
{"skip-label", no_argument, NULL, 'q'},
{"copy-object", no_argument, NULL, 'r'},
{"read-block", no_argument, NULL, 'R'},
{"io-stats", no_argument, NULL, 's'},
{"simulate-dedup", no_argument, NULL, 'S'},
{"txg", required_argument, NULL, 't'},
{"uberblock", no_argument, NULL, 'u'},
{"cachefile", required_argument, NULL, 'U'},
{"verbose", no_argument, NULL, 'v'},
{"verbatim", no_argument, NULL, 'V'},
{"dump-blocks", required_argument, NULL, 'x'},
{"extreme-rewind", no_argument, NULL, 'X'},
{"all-reconstruction", no_argument, NULL, 'Y'},
{"livelist", no_argument, NULL, 'y'},
{"zstd-headers", no_argument, NULL, 'Z'},
{0, 0, 0, 0}
};
while ((c = getopt_long(argc, argv,
- "AbcCdDeEFGhiI:kK:lLmMNo:Op:PqrRsSt:uU:vVx:XYyZ",
+ "AbBcCdDeEFGhiI:kK:lLmMNo:Op:PqrRsSt:uU:vVx:XYyZ",
long_options, NULL)) != -1) {
switch (c) {
case 'b':
+ case 'B':
case 'c':
case 'C':
case 'd':
case 'D':
case 'E':
case 'G':
case 'h':
case 'i':
case 'l':
case 'm':
case 'M':
case 'N':
case 'O':
case 'r':
case 'R':
case 's':
case 'S':
case 'u':
case 'y':
case 'Z':
dump_opt[c]++;
dump_all = 0;
break;
case 'A':
case 'e':
case 'F':
case 'k':
case 'L':
case 'P':
case 'q':
case 'X':
dump_opt[c]++;
break;
case 'Y':
zfs_reconstruct_indirect_combinations_max = INT_MAX;
zfs_deadman_enabled = 0;
break;
/* NB: Sort single match options below. */
case 'I':
max_inflight_bytes = strtoull(optarg, NULL, 0);
if (max_inflight_bytes == 0) {
(void) fprintf(stderr, "maximum number "
"of inflight bytes must be greater "
"than 0\n");
usage();
}
break;
case 'K':
dump_opt[c]++;
key_material = strdup(optarg);
/* redact key material in process table */
while (*optarg != '\0') { *optarg++ = '*'; }
break;
case 'o':
error = set_global_var(optarg);
if (error != 0)
usage();
break;
case 'p':
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *),
UMEM_NOFAIL);
} else {
char **tmp = umem_alloc((nsearch + 1) *
sizeof (char *), UMEM_NOFAIL);
memcpy(tmp, searchdirs, nsearch *
sizeof (char *));
umem_free(searchdirs,
nsearch * sizeof (char *));
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 't':
max_txg = strtoull(optarg, NULL, 0);
if (max_txg < TXG_INITIAL) {
(void) fprintf(stderr, "incorrect txg "
"specified: %s\n", optarg);
usage();
}
break;
case 'U':
spa_config_path = optarg;
if (spa_config_path[0] != '/') {
(void) fprintf(stderr,
"cachefile must be an absolute path "
"(i.e. start with a slash)\n");
usage();
}
break;
case 'v':
verbose++;
break;
case 'V':
flags = ZFS_IMPORT_VERBATIM;
break;
case 'x':
vn_dumpdir = optarg;
break;
default:
usage();
break;
}
}
if (!dump_opt['e'] && searchdirs != NULL) {
(void) fprintf(stderr, "-p option requires use of -e\n");
usage();
}
#if defined(_LP64)
/*
* ZDB does not typically re-read blocks; therefore limit the ARC
* to 256 MB, which can be used entirely for metadata.
*/
zfs_arc_min = 2ULL << SPA_MAXBLOCKSHIFT;
zfs_arc_max = 256 * 1024 * 1024;
#endif
/*
* "zdb -c" uses checksum-verifying scrub i/os which are async reads.
* "zdb -b" uses traversal prefetch which uses async reads.
* For good performance, let several of them be active at once.
*/
zfs_vdev_async_read_max_active = 10;
/*
* Disable reference tracking for better performance.
*/
reference_tracking_enable = B_FALSE;
/*
* Do not fail spa_load when spa_load_verify fails. This is needed
* to load non-idle pools.
*/
spa_load_verify_dryrun = B_TRUE;
/*
* ZDB should have ability to read spacemaps.
*/
spa_mode_readable_spacemaps = B_TRUE;
kernel_init(SPA_MODE_READ);
if (dump_all)
verbose = MAX(verbose, 1);
for (c = 0; c < 256; c++) {
- if (dump_all && strchr("AeEFkKlLNOPrRSXy", c) == NULL)
+ if (dump_all && strchr("ABeEFkKlLNOPrRSXy", c) == NULL)
dump_opt[c] = 1;
if (dump_opt[c])
dump_opt[c] += verbose;
}
libspl_set_assert_ok((dump_opt['A'] == 1) || (dump_opt['A'] > 2));
zfs_recover = (dump_opt['A'] > 1);
argc -= optind;
argv += optind;
if (argc < 2 && dump_opt['R'])
usage();
if (dump_opt['E']) {
if (argc != 1)
usage();
zdb_embedded_block(argv[0]);
return (0);
}
if (argc < 1) {
if (!dump_opt['e'] && dump_opt['C']) {
dump_cachefile(spa_config_path);
return (0);
}
usage();
}
if (dump_opt['l'])
return (dump_label(argv[0]));
if (dump_opt['O']) {
if (argc != 2)
usage();
dump_opt['v'] = verbose + 3;
return (dump_path(argv[0], argv[1], NULL));
}
if (dump_opt['r']) {
target_is_spa = B_FALSE;
if (argc != 3)
usage();
dump_opt['v'] = verbose;
error = dump_path(argv[0], argv[1], &object);
if (error != 0)
fatal("internal error: %s", strerror(error));
}
if (dump_opt['X'] || dump_opt['F'])
rewind = ZPOOL_DO_REWIND |
(dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
/* -N implies -d */
if (dump_opt['N'] && dump_opt['d'] == 0)
dump_opt['d'] = dump_opt['N'];
if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
target = argv[0];
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_pool = strdup(target);
*strpbrk(target_pool, "/@") = '\0';
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
/*
* See if an objset ID was supplied (-d <pool>/<objset ID>).
* To disambiguate tank/100, consider the 100 as objsetID
* if -N was given, otherwise 100 is an objsetID iff
* tank/100 as a named dataset fails on lookup.
*/
objset_str = strchr(target, '/');
if (objset_str && strlen(objset_str) > 1 &&
zdb_numeric(objset_str + 1)) {
char *endptr;
errno = 0;
objset_str++;
objset_id = strtoull(objset_str, &endptr, 0);
/* dataset 0 is the same as opening the pool */
if (errno == 0 && endptr != objset_str &&
objset_id != 0) {
if (dump_opt['N'])
dataset_lookup = B_TRUE;
}
/* normal dataset name not an objset ID */
if (endptr == objset_str) {
objset_id = -1;
}
} else if (objset_str && !zdb_numeric(objset_str + 1) &&
dump_opt['N']) {
printf("Supply a numeric objset ID with -N\n");
exit(1);
}
} else {
target_pool = target;
}
if (dump_opt['e']) {
importargs_t args = { 0 };
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;
libpc_handle_t lpch = {
.lpc_lib_handle = NULL,
.lpc_ops = &libzpool_config_ops,
.lpc_printerr = B_TRUE
};
error = zpool_find_config(&lpch, target_pool, &cfg, &args);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_LOAD_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
/*
* Disable the activity check to allow examination of
* active pools.
*/
error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
if (searchdirs != NULL) {
umem_free(searchdirs, nsearch * sizeof (char *));
searchdirs = NULL;
}
/*
* import_checkpointed_state makes the assumption that the
* target pool that we pass it is already part of the spa
* namespace. Because of that we need to make sure to call
* it always after the -e option has been processed, which
* imports the pool to the namespace if it's not in the
* cachefile.
*/
char *checkpoint_pool = NULL;
char *checkpoint_target = NULL;
if (dump_opt['k']) {
checkpoint_pool = import_checkpointed_state(target, cfg,
&checkpoint_target);
if (checkpoint_target != NULL)
target = checkpoint_target;
}
if (cfg != NULL) {
nvlist_free(cfg);
cfg = NULL;
}
if (target_pool != target)
free(target_pool);
if (error == 0) {
if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
ASSERT(checkpoint_pool != NULL);
ASSERT(checkpoint_target == NULL);
error = spa_open(checkpoint_pool, &spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but "
"spa_open() failed with error %d\n",
checkpoint_pool, error);
}
- } else if (target_is_spa || dump_opt['R'] || objset_id == 0) {
+ } else if (target_is_spa || dump_opt['R'] || dump_opt['B'] ||
+ objset_id == 0) {
zdb_set_skip_mmp(target);
error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
/*
* If we're missing the log device then
* try opening the pool after clearing the
* log state.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL &&
spa->spa_log_state == SPA_LOG_MISSING) {
spa->spa_log_state = SPA_LOG_CLEAR;
error = 0;
}
mutex_exit(&spa_namespace_lock);
if (!error) {
error = spa_open_rewind(target, &spa,
FTAG, policy, NULL);
}
}
} else if (strpbrk(target, "#") != NULL) {
dsl_pool_t *dp;
error = dsl_pool_hold(target, FTAG, &dp);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
error = dump_bookmark(dp, target, B_TRUE, verbose > 1);
dsl_pool_rele(dp, FTAG);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
return (error);
} else {
target_pool = strdup(target);
if (strpbrk(target, "/@") != NULL)
*strpbrk(target_pool, "/@") = '\0';
zdb_set_skip_mmp(target);
/*
* If -N was supplied, the user has indicated that
* zdb -d <pool>/<objsetID> is in effect. Otherwise
* we first assume that the dataset string is the
* dataset name. If dmu_objset_hold fails with the
* dataset string, and we have an objset_id, retry the
* lookup with the objsetID.
*/
boolean_t retry = B_TRUE;
retry_lookup:
if (dataset_lookup == B_TRUE) {
/*
* Use the supplied id to get the name
* for open_objset.
*/
error = spa_open(target_pool, &spa, FTAG);
if (error == 0) {
error = name_from_objset_id(spa,
objset_id, dsname);
spa_close(spa, FTAG);
if (error == 0)
target = dsname;
}
}
if (error == 0) {
if (objset_id > 0 && retry) {
int err = dmu_objset_hold(target, FTAG,
&os);
if (err) {
dataset_lookup = B_TRUE;
retry = B_FALSE;
goto retry_lookup;
} else {
dmu_objset_rele(os, FTAG);
}
}
error = open_objset(target, FTAG, &os);
}
if (error == 0)
spa = dmu_objset_spa(os);
free(target_pool);
}
}
nvlist_free(policy);
if (error)
fatal("can't open '%s': %s", target, strerror(error));
/*
* Set the pool failure mode to panic in order to prevent the pool
* from suspending. A suspended I/O will have no way to resume and
* can prevent the zdb(8) command from terminating as expected.
*/
if (spa != NULL)
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
argv++;
argc--;
if (dump_opt['r']) {
error = zdb_copy_object(os, object, argv[1]);
} else if (!dump_opt['R']) {
flagbits['d'] = ZOR_FLAG_DIRECTORY;
flagbits['f'] = ZOR_FLAG_PLAIN_FILE;
flagbits['m'] = ZOR_FLAG_SPACE_MAP;
flagbits['z'] = ZOR_FLAG_ZAP;
flagbits['A'] = ZOR_FLAG_ALL_TYPES;
if (argc > 0 && dump_opt['d']) {
zopt_object_args = argc;
zopt_object_ranges = calloc(zopt_object_args,
sizeof (zopt_object_range_t));
for (unsigned i = 0; i < zopt_object_args; i++) {
int err;
const char *msg = NULL;
err = parse_object_range(argv[i],
&zopt_object_ranges[i], &msg);
if (err != 0)
fatal("Bad object or range: '%s': %s\n",
argv[i], msg ?: "");
}
} else if (argc > 0 && dump_opt['m']) {
zopt_metaslab_args = argc;
zopt_metaslab = calloc(zopt_metaslab_args,
sizeof (uint64_t));
for (unsigned i = 0; i < zopt_metaslab_args; i++) {
errno = 0;
zopt_metaslab[i] = strtoull(argv[i], NULL, 0);
if (zopt_metaslab[i] == 0 && errno != 0)
fatal("bad number %s: %s", argv[i],
strerror(errno));
}
}
- if (os != NULL) {
+ if (dump_opt['B']) {
+ dump_backup(target, objset_id,
+ argc > 0 ? argv[0] : NULL);
+ } else if (os != NULL) {
dump_objset(os);
} else if (zopt_object_args > 0 && !dump_opt['m']) {
dump_objset(spa->spa_meta_objset);
} else {
dump_zpool(spa);
}
} else {
flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
flagbits['c'] = ZDB_FLAG_CHECKSUM;
flagbits['d'] = ZDB_FLAG_DECOMPRESS;
flagbits['e'] = ZDB_FLAG_BSWAP;
flagbits['g'] = ZDB_FLAG_GBH;
flagbits['i'] = ZDB_FLAG_INDIRECT;
flagbits['r'] = ZDB_FLAG_RAW;
flagbits['v'] = ZDB_FLAG_VERBOSE;
for (int i = 0; i < argc; i++)
zdb_read_block(argv[i], spa);
}
if (dump_opt['k']) {
free(checkpoint_pool);
if (!target_is_spa)
free(checkpoint_target);
}
if (os != NULL) {
close_objset(os, FTAG);
} else {
spa_close(spa, FTAG);
}
fuid_table_destroy();
dump_debug_buffer();
kernel_fini();
return (error);
}
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_agents.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_agents.c
index a2daa77a61fe..8fabb8d081a5 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_agents.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_agents.c
@@ -1,458 +1,455 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, Intel Corporation.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021 Hewlett Packard Enterprise Development LP
*/
#include <libnvpair.h>
#include <libzfs.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/list.h>
#include <sys/time.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <pthread.h>
#include <unistd.h>
#include "zfs_agents.h"
#include "fmd_api.h"
#include "../zed_log.h"
/*
* agent dispatch code
*/
static pthread_mutex_t agent_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t agent_cond = PTHREAD_COND_INITIALIZER;
static list_t agent_events; /* list of pending events */
static int agent_exiting;
typedef struct agent_event {
char ae_class[64];
char ae_subclass[32];
nvlist_t *ae_nvl;
list_node_t ae_node;
} agent_event_t;
pthread_t g_agents_tid;
libzfs_handle_t *g_zfs_hdl;
/* guid search data */
typedef enum device_type {
DEVICE_TYPE_L2ARC, /* l2arc device */
DEVICE_TYPE_SPARE, /* spare device */
DEVICE_TYPE_PRIMARY /* any primary pool storage device */
} device_type_t;
typedef struct guid_search {
uint64_t gs_pool_guid;
uint64_t gs_vdev_guid;
const char *gs_devid;
device_type_t gs_vdev_type;
uint64_t gs_vdev_expandtime; /* vdev expansion time */
} guid_search_t;
/*
* Walks the vdev tree recursively looking for a matching devid.
* Returns B_TRUE as soon as a matching device is found, B_FALSE otherwise.
*/
static boolean_t
zfs_agent_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *arg)
{
guid_search_t *gsp = arg;
const char *path = NULL;
uint_t c, children;
nvlist_t **child;
uint64_t vdev_guid;
/*
* First iterate over any children.
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if (zfs_agent_iter_vdev(zhp, child[c], gsp)) {
gsp->gs_vdev_type = DEVICE_TYPE_PRIMARY;
return (B_TRUE);
}
}
}
/*
* Iterate over any spares and cache devices
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if (zfs_agent_iter_vdev(zhp, child[c], gsp)) {
gsp->gs_vdev_type = DEVICE_TYPE_SPARE;
return (B_TRUE);
}
}
}
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if (zfs_agent_iter_vdev(zhp, child[c], gsp)) {
gsp->gs_vdev_type = DEVICE_TYPE_L2ARC;
return (B_TRUE);
}
}
}
/*
* On a devid match, grab the vdev guid and expansion time, if any.
*/
if (gsp->gs_devid != NULL &&
(nvlist_lookup_string(nvl, ZPOOL_CONFIG_DEVID, &path) == 0) &&
(strcmp(gsp->gs_devid, path) == 0)) {
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
&gsp->gs_vdev_guid);
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_EXPANSION_TIME,
&gsp->gs_vdev_expandtime);
return (B_TRUE);
}
/*
* Otherwise, on a vdev guid match, grab the devid and expansion
* time. The devid might be missing on removal since it's not part
* of the blkid cache, and an L2ARC VDEV does not contain the pool
* guid in its blkid, so this is a special case for L2ARC VDEVs.
*/
else if (gsp->gs_vdev_guid != 0 && gsp->gs_devid == NULL &&
nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &vdev_guid) == 0 &&
gsp->gs_vdev_guid == vdev_guid) {
(void) nvlist_lookup_string(nvl, ZPOOL_CONFIG_DEVID,
&gsp->gs_devid);
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_EXPANSION_TIME,
&gsp->gs_vdev_expandtime);
return (B_TRUE);
}
return (B_FALSE);
}
static int
zfs_agent_iter_pool(zpool_handle_t *zhp, void *arg)
{
guid_search_t *gsp = arg;
nvlist_t *config, *nvl;
/*
* For each vdev in this pool, look for a match by devid
*/
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvl) == 0) {
(void) zfs_agent_iter_vdev(zhp, nvl, gsp);
}
}
/*
* if a match was found then grab the pool guid
*/
if (gsp->gs_vdev_guid && gsp->gs_devid) {
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&gsp->gs_pool_guid);
}
zpool_close(zhp);
return (gsp->gs_devid != NULL && gsp->gs_vdev_guid != 0);
}
void
zfs_agent_post_event(const char *class, const char *subclass, nvlist_t *nvl)
{
agent_event_t *event;
if (subclass == NULL)
subclass = "";
event = malloc(sizeof (agent_event_t));
if (event == NULL || nvlist_dup(nvl, &event->ae_nvl, 0) != 0) {
if (event)
free(event);
return;
}
if (strcmp(class, "sysevent.fs.zfs.vdev_check") == 0) {
class = EC_ZFS;
subclass = ESC_ZFS_VDEV_CHECK;
}
/*
* On Linux, we don't get the expected FM_RESOURCE_REMOVED ereport
* from the vdev_disk layer after a hot unplug. Fortunately we do
* get an EC_DEV_REMOVE from our disk monitor and it is a suitable
* proxy so we remap it here for the benefit of the diagnosis engine.
* Starting in OpenZFS 2.0, we do get FM_RESOURCE_REMOVED from the spa
* layer. Processing multiple FM_RESOURCE_REMOVED events is not harmful.
*/
if ((strcmp(class, EC_DEV_REMOVE) == 0) &&
(strcmp(subclass, ESC_DISK) == 0) &&
(nvlist_exists(nvl, ZFS_EV_VDEV_GUID) ||
nvlist_exists(nvl, DEV_IDENTIFIER))) {
nvlist_t *payload = event->ae_nvl;
struct timeval tv;
int64_t tod[2];
uint64_t pool_guid = 0, vdev_guid = 0;
guid_search_t search = { 0 };
device_type_t devtype = DEVICE_TYPE_PRIMARY;
const char *devid = NULL;
class = "resource.fs.zfs.removed";
subclass = "";
(void) nvlist_add_string(payload, FM_CLASS, class);
(void) nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);
(void) gettimeofday(&tv, NULL);
tod[0] = tv.tv_sec;
tod[1] = tv.tv_usec;
(void) nvlist_add_int64_array(payload, FM_EREPORT_TIME, tod, 2);
/*
* If devid is missing but vdev_guid is available, find devid
* and pool_guid from vdev_guid.
* For multipath, spare and l2arc devices ZFS_EV_VDEV_GUID or
* ZFS_EV_POOL_GUID may be missing so find them.
*/
if (devid == NULL || pool_guid == 0 || vdev_guid == 0) {
if (devid == NULL)
search.gs_vdev_guid = vdev_guid;
else
search.gs_devid = devid;
zpool_iter(g_zfs_hdl, zfs_agent_iter_pool, &search);
if (devid == NULL)
devid = search.gs_devid;
if (pool_guid == 0)
pool_guid = search.gs_pool_guid;
if (vdev_guid == 0)
vdev_guid = search.gs_vdev_guid;
devtype = search.gs_vdev_type;
}
/*
* We want to avoid reporting "remove" events coming from
* libudev for VDEVs which were expanded recently (10s) and
* avoid activating spares in response to partitions being
* deleted and created in rapid succession.
*/
if (search.gs_vdev_expandtime != 0 &&
search.gs_vdev_expandtime + 10 > tv.tv_sec) {
zed_log_msg(LOG_INFO, "agent post event: ignoring '%s' "
"for recently expanded device '%s'", EC_DEV_REMOVE,
devid);
fnvlist_free(payload);
free(event);
goto out;
}
(void) nvlist_add_uint64(payload,
FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, pool_guid);
(void) nvlist_add_uint64(payload,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vdev_guid);
switch (devtype) {
case DEVICE_TYPE_L2ARC:
(void) nvlist_add_string(payload,
FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
VDEV_TYPE_L2CACHE);
break;
case DEVICE_TYPE_SPARE:
(void) nvlist_add_string(payload,
FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE, VDEV_TYPE_SPARE);
break;
case DEVICE_TYPE_PRIMARY:
(void) nvlist_add_string(payload,
FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE, VDEV_TYPE_DISK);
break;
}
zed_log_msg(LOG_INFO, "agent post event: mapping '%s' to '%s'",
EC_DEV_REMOVE, class);
}
(void) strlcpy(event->ae_class, class, sizeof (event->ae_class));
(void) strlcpy(event->ae_subclass, subclass,
sizeof (event->ae_subclass));
(void) pthread_mutex_lock(&agent_lock);
list_insert_tail(&agent_events, event);
(void) pthread_mutex_unlock(&agent_lock);
out:
(void) pthread_cond_signal(&agent_cond);
}
static void
zfs_agent_dispatch(const char *class, const char *subclass, nvlist_t *nvl)
{
/*
* The diagnosis engine subscribes to the following events.
* On illumos these subscriptions reside in:
* /usr/lib/fm/fmd/plugins/zfs-diagnosis.conf
*/
if (strstr(class, "ereport.fs.zfs.") != NULL ||
strstr(class, "resource.fs.zfs.") != NULL ||
strcmp(class, "sysevent.fs.zfs.vdev_remove") == 0 ||
strcmp(class, "sysevent.fs.zfs.vdev_remove_dev") == 0 ||
strcmp(class, "sysevent.fs.zfs.pool_destroy") == 0) {
fmd_module_recv(fmd_module_hdl("zfs-diagnosis"), nvl, class);
}
/*
* The retire agent subscribes to the following events.
* On illumos these subscriptions reside in:
* /usr/lib/fm/fmd/plugins/zfs-retire.conf
*
* NOTE: fault events come directly from our diagnosis engine
* and will not pass through the zfs kernel module.
*/
if (strcmp(class, FM_LIST_SUSPECT_CLASS) == 0 ||
strcmp(class, "resource.fs.zfs.removed") == 0 ||
strcmp(class, "resource.fs.zfs.statechange") == 0 ||
strcmp(class, "sysevent.fs.zfs.vdev_remove") == 0) {
fmd_module_recv(fmd_module_hdl("zfs-retire"), nvl, class);
}
/*
* The SLM module only consumes disk events and vdev check events
*
* NOTE: disk events come directly from disk monitor and will
* not pass through the zfs kernel module.
*/
if (strstr(class, "EC_dev_") != NULL ||
strcmp(class, EC_ZFS) == 0) {
(void) zfs_slm_event(class, subclass, nvl);
}
}
/*
* Events are consumed and dispatched from this thread.
* An agent can also post an event, so the event list lock
* is not held when calling an agent.
* One event is consumed at a time.
*/
static void *
zfs_agent_consumer_thread(void *arg)
{
(void) arg;
for (;;) {
agent_event_t *event;
(void) pthread_mutex_lock(&agent_lock);
/* wait for an event to show up */
while (!agent_exiting && list_is_empty(&agent_events))
(void) pthread_cond_wait(&agent_cond, &agent_lock);
if (agent_exiting) {
(void) pthread_mutex_unlock(&agent_lock);
zed_log_msg(LOG_INFO, "zfs_agent_consumer_thread: "
"exiting");
return (NULL);
}
- if ((event = (list_head(&agent_events))) != NULL) {
- list_remove(&agent_events, event);
-
+ if ((event = list_remove_head(&agent_events)) != NULL) {
(void) pthread_mutex_unlock(&agent_lock);
/* dispatch to all event subscribers */
zfs_agent_dispatch(event->ae_class, event->ae_subclass,
event->ae_nvl);
nvlist_free(event->ae_nvl);
free(event);
continue;
}
(void) pthread_mutex_unlock(&agent_lock);
}
return (NULL);
}
void
zfs_agent_init(libzfs_handle_t *zfs_hdl)
{
fmd_hdl_t *hdl;
g_zfs_hdl = zfs_hdl;
if (zfs_slm_init() != 0)
zed_log_die("Failed to initialize zfs slm");
zed_log_msg(LOG_INFO, "Add Agent: init");
hdl = fmd_module_hdl("zfs-diagnosis");
_zfs_diagnosis_init(hdl);
if (!fmd_module_initialized(hdl))
zed_log_die("Failed to initialize zfs diagnosis");
hdl = fmd_module_hdl("zfs-retire");
_zfs_retire_init(hdl);
if (!fmd_module_initialized(hdl))
zed_log_die("Failed to initialize zfs retire");
list_create(&agent_events, sizeof (agent_event_t),
offsetof(struct agent_event, ae_node));
if (pthread_create(&g_agents_tid, NULL, zfs_agent_consumer_thread,
NULL) != 0) {
list_destroy(&agent_events);
zed_log_die("Failed to initialize agents");
}
pthread_setname_np(g_agents_tid, "agents");
}
void
zfs_agent_fini(void)
{
fmd_hdl_t *hdl;
agent_event_t *event;
agent_exiting = 1;
(void) pthread_cond_signal(&agent_cond);
/* wait for zfs_enum_pools thread to complete */
(void) pthread_join(g_agents_tid, NULL);
/* drain any pending events */
- while ((event = (list_head(&agent_events))) != NULL) {
- list_remove(&agent_events, event);
+ while ((event = list_remove_head(&agent_events)) != NULL) {
nvlist_free(event->ae_nvl);
free(event);
}
list_destroy(&agent_events);
if ((hdl = fmd_module_hdl("zfs-retire")) != NULL) {
_zfs_retire_fini(hdl);
fmd_hdl_unregister(hdl);
}
if ((hdl = fmd_module_hdl("zfs-diagnosis")) != NULL) {
_zfs_diagnosis_fini(hdl);
fmd_hdl_unregister(hdl);
}
zed_log_msg(LOG_INFO, "Add Agent: fini");
zfs_slm_fini();
g_zfs_hdl = NULL;
}
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
index 1c82bd4f0010..b07a02712295 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
@@ -1,1312 +1,1309 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016, 2017, Intel Corporation.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
/*
* ZFS syseventd module.
*
* file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
*
* The purpose of this module is to identify when devices are added to the
* system, and appropriately online or replace the affected vdevs.
*
* When a device is added to the system:
*
* 1. Search for any vdevs whose devid matches that of the newly added
* device.
*
* 2. If no vdevs are found, then search for any vdevs whose udev path
* matches that of the new device.
*
* 3. If no vdevs match by either method, then ignore the event.
*
* 4. Attempt to online the device with a flag to indicate that it should
* be unspared when resilvering completes. If this succeeds, then the
* same device was inserted and we should continue normally.
*
* 5. If the pool does not have the 'autoreplace' property set, attempt to
* online the device again without the unspare flag, which will
* generate a FMA fault.
*
* 6. If the pool has the 'autoreplace' property set, and the matching vdev
* is a whole disk, then label the new disk and attempt a 'zpool
* replace'.
*
* The module responds to EC_DEV_ADD events. The special ESC_ZFS_VDEV_CHECK
* event indicates that a device failed to open during pool load, but the
* autoreplace property was set. In this case, we deferred the associated
* FMA fault until our module had a chance to process the autoreplace logic.
* If the device could not be replaced, then the second online attempt will
* trigger the FMA fault that we skipped earlier.
*
* On Linux udev provides a disk insert for both the disk and the partition.
*/
#include <ctype.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include "zfs_agents.h"
#include "../zed_log.h"
#define DEV_BYID_PATH "/dev/disk/by-id/"
#define DEV_BYPATH_PATH "/dev/disk/by-path/"
#define DEV_BYVDEV_PATH "/dev/disk/by-vdev/"
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
libzfs_handle_t *g_zfshdl;
list_t g_pool_list; /* list of unavailable pools at initialization */
list_t g_device_list; /* list of disks with asynchronous label request */
tpool_t *g_tpool;
boolean_t g_enumeration_done;
pthread_t g_zfs_tid; /* zfs_enum_pools() thread */
typedef struct unavailpool {
zpool_handle_t *uap_zhp;
list_node_t uap_node;
} unavailpool_t;
typedef struct pendingdev {
char pd_physpath[128];
list_node_t pd_node;
} pendingdev_t;
static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
nvlist_t *nvroot;
vdev_stat_t *vs;
unsigned int c;
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
return (vs->vs_state);
}
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));
if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
unavailpool_t *uap;
uap = malloc(sizeof (unavailpool_t));
if (uap == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
uap->uap_zhp = zhp;
list_insert_tail((list_t *)data, uap);
} else {
zpool_close(zhp);
}
return (0);
}
/*
* Two-stage replace on Linux:
* since we get disk notifications,
* we can wait for the partitioned disk slice to show up.
*
* The first stage tags the disk, initiates async partitioning, and returns.
* The second stage finds the tag and proceeds to ZFS labeling/replace.
*
* disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
*
* 1. physical match with no fs, no partition
* tag it top, partition disk
*
* 2. physical match again, see partition and tag
*
*/
/*
* The device associated with the given vdev (either by devid or physical path)
* has been added to the system. If 'isdisk' is set, then we only attempt a
* replacement if it's a whole disk. This also implies that we should label the
* disk first.
*
* First, we attempt to online the device (making sure to undo any spare
* operation when finished). If this succeeds, then we're done. If it fails,
* and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
* but that the label was not what we expected. If the 'autoreplace' property
* is enabled, then we relabel the disk (if specified), and attempt a 'zpool
* replace'. If the online is successful, but the new state is something else
* (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
* race, and we should avoid attempting to relabel the disk.
*
* Also can arrive here from a ESC_ZFS_VDEV_CHECK event
*/
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
const char *path;
vdev_state_t newstate;
nvlist_t *nvroot, *newvd;
pendingdev_t *device;
uint64_t wholedisk = 0ULL;
uint64_t offline = 0ULL, faulted = 0ULL;
uint64_t guid = 0ULL;
uint64_t is_spare = 0;
const char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
char rawpath[PATH_MAX], fullpath[PATH_MAX];
char devpath[PATH_MAX];
int ret;
int online_flag = ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE;
boolean_t is_sd = B_FALSE;
boolean_t is_mpath_wholedisk = B_FALSE;
uint_t c;
vdev_stat_t *vs;
if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
return;
/* Skip healthy disks */
verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (vs->vs_state == VDEV_STATE_HEALTHY) {
zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
__func__, path);
return;
}
(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&enc_sysfs_path);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_IS_SPARE, &is_spare);
/*
* Special case:
*
* We've seen times where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
* entry in its config. For example, on this force-faulted disk:
*
* children[0]:
* type: 'disk'
* id: 0
* guid: 14309659774640089719
* path: '/dev/disk/by-vdev/L28'
* whole_disk: 0
* DTL: 654
* create_txg: 4
* com.delphix:vdev_zap_leaf: 1161
* faulted: 1
* aux_state: 'external'
* children[1]:
* type: 'disk'
* id: 1
* guid: 16002508084177980912
* path: '/dev/disk/by-vdev/L29'
* devid: 'dm-uuid-mpath-35000c500a61d68a3'
* phys_path: 'L29'
* vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
* whole_disk: 0
* DTL: 1028
* create_txg: 4
* com.delphix:vdev_zap_leaf: 131
*
* If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
* the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
*/
if (physpath == NULL && path != NULL) {
/* If path begins with "/dev/disk/by-vdev/" ... */
if (strncmp(path, DEV_BYVDEV_PATH,
strlen(DEV_BYVDEV_PATH)) == 0) {
/* Set physpath to the char after "/dev/disk/by-vdev" */
physpath = &path[strlen(DEV_BYVDEV_PATH)];
}
}
/*
* We don't want to autoreplace offlined disks. However, we do want to
* replace force-faulted disks (`zpool offline -f`). Force-faulted
* disks have both offline=1 and faulted=1 in the nvlist.
*/
if (offline && !faulted) {
zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
__func__, path);
return;
}
is_mpath_wholedisk = is_mpath_whole_disk(path);
zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
" %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
"(guid %llu)",
zpool_get_name(zhp), path,
physpath ? physpath : "NULL",
wholedisk ? "is" : "not",
is_mpath_wholedisk ? "is" : "not",
labeled ? "is" : "not",
enc_sysfs_path,
(long long unsigned int)guid);
/*
* The VDEV guid is preferred for identification (gets passed in path)
*/
if (guid != 0) {
(void) snprintf(fullpath, sizeof (fullpath), "%llu",
(long long unsigned int)guid);
} else {
/*
* otherwise use path sans partition suffix for whole disks
*/
(void) strlcpy(fullpath, path, sizeof (fullpath));
if (wholedisk) {
char *spath = zfs_strip_partition(fullpath);
if (!spath) {
zed_log_msg(LOG_INFO, "%s: Can't alloc",
__func__);
return;
}
(void) strlcpy(fullpath, spath, sizeof (fullpath));
free(spath);
}
}
if (is_spare)
online_flag |= ZFS_ONLINE_SPARE;
/*
* Attempt to online the device.
*/
if (zpool_vdev_online(zhp, fullpath, online_flag, &newstate) == 0 &&
(newstate == VDEV_STATE_HEALTHY ||
newstate == VDEV_STATE_DEGRADED)) {
zed_log_msg(LOG_INFO,
" zpool_vdev_online: vdev '%s' ('%s') is "
"%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
"HEALTHY" : "DEGRADED");
return;
}
/*
* vdev_id alias rule for using scsi_debug devices (FMA automated
* testing)
*/
if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
is_sd = B_TRUE;
/*
* If the pool doesn't have the autoreplace property set, then use
* vdev online to trigger a FMA fault by posting an ereport.
*/
if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
!(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
&newstate);
zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
"not a blank disk for '%s' ('%s')", fullpath,
physpath);
return;
}
/*
* Convert physical path into its current device node. Rawpath
* needs to be /dev/disk/by-vdev for a scsi_debug device since
* /dev/disk/by-path will not be present.
*/
(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);
if (realpath(rawpath, devpath) == NULL && !is_mpath_wholedisk) {
zed_log_msg(LOG_INFO, " realpath: %s failed (%s)",
rawpath, strerror(errno));
(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
&newstate);
zed_log_msg(LOG_INFO, " zpool_vdev_online: %s FORCEFAULT (%s)",
fullpath, libzfs_error_description(g_zfshdl));
return;
}
/* Only autoreplace bad disks */
if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
(vs->vs_state != VDEV_STATE_FAULTED) &&
(vs->vs_state != VDEV_STATE_CANT_OPEN)) {
zed_log_msg(LOG_INFO, " not autoreplacing since disk isn't in "
"a bad state (currently %llu)", vs->vs_state);
return;
}
nvlist_lookup_string(vdev, "new_devid", &new_devid);
if (is_mpath_wholedisk) {
/* Don't label device mapper or multipath disks. */
} else if (!labeled) {
/*
* we're auto-replacing a raw disk, so label it first
*/
char *leafname;
/*
* If this is a request to label a whole disk, then attempt to
* write out the label. Before we can label the disk, we need
* to map the physical string that was matched on to the
* underlying device node.
*
* If any part of this process fails, then do a force online
* to trigger a ZFS fault for the device (and any hot spare
* replacement).
*/
leafname = strrchr(devpath, '/') + 1;
/*
* If this is a request to label a whole disk, then attempt to
* write out the label.
*/
if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
zed_log_msg(LOG_INFO, " zpool_label_disk: could not "
"label '%s' (%s)", leafname,
libzfs_error_description(g_zfshdl));
(void) zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_FORCEFAULT, &newstate);
return;
}
/*
* The disk labeling is asynchronous on Linux. Just record
* this label request and return as there will be another
* disk add event for the partition after the labeling is
* completed.
*/
device = malloc(sizeof (pendingdev_t));
if (device == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
(void) strlcpy(device->pd_physpath, physpath,
sizeof (device->pd_physpath));
list_insert_tail(&g_device_list, device);
zed_log_msg(LOG_INFO, " zpool_label_disk: async '%s' (%llu)",
leafname, (u_longlong_t)guid);
return; /* resumes at EC_DEV_ADD.ESC_DISK for partition */
} else /* labeled */ {
boolean_t found = B_FALSE;
/*
* match up with request above to label the disk
*/
for (device = list_head(&g_device_list); device != NULL;
device = list_next(&g_device_list, device)) {
if (strcmp(physpath, device->pd_physpath) == 0) {
list_remove(&g_device_list, device);
free(device);
found = B_TRUE;
break;
}
zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
physpath, device->pd_physpath);
}
if (!found) {
/* unexpected partition slice encountered */
zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
fullpath);
(void) zpool_vdev_online(zhp, fullpath,
ZFS_ONLINE_FORCEFAULT, &newstate);
return;
}
zed_log_msg(LOG_INFO, " zpool_label_disk: resume '%s' (%llu)",
physpath, (u_longlong_t)guid);
(void) snprintf(devpath, sizeof (devpath), "%s%s",
DEV_BYID_PATH, new_devid);
}
/*
* Construct the root vdev to pass to zpool_vdev_attach(). While adding
* the entire vdev structure is harmless, we construct a reduced set of
* path/physpath/wholedisk to keep it simple.
*/
if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
return;
}
if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
nvlist_free(nvroot);
return;
}
if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
(physpath != NULL && nvlist_add_string(newvd,
ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
(enc_sysfs_path != NULL && nvlist_add_string(newvd,
ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t **)&newvd, 1) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
nvlist_free(newvd);
nvlist_free(nvroot);
return;
}
nvlist_free(newvd);
/*
* Wait for udev to verify the links exist, then auto-replace
* the leaf disk at same physical location.
*/
if (zpool_label_disk_wait(path, 3000) != 0) {
zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
"disk %s is missing", path);
nvlist_free(nvroot);
return;
}
/*
* Prefer sequential resilvering when supported (mirrors and dRAID),
* otherwise fall back to a traditional healing resilver.
*/
ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
if (ret != 0) {
ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
B_TRUE, B_FALSE);
}
zed_log_msg(LOG_INFO, " zpool_vdev_replace: %s with %s (%s)",
fullpath, path, (ret == 0) ? "no errors" :
libzfs_error_description(g_zfshdl));
nvlist_free(nvroot);
}
/*
* Utility functions to find a vdev matching given criteria.
*/
typedef struct dev_data {
const char *dd_compare;
const char *dd_prop;
zfs_process_func_t dd_func;
boolean_t dd_found;
boolean_t dd_islabeled;
uint64_t dd_pool_guid;
uint64_t dd_vdev_guid;
uint64_t dd_new_vdev_guid;
const char *dd_new_devid;
uint64_t dd_num_spares;
} dev_data_t;
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
dev_data_t *dp = data;
const char *path = NULL;
uint_t c, children;
nvlist_t **child;
uint64_t guid = 0;
uint64_t isspare = 0;
/*
* First iterate over any children.
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
/*
* Iterate over any spares and cache devices
*/
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++)
zfs_iter_vdev(zhp, child[c], data);
}
/* once a vdev was matched and processed there is nothing left to do */
if (dp->dd_found && dp->dd_num_spares == 0)
return;
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);
/*
* Match by GUID if available, otherwise fall back to devid or physical path
*/
if (dp->dd_vdev_guid != 0) {
if (guid != dp->dd_vdev_guid)
return;
zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched on %llu", guid);
dp->dd_found = B_TRUE;
} else if (dp->dd_compare != NULL) {
/*
* NOTE: On Linux there is an event for partition, so unlike
* illumos, substring matching is not required to accommodate
* the partition suffix. An exact match will be present in
* the dp->dd_compare value.
* If the attached disk already contains a vdev GUID, it means
* the disk is not clean. In such a scenario, the physical path
* would be a match that makes the disk faulted when trying to
* online it. So, we would only want to proceed if either GUID
* matches with the last attached disk or the disk is in clean
* state.
*/
if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
strcmp(dp->dd_compare, path) != 0) {
zed_log_msg(LOG_INFO, " %s: no match (%s != vdev %s)",
__func__, dp->dd_compare, path);
return;
}
if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
zed_log_msg(LOG_INFO, " %s: no match (GUID:%llu"
" != vdev GUID:%llu)", __func__,
dp->dd_new_vdev_guid, guid);
return;
}
zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched %s on %s",
dp->dd_prop, path);
dp->dd_found = B_TRUE;
/* pass the new devid for use by replacing code */
if (dp->dd_new_devid != NULL) {
(void) nvlist_add_string(nvl, "new_devid",
dp->dd_new_devid);
}
}
if (dp->dd_found == B_TRUE && nvlist_lookup_uint64(nvl,
ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
dp->dd_num_spares++;
(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}
static void
zfs_enable_ds(void *arg)
{
unavailpool_t *pool = (unavailpool_t *)arg;
(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
zpool_close(pool->uap_zhp);
free(pool);
}
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
nvlist_t *config, *nvl;
dev_data_t *dp = data;
uint64_t pool_guid;
unavailpool_t *pool;
zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);
/*
* For each vdev in this pool, look for a match to apply dd_func
*/
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
if (dp->dd_pool_guid == 0 ||
(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
(void) nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvl);
zfs_iter_vdev(zhp, nvl, data);
}
} else {
zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
}
/*
* if this pool was originally unavailable,
* then enable its datasets asynchronously
*/
if (g_enumeration_done) {
for (pool = list_head(&g_pool_list); pool != NULL;
pool = list_next(&g_pool_list, pool)) {
if (strcmp(zpool_get_name(zhp),
zpool_get_name(pool->uap_zhp)))
continue;
if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
list_remove(&g_pool_list, pool);
(void) tpool_dispatch(g_tpool, zfs_enable_ds,
pool);
break;
}
}
}
zpool_close(zhp);
/* cease iteration after a match */
return (dp->dd_found && dp->dd_num_spares == 0);
}
/*
* Given a physical device location, iterate over all
* (pool, vdev) pairs which correspond to that location.
*/
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
boolean_t is_slice, uint64_t new_vdev_guid)
{
dev_data_t data = { 0 };
data.dd_compare = physical;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid; /* used by auto replace code */
data.dd_new_vdev_guid = new_vdev_guid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device identifier, find any vdevs with a matching by-vdev
* path. Normally we shouldn't need this as the comparison would be
* made earlier in the devphys_iter(). For example, if we were replacing
* /dev/disk/by-vdev/L28, normally devphys_iter() would match the
* ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
* of the new disk config. However, we've seen cases where
* ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk. Here's
* an example of a real 2-disk mirror pool where one disk was force
* faulted:
*
* com.delphix:vdev_zap_top: 129
* children[0]:
* type: 'disk'
* id: 0
* guid: 14309659774640089719
* path: '/dev/disk/by-vdev/L28'
* whole_disk: 0
* DTL: 654
* create_txg: 4
* com.delphix:vdev_zap_leaf: 1161
* faulted: 1
* aux_state: 'external'
* children[1]:
* type: 'disk'
* id: 1
* guid: 16002508084177980912
* path: '/dev/disk/by-vdev/L29'
* devid: 'dm-uuid-mpath-35000c500a61d68a3'
* phys_path: 'L29'
* vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
* whole_disk: 0
* DTL: 1028
* create_txg: 4
* com.delphix:vdev_zap_leaf: 131
*
* So in the case above, the only thing we could compare is the path.
*
* We can do this because we assume by-vdev paths are authoritative as physical
* paths. We could not assume this for normal paths like /dev/sda since the
* physical location /dev/sda points to could change over time.
*/
static boolean_t
by_vdev_path_iter(const char *by_vdev_path, const char *devid,
zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_compare = by_vdev_path;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_PATH;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
strlen(DEV_BYVDEV_PATH)) != 0) {
/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
return (B_FALSE);
}
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device identifier, find any vdevs with a matching devid.
* On Linux we can match devid directly which is always a whole disk.
*/
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_compare = devid;
data.dd_func = func;
data.dd_prop = ZPOOL_CONFIG_DEVID;
data.dd_found = B_FALSE;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Given a device guid, find any vdevs with a matching guid.
*/
static boolean_t
guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
zfs_process_func_t func, boolean_t is_slice)
{
dev_data_t data = { 0 };
data.dd_func = func;
data.dd_found = B_FALSE;
data.dd_pool_guid = pool_guid;
data.dd_vdev_guid = vdev_guid;
data.dd_islabeled = is_slice;
data.dd_new_devid = devid;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (data.dd_found);
}
/*
* Handle a EC_DEV_ADD.ESC_DISK event.
*
* illumos
* Expects: DEV_PHYS_PATH string in schema
* Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
*
* path: '/dev/dsk/c0t1d0s0' (persistent)
* devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
* phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
*
* linux
* provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
* Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
*
* path: '/dev/sdc1' (not persistent)
* devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
* phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
*/
static int
zfs_deliver_add(nvlist_t *nvl)
{
const char *devpath = NULL, *devid = NULL;
uint64_t pool_guid = 0, vdev_guid = 0;
boolean_t is_slice;
/*
* Expecting a devid string and an optional physical location and guid
*/
if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);
return (-1);
}
(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);
is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);
zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
devid, devpath ? devpath : "NULL", is_slice);
/*
* Iterate over all vdevs looking for a match in the following order:
* 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
* 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
* 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
* 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
* by-vdev paths represent physical paths).
*/
if (devid_iter(devid, zfs_process_add, is_slice))
return (0);
if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
is_slice, vdev_guid))
return (0);
if (vdev_guid != 0)
(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
is_slice);
if (devpath != NULL) {
/* Can we match a /dev/disk/by-vdev/ path? */
char by_vdev_path[MAXPATHLEN];
snprintf(by_vdev_path, sizeof (by_vdev_path),
"/dev/disk/by-vdev/%s", devpath);
if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
is_slice))
return (0);
}
return (0);
}
/*
* Called when we receive a VDEV_CHECK event, which indicates a device could not
* be opened during initial pool open, but the autoreplace property was set on
* the pool. In this case, we treat it as if it were an add event.
*/
static int
zfs_deliver_check(nvlist_t *nvl)
{
dev_data_t data = { 0 };
if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
&data.dd_pool_guid) != 0 ||
nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
&data.dd_vdev_guid) != 0 ||
data.dd_vdev_guid == 0)
return (0);
zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
data.dd_pool_guid, data.dd_vdev_guid);
data.dd_func = zfs_process_add;
(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
return (0);
}
/*
* Given a path to a vdev, look up the vdev's physical size from its
* config nvlist.
*
* Returns the vdev's physical size in bytes on success, 0 on error.
*/
static uint64_t
vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
nvlist_t *nvl = NULL;
boolean_t avail_spare, l2cache, log;
vdev_stat_t *vs = NULL;
uint_t c;
nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
if (!nvl)
return (0);
verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (!vs) {
zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
vdev_path);
return (0);
}
return (vs->vs_pspace);
}
/*
* Given a path to a vdev, look up whether the vdev is a "whole disk" in the
* config nvlist. "whole disk" means that ZFS was passed a whole disk
* at pool creation time, which it partitioned up and has full control over.
* Thus a partition with wholedisk=1 set tells us that ZFS created the
* partition at creation time. A partition without wholedisk set would have
* been created externally (like with fdisk) and passed to ZFS.
*
* Returns the whole disk value (either 0 or 1).
*/
static uint64_t
vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
nvlist_t *nvl = NULL;
boolean_t avail_spare, l2cache, log;
uint64_t wholedisk = 0;
nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
if (!nvl)
return (0);
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
return (wholedisk);
}
/*
* If the device size grew more than 1% then return true.
*/
#define DEVICE_GREW(oldsize, newsize) \
((newsize > oldsize) && \
((newsize / (newsize - oldsize)) <= 100))
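/*
 * Worked example, illustrative and not part of this change: a vdev that
 * grew from 100 GiB to 110 GiB gives newsize / (newsize - oldsize) =
 * 110 / 10 = 11 <= 100, so DEVICE_GREW() is true; a 0.5 GiB increase on
 * the same disk gives 100.5 / 0.5 = 201 > 100 and is ignored. The macro
 * therefore only fires once the device has gained more than about 1% of
 * its new size.
 */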
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
boolean_t avail_spare, l2cache;
nvlist_t *udev_nvl = data;
nvlist_t *tgt;
int error;
const char *tmp_devname;
char devname[MAXPATHLEN] = "";
uint64_t guid;
if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
sprintf(devname, "%llu", (u_longlong_t)guid);
} else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
&tmp_devname) == 0) {
strlcpy(devname, tmp_devname, MAXPATHLEN);
zfs_append_partition(devname, MAXPATHLEN);
} else {
zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
}
zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
devname, zpool_get_name(zhp));
if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
&avail_spare, &l2cache, NULL)) != NULL) {
const char *path;
char fullpath[MAXPATHLEN];
uint64_t wholedisk = 0;
error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
if (error) {
zpool_close(zhp);
return (0);
}
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
if (wholedisk) {
char *tmp;
path = strrchr(path, '/');
if (path != NULL) {
tmp = zfs_strip_partition(path + 1);
if (tmp == NULL) {
zpool_close(zhp);
return (0);
}
} else {
zpool_close(zhp);
return (0);
}
(void) strlcpy(fullpath, tmp, sizeof (fullpath));
free(tmp);
/*
* We need to reopen the pool associated with this
* device so that the kernel can update the size of
* the expanded device. When expanding there is no
* need to restart the scrub from the beginning.
*/
boolean_t scrub_restart = B_FALSE;
(void) zpool_reopen_one(zhp, &scrub_restart);
} else {
(void) strlcpy(fullpath, path, sizeof (fullpath));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
vdev_state_t newstate;
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
/*
* If this disk size has not changed, then
* there's no need to do an autoexpand. To
* check we look at the disk's size in its
* config, and compare it to the disk size
* that udev is reporting.
*/
uint64_t udev_size = 0, conf_size = 0,
wholedisk = 0, udev_parent_size = 0;
/*
* Get the size of our disk that udev is
* reporting.
*/
if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
&udev_size) != 0) {
udev_size = 0;
}
/*
* Get the size of our disk's parent device
* from udev (where sda1's parent is sda).
*/
if (nvlist_lookup_uint64(udev_nvl,
DEV_PARENT_SIZE, &udev_parent_size) != 0) {
udev_parent_size = 0;
}
conf_size = vdev_size_from_config(zhp,
fullpath);
wholedisk = vdev_whole_disk_from_config(zhp,
fullpath);
/*
* Only attempt an autoexpand if the vdev size
* changed. There are two different cases
* to consider.
*
* 1. wholedisk=1
* If you do a 'zpool create' on a whole disk
* (like /dev/sda), then zfs will create
* partitions on the disk (like /dev/sda1). In
* that case, wholedisk=1 will be set in the
* partition's nvlist config. So zed will need
* to see if your parent device (/dev/sda)
* expanded in size, and if so, then attempt
* the autoexpand.
*
* 2. wholedisk=0
* If you do a 'zpool create' on an existing
* partition, or a device that doesn't allow
* partitions, then wholedisk=0, and you will
* simply need to check if the device itself
* expanded in size.
*/
if (DEVICE_GREW(conf_size, udev_size) ||
(wholedisk && DEVICE_GREW(conf_size,
udev_parent_size))) {
error = zpool_vdev_online(zhp, fullpath,
0, &newstate);
zed_log_msg(LOG_INFO,
"%s: autoexpanding '%s' from %llu"
" to %llu bytes in pool '%s': %d",
__func__, fullpath, conf_size,
MAX(udev_size, udev_parent_size),
zpool_get_name(zhp), error);
}
}
}
zpool_close(zhp);
return (1);
}
zpool_close(zhp);
return (0);
}
/*
* This function handles the ESC_DEV_DLE device change event. Use the
* provided vdev guid when looking up a disk or partition; when the guid
* is not present, assume the entire disk is owned by ZFS, append the
* expected -part1 partition information, and then look up by physical path.
*/
static int
zfs_deliver_dle(nvlist_t *nvl)
{
const char *devname;
char name[MAXPATHLEN];
uint64_t guid;
if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
sprintf(name, "%llu", (u_longlong_t)guid);
} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
strlcpy(name, devname, MAXPATHLEN);
zfs_append_partition(name, MAXPATHLEN);
} else {
sprintf(name, "unknown");
zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
}
if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
"found", name);
return (1);
}
return (0);
}
/*
* syseventd daemon module event handler
*
* Handles syseventd daemon zfs device related events:
*
* EC_DEV_ADD.ESC_DISK
* EC_DEV_STATUS.ESC_DEV_DLE
* EC_ZFS.ESC_ZFS_VDEV_CHECK
*
* Note: assumes only one thread active at a time (not thread safe)
*/
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
int ret;
boolean_t is_check = B_FALSE, is_dle = B_FALSE;
if (strcmp(class, EC_DEV_ADD) == 0) {
/*
* We're mainly interested in disk additions, but we also listen
* for new loop devices, to allow for simplified testing.
*/
if (strcmp(subclass, ESC_DISK) != 0 &&
strcmp(subclass, ESC_LOFI) != 0)
return (0);
is_check = B_FALSE;
} else if (strcmp(class, EC_ZFS) == 0 &&
strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
/*
* This event signifies that a device failed to open
* during pool load, but the 'autoreplace' property was
* set, so we should pretend it's just been added.
*/
is_check = B_TRUE;
} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
strcmp(subclass, ESC_DEV_DLE) == 0) {
is_dle = B_TRUE;
} else {
return (0);
}
if (is_dle)
ret = zfs_deliver_dle(nvl);
else if (is_check)
ret = zfs_deliver_check(nvl);
else
ret = zfs_deliver_add(nvl);
return (ret);
}
static void *
zfs_enum_pools(void *arg)
{
(void) arg;
(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
/*
* Linux - instead of using a thread pool, each list entry
* will spawn a thread when an unavailable pool transitions
* to available. zfs_slm_fini will wait for these threads.
*/
g_enumeration_done = B_TRUE;
return (NULL);
}
/*
* called from zed daemon at startup
*
* messages are sent from zevents or the udev monitor
*
* For now, each agent has its own libzfs instance
*/
int
zfs_slm_init(void)
{
if ((g_zfshdl = libzfs_init()) == NULL)
return (-1);
/*
* collect a list of unavailable pools (asynchronously,
* since this can take a while)
*/
list_create(&g_pool_list, sizeof (struct unavailpool),
offsetof(struct unavailpool, uap_node));
if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
list_destroy(&g_pool_list);
libzfs_fini(g_zfshdl);
return (-1);
}
pthread_setname_np(g_zfs_tid, "enum-pools");
list_create(&g_device_list, sizeof (struct pendingdev),
offsetof(struct pendingdev, pd_node));
return (0);
}
void
zfs_slm_fini(void)
{
unavailpool_t *pool;
pendingdev_t *device;
/* wait for zfs_enum_pools thread to complete */
(void) pthread_join(g_zfs_tid, NULL);
/* destroy the thread pool */
if (g_tpool != NULL) {
tpool_wait(g_tpool);
tpool_destroy(g_tpool);
}
- while ((pool = (list_head(&g_pool_list))) != NULL) {
- list_remove(&g_pool_list, pool);
+ while ((pool = list_remove_head(&g_pool_list)) != NULL) {
zpool_close(pool->uap_zhp);
free(pool);
}
list_destroy(&g_pool_list);
- while ((device = (list_head(&g_device_list))) != NULL) {
- list_remove(&g_device_list, device);
+ while ((device = list_remove_head(&g_device_list)) != NULL)
free(device);
- }
list_destroy(&g_device_list);
libzfs_fini(g_zfshdl);
}
void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
(void) zfs_slm_deliver_event(class, subclass, nvl);
}
diff --git a/sys/contrib/openzfs/cmd/zilstat.in b/sys/contrib/openzfs/cmd/zilstat.in
index cf4e2e0dd0c8..e8678e20cafa 100755
--- a/sys/contrib/openzfs/cmd/zilstat.in
+++ b/sys/contrib/openzfs/cmd/zilstat.in
@@ -1,467 +1,551 @@
#!/usr/bin/env @PYTHON_SHEBANG@
#
# Print out statistics for all zil stats. This information is
# available through the zil kstat.
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, Version 1.0 only
# (the "License"). You may not use this file except in compliance
# with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# This script must remain compatible with Python 3.6+.
#
import sys
import subprocess
import time
import copy
import os
import re
import signal
from collections import defaultdict
import argparse
from argparse import RawTextHelpFormatter
cols = {
- # hdr: [size, scale, kstat name]
+ # hdr: [size, scale, kstat name]
"time": [8, -1, "time"],
"pool": [12, -1, "pool"],
"ds": [12, -1, "dataset_name"],
"obj": [12, -1, "objset"],
- "zcc": [10, 1000, "zil_commit_count"],
- "zcwc": [10, 1000, "zil_commit_writer_count"],
- "ziic": [10, 1000, "zil_itx_indirect_count"],
- "zic": [10, 1000, "zil_itx_count"],
- "ziib": [10, 1024, "zil_itx_indirect_bytes"],
- "zicc": [10, 1000, "zil_itx_copied_count"],
- "zicb": [10, 1024, "zil_itx_copied_bytes"],
- "zinc": [10, 1000, "zil_itx_needcopy_count"],
- "zinb": [10, 1024, "zil_itx_needcopy_bytes"],
- "zimnc": [10, 1000, "zil_itx_metaslab_normal_count"],
- "zimnb": [10, 1024, "zil_itx_metaslab_normal_bytes"],
- "zimsc": [10, 1000, "zil_itx_metaslab_slog_count"],
- "zimsb": [10, 1024, "zil_itx_metaslab_slog_bytes"],
+ "cc": [5, 1000, "zil_commit_count"],
+ "cwc": [5, 1000, "zil_commit_writer_count"],
+ "ic": [5, 1000, "zil_itx_count"],
+ "iic": [5, 1000, "zil_itx_indirect_count"],
+ "iib": [5, 1024, "zil_itx_indirect_bytes"],
+ "icc": [5, 1000, "zil_itx_copied_count"],
+ "icb": [5, 1024, "zil_itx_copied_bytes"],
+ "inc": [5, 1000, "zil_itx_needcopy_count"],
+ "inb": [5, 1024, "zil_itx_needcopy_bytes"],
+ "idc": [5, 1000, "icc+inc"],
+ "idb": [5, 1024, "icb+inb"],
+ "iwc": [5, 1000, "iic+idc"],
+ "iwb": [5, 1024, "iib+idb"],
+ "imnc": [6, 1000, "zil_itx_metaslab_normal_count"],
+ "imnb": [6, 1024, "zil_itx_metaslab_normal_bytes"],
+ "imnw": [6, 1024, "zil_itx_metaslab_normal_write"],
+ "imna": [6, 1024, "zil_itx_metaslab_normal_alloc"],
+ "imsc": [6, 1000, "zil_itx_metaslab_slog_count"],
+ "imsb": [6, 1024, "zil_itx_metaslab_slog_bytes"],
+ "imsw": [6, 1024, "zil_itx_metaslab_slog_write"],
+ "imsa": [6, 1024, "zil_itx_metaslab_slog_alloc"],
+ "imc": [5, 1000, "imnc+imsc"],
+ "imb": [5, 1024, "imnb+imsb"],
+ "imw": [5, 1024, "imnw+imsw"],
+ "ima": [5, 1024, "imna+imsa"],
+ "se%": [3, 100, "imb/ima"],
+ "sen%": [4, 100, "imnb/imna"],
+ "ses%": [4, 100, "imsb/imsa"],
+ "te%": [3, 100, "imb/imw"],
+ "ten%": [4, 100, "imnb/imnw"],
+ "tes%": [4, 100, "imsb/imsw"],
}
-hdr = ["time", "pool", "ds", "obj", "zcc", "zcwc", "ziic", "zic", "ziib", \
- "zicc", "zicb", "zinc", "zinb", "zimnc", "zimnb", "zimsc", "zimsb"]
+hdr = ["time", "ds", "cc", "ic", "idc", "idb", "iic", "iib",
+ "imnc", "imnw", "imsc", "imsw"]
-ghdr = ["time", "zcc", "zcwc", "ziic", "zic", "ziib", "zicc", "zicb",
- "zinc", "zinb", "zimnc", "zimnb", "zimsc", "zimsb"]
+ghdr = ["time", "cc", "ic", "idc", "idb", "iic", "iib",
+ "imnc", "imnw", "imsc", "imsw"]
cmd = ("Usage: zilstat [-hgdv] [-i interval] [-p pool_name]")
curr = {}
diff = {}
kstat = {}
ds_pairs = {}
pool_name = None
dataset_name = None
interval = 0
sep = " "
gFlag = True
dsFlag = False
def prettynum(sz, scale, num=0):
suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
index = 0
save = 0
if scale == -1:
return "%*s" % (sz, num)
# Rounding error, return 0
elif 0 < num < 1:
num = 0
while num > scale and index < 5:
save = num
num = num / scale
index += 1
if index == 0:
return "%*d" % (sz, num)
if (save / scale) < 10:
return "%*.1f%s" % (sz - 1, num, suffix[index])
else:
return "%*d%s" % (sz - 1, num, suffix[index])
def print_header():
global hdr
global sep
for col in hdr:
new_col = col
- if interval > 0 and col not in ['time', 'pool', 'ds', 'obj']:
+ if interval > 0 and cols[col][1] > 100:
new_col += "/s"
sys.stdout.write("%*s%s" % (cols[col][0], new_col, sep))
sys.stdout.write("\n")
def print_values(v):
global hdr
global sep
for col in hdr:
val = v[cols[col][2]]
- if col not in ['time', 'pool', 'ds', 'obj'] and interval > 0:
+ if interval > 0 and cols[col][1] > 100:
val = v[cols[col][2]] // interval
sys.stdout.write("%s%s" % (
prettynum(cols[col][0], cols[col][1], val), sep))
sys.stdout.write("\n")
def print_dict(d):
for pool in d:
for objset in d[pool]:
print_values(d[pool][objset])
def detailed_usage():
sys.stderr.write("%s\n" % cmd)
sys.stderr.write("Field definitions are as follows:\n")
for key in cols:
sys.stderr.write("%11s : %s\n" % (key, cols[key][2]))
sys.stderr.write("\n")
sys.exit(0)
def init():
global pool_name
global dataset_name
global interval
global hdr
global curr
global gFlag
global sep
curr = dict()
parser = argparse.ArgumentParser(description='Program to print zilstats',
add_help=True,
formatter_class=RawTextHelpFormatter,
epilog="\nUsage Examples\n"\
"Note: Global zilstats is shown by default,"\
" if none of a|p|d option is not provided\n"\
"\tzilstat -a\n"\
'\tzilstat -v\n'\
'\tzilstat -p tank\n'\
'\tzilstat -d tank/d1,tank/d2,tank/zv1\n'\
'\tzilstat -i 1\n'\
'\tzilstat -s \"***\"\n'\
'\tzilstat -f cwc,imnb,imsb\n')
parser.add_argument(
"-v", "--verbose",
action="store_true",
help="List field headers and definitions"
)
pool_grp = parser.add_mutually_exclusive_group()
pool_grp.add_argument(
"-a", "--all",
action="store_true",
dest="all",
help="Print all dataset stats"
)
pool_grp.add_argument(
"-p", "--pool",
type=str,
help="Print stats for all datasets of a speicfied pool"
)
pool_grp.add_argument(
"-d", "--dataset",
type=str,
help="Print given dataset(s) (Comma separated)"
)
parser.add_argument(
"-f", "--columns",
type=str,
help="Specify specific fields to print (see -v)"
)
parser.add_argument(
"-s", "--separator",
type=str,
help="Override default field separator with custom "
"character or string"
)
parser.add_argument(
"-i", "--interval",
type=int,
dest="interval",
help="Print stats between specified interval"
" (in seconds)"
)
parsed_args = parser.parse_args()
if parsed_args.verbose:
detailed_usage()
if parsed_args.all:
gFlag = False
if parsed_args.interval:
interval = parsed_args.interval
if parsed_args.pool:
pool_name = parsed_args.pool
gFlag = False
if parsed_args.dataset:
dataset_name = parsed_args.dataset
gFlag = False
if parsed_args.separator:
sep = parsed_args.separator
if gFlag:
hdr = ghdr
if parsed_args.columns:
hdr = parsed_args.columns.split(",")
invalid = []
for ele in hdr:
- if gFlag and ele not in ghdr:
- invalid.append(ele)
- elif ele not in cols:
+ if ele not in cols:
invalid.append(ele)
if len(invalid) > 0:
sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
sys.exit(1)
if pool_name and dataset_name:
print ("Error: Can not filter both dataset and pool")
sys.exit(1)
def FileCheck(fname):
try:
return (open(fname))
except IOError:
print ("Unable to open zilstat proc file: " + fname)
sys.exit(1)
if sys.platform.startswith('freebsd'):
# Requires py-sysctl on FreeBSD
import sysctl
def kstat_update(pool = None, objid = None):
global kstat
kstat = {}
if not pool:
file = "kstat.zfs.misc.zil"
k = [ctl for ctl in sysctl.filter(file) \
if ctl.type != sysctl.CTLTYPE_NODE]
kstat_process_str(k, file, "GLOBAL", len(file + "."))
elif objid:
file = "kstat.zfs." + pool + ".dataset.objset-" + objid
k = [ctl for ctl in sysctl.filter(file) if ctl.type \
!= sysctl.CTLTYPE_NODE]
kstat_process_str(k, file, objid, len(file + "."))
else:
file = "kstat.zfs." + pool + ".dataset"
zil_start = len(file + ".")
obj_start = len("kstat.zfs." + pool + ".")
k = [ctl for ctl in sysctl.filter(file)
if ctl.type != sysctl.CTLTYPE_NODE]
for s in k:
if not s or (s.name.find("zil") == -1 and \
s.name.find("dataset_name") == -1):
continue
name, value = s.name, s.value
objid = re.findall(r'0x[0-9A-F]+', \
name[obj_start:], re.I)[0]
if objid not in kstat:
kstat[objid] = dict()
zil_start = len(file + ".objset-" + \
objid + ".")
kstat[objid][name[zil_start:]] = value \
if (name.find("dataset_name")) \
else int(value)
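# Note: the script expects the per-objset ZIL counters to be exposed as
# sysctl nodes of the form "kstat.zfs.<pool>.dataset.objset-<id>.<stat>"
# (placeholders shown in angle brackets), with the global stats under
# "kstat.zfs.misc.zil".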
def kstat_process_str(k, file, objset = "GLOBAL", zil_start = 0):
global kstat
if not k:
print("Unable to process kstat for: " + file)
sys.exit(1)
kstat[objset] = dict()
for s in k:
if not s or (s.name.find("zil") == -1 and \
s.name.find("dataset_name") == -1):
continue
name, value = s.name, s.value
kstat[objset][name[zil_start:]] = value \
if (name.find("dataset_name")) else int(value)
elif sys.platform.startswith('linux'):
def kstat_update(pool = None, objid = None):
global kstat
kstat = {}
if not pool:
k = [line.strip() for line in \
FileCheck("/proc/spl/kstat/zfs/zil")]
kstat_process_str(k, "/proc/spl/kstat/zfs/zil")
elif objid:
file = "/proc/spl/kstat/zfs/" + pool + "/objset-" + objid
k = [line.strip() for line in FileCheck(file)]
kstat_process_str(k, file, objid)
else:
if not os.path.exists(f"/proc/spl/kstat/zfs/{pool}"):
print("Pool \"" + pool + "\" does not exist, Exitting")
sys.exit(1)
objsets = os.listdir(f'/proc/spl/kstat/zfs/{pool}')
for objid in objsets:
if objid.find("objset-") == -1:
continue
file = "/proc/spl/kstat/zfs/" + pool + "/" + objid
k = [line.strip() for line in FileCheck(file)]
kstat_process_str(k, file, objid.replace("objset-", ""))
def kstat_process_str(k, file, objset = "GLOBAL", zil_start = 0):
global kstat
if not k:
print("Unable to process kstat for: " + file)
sys.exit(1)
kstat[objset] = dict()
for s in k:
if not s or (s.find("zil") == -1 and \
s.find("dataset_name") == -1):
continue
name, unused, value = s.split()
kstat[objset][name] = value \
if (name == "dataset_name") else int(value)
def zil_process_kstat():
global curr, pool_name, dataset_name, dsFlag, ds_pairs
curr.clear()
if gFlag == True:
kstat_update()
zil_build_dict()
else:
if pool_name:
kstat_update(pool_name)
zil_build_dict(pool_name)
elif dataset_name:
if dsFlag == False:
dsFlag = True
datasets = dataset_name.split(',')
ds_pairs = defaultdict(list)
for ds in datasets:
try:
objid = subprocess.check_output(['zfs',
'list', '-Hpo', 'objsetid', ds], \
stderr=subprocess.DEVNULL) \
.decode('utf-8').strip()
except subprocess.CalledProcessError as e:
print("Command: \"zfs list -Hpo objset "\
+ str(ds) + "\" failed with error code:"\
+ str(e.returncode))
print("Please make sure that dataset \""\
+ str(ds) + "\" exists")
sys.exit(1)
if not objid:
continue
ds_pairs[ds.split('/')[0]]. \
append(hex(int(objid)))
for pool, objids in ds_pairs.items():
for objid in objids:
kstat_update(pool, objid)
zil_build_dict(pool)
else:
try:
pools = subprocess.check_output(['zpool', 'list', '-Hpo',\
'name']).decode('utf-8').split()
except subprocess.CalledProcessError as e:
print("Command: \"zpool list -Hpo name\" failed with error"\
"code: " + str(e.returncode))
sys.exit(1)
for pool in pools:
kstat_update(pool)
zil_build_dict(pool)
def calculate_diff():
global curr, diff
prev = copy.deepcopy(curr)
zil_process_kstat()
diff = copy.deepcopy(curr)
for pool in curr:
for objset in curr[pool]:
- for col in hdr:
- if col not in ['time', 'pool', 'ds', 'obj']:
- key = cols[col][2]
- # If prev is NULL, this is the
- # first time we are here
- if not prev:
- diff[pool][objset][key] = 0
- else:
- diff[pool][objset][key] \
- = curr[pool][objset][key] \
- - prev[pool][objset][key]
+ for key in curr[pool][objset]:
+ if not isinstance(diff[pool][objset][key], int):
+ continue
+ # If prev is NULL, this is the
+ # first time we are here
+ if not prev:
+ diff[pool][objset][key] = 0
+ else:
+ diff[pool][objset][key] \
+ = curr[pool][objset][key] \
+ - prev[pool][objset][key]
def zil_build_dict(pool = "GLOBAL"):
global kstat
for objset in kstat:
for key in kstat[objset]:
val = kstat[objset][key]
if pool not in curr:
curr[pool] = dict()
if objset not in curr[pool]:
curr[pool][objset] = dict()
curr[pool][objset][key] = val
- curr[pool][objset]["pool"] = pool
- curr[pool][objset]["objset"] = objset
- curr[pool][objset]["time"] = time.strftime("%H:%M:%S", \
- time.localtime())
+
+def zil_extend_dict():
+ global diff
+ for pool in diff:
+ for objset in diff[pool]:
+ diff[pool][objset]["pool"] = pool
+ diff[pool][objset]["objset"] = objset
+ diff[pool][objset]["time"] = time.strftime("%H:%M:%S", \
+ time.localtime())
+ diff[pool][objset]["icc+inc"] = \
+ diff[pool][objset]["zil_itx_copied_count"] + \
+ diff[pool][objset]["zil_itx_needcopy_count"]
+ diff[pool][objset]["icb+inb"] = \
+ diff[pool][objset]["zil_itx_copied_bytes"] + \
+ diff[pool][objset]["zil_itx_needcopy_bytes"]
+ diff[pool][objset]["iic+idc"] = \
+ diff[pool][objset]["zil_itx_indirect_count"] + \
+ diff[pool][objset]["zil_itx_copied_count"] + \
+ diff[pool][objset]["zil_itx_needcopy_count"]
+ diff[pool][objset]["iib+idb"] = \
+ diff[pool][objset]["zil_itx_indirect_bytes"] + \
+ diff[pool][objset]["zil_itx_copied_bytes"] + \
+ diff[pool][objset]["zil_itx_needcopy_bytes"]
+ diff[pool][objset]["imnc+imsc"] = \
+ diff[pool][objset]["zil_itx_metaslab_normal_count"] + \
+ diff[pool][objset]["zil_itx_metaslab_slog_count"]
+ diff[pool][objset]["imnb+imsb"] = \
+ diff[pool][objset]["zil_itx_metaslab_normal_bytes"] + \
+ diff[pool][objset]["zil_itx_metaslab_slog_bytes"]
+ diff[pool][objset]["imnw+imsw"] = \
+ diff[pool][objset]["zil_itx_metaslab_normal_write"] + \
+ diff[pool][objset]["zil_itx_metaslab_slog_write"]
+ diff[pool][objset]["imna+imsa"] = \
+ diff[pool][objset]["zil_itx_metaslab_normal_alloc"] + \
+ diff[pool][objset]["zil_itx_metaslab_slog_alloc"]
+ if diff[pool][objset]["imna+imsa"] > 0:
+ diff[pool][objset]["imb/ima"] = 100 * \
+ diff[pool][objset]["imnb+imsb"] // \
+ diff[pool][objset]["imna+imsa"]
+ else:
+ diff[pool][objset]["imb/ima"] = 100
+ if diff[pool][objset]["zil_itx_metaslab_normal_alloc"] > 0:
+ diff[pool][objset]["imnb/imna"] = 100 * \
+ diff[pool][objset]["zil_itx_metaslab_normal_bytes"] // \
+ diff[pool][objset]["zil_itx_metaslab_normal_alloc"]
+ else:
+ diff[pool][objset]["imnb/imna"] = 100
+ if diff[pool][objset]["zil_itx_metaslab_slog_alloc"] > 0:
+ diff[pool][objset]["imsb/imsa"] = 100 * \
+ diff[pool][objset]["zil_itx_metaslab_slog_bytes"] // \
+ diff[pool][objset]["zil_itx_metaslab_slog_alloc"]
+ else:
+ diff[pool][objset]["imsb/imsa"] = 100
+ if diff[pool][objset]["imnw+imsw"] > 0:
+ diff[pool][objset]["imb/imw"] = 100 * \
+ diff[pool][objset]["imnb+imsb"] // \
+ diff[pool][objset]["imnw+imsw"]
+ else:
+ diff[pool][objset]["imb/imw"] = 100
+ if diff[pool][objset]["zil_itx_metaslab_normal_alloc"] > 0:
+ diff[pool][objset]["imnb/imnw"] = 100 * \
+ diff[pool][objset]["zil_itx_metaslab_normal_bytes"] // \
+ diff[pool][objset]["zil_itx_metaslab_normal_write"]
+ else:
+ diff[pool][objset]["imnb/imnw"] = 100
+ if diff[pool][objset]["zil_itx_metaslab_slog_alloc"] > 0:
+ diff[pool][objset]["imsb/imsw"] = 100 * \
+ diff[pool][objset]["zil_itx_metaslab_slog_bytes"] // \
+ diff[pool][objset]["zil_itx_metaslab_slog_write"]
+ else:
+ diff[pool][objset]["imsb/imsw"] = 100
def sign_handler_epipe(sig, frame):
print("Caught EPIPE signal: " + str(frame))
print("Exitting...")
sys.exit(0)
def main():
global interval
- global curr
+ global curr, diff
hprint = False
init()
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGPIPE, sign_handler_epipe)
+ zil_process_kstat()
+ if not curr:
+ print ("Error: No stats to show")
+ sys.exit(0)
+ print_header()
if interval > 0:
+ time.sleep(interval)
while True:
calculate_diff()
if not diff:
print ("Error: No stats to show")
sys.exit(0)
- if hprint == False:
- print_header()
- hprint = True
+ zil_extend_dict()
print_dict(diff)
time.sleep(interval)
else:
- zil_process_kstat()
- if not curr:
- print ("Error: No stats to show")
- sys.exit(0)
- print_header()
- print_dict(curr)
+ diff = curr
+ zil_extend_dict()
+ print_dict(diff)
if __name__ == '__main__':
main()
diff --git a/sys/contrib/openzfs/cmd/zpool/Makefile.am b/sys/contrib/openzfs/cmd/zpool/Makefile.am
index 3c7c8a9aebe2..d08b8e1791b6 100644
--- a/sys/contrib/openzfs/cmd/zpool/Makefile.am
+++ b/sys/contrib/openzfs/cmd/zpool/Makefile.am
@@ -1,190 +1,199 @@
zpool_CFLAGS = $(AM_CFLAGS)
zpool_CFLAGS += $(LIBBLKID_CFLAGS) $(LIBUUID_CFLAGS)
zpool_CPPFLAGS = $(AM_CPPFLAGS)
zpool_CPPFLAGS += -I$(srcdir)/%D%
sbin_PROGRAMS += zpool
CPPCHECKTARGETS += zpool
zpool_SOURCES = \
%D%/zpool_iter.c \
%D%/zpool_main.c \
%D%/zpool_util.c \
%D%/zpool_util.h \
%D%/zpool_vdev.c
if BUILD_FREEBSD
zpool_SOURCES += \
%D%/os/freebsd/zpool_vdev_os.c
endif
if BUILD_LINUX
zpool_SOURCES += \
%D%/os/linux/zpool_vdev_os.c
endif
zpool_LDADD = \
libzfs.la \
libzfs_core.la \
libnvpair.la \
libuutil.la \
libzutil.la
zpool_LDADD += $(LTLIBINTL)
if BUILD_FREEBSD
zpool_LDADD += -lgeom
endif
zpool_LDADD += -lm $(LIBBLKID_LIBS) $(LIBUUID_LIBS)
dist_noinst_DATA += %D%/zpool.d/README
SHELLCHECKSCRIPTS += $(dist_zpoolexec_SCRIPTS)
zpoolexecdir = $(zfsexecdir)/zpool.d
dist_zpoolexec_SCRIPTS = \
%D%/zpool.d/ata_err \
%D%/zpool.d/cmd_to \
%D%/zpool.d/defect \
%D%/zpool.d/dm-deps \
%D%/zpool.d/enc \
%D%/zpool.d/encdev \
%D%/zpool.d/fault_led \
%D%/zpool.d/health \
%D%/zpool.d/hours_on \
%D%/zpool.d/iostat \
%D%/zpool.d/iostat-10s \
%D%/zpool.d/iostat-1s \
%D%/zpool.d/label \
%D%/zpool.d/locate_led \
%D%/zpool.d/lsblk \
%D%/zpool.d/media \
%D%/zpool.d/model \
%D%/zpool.d/nonmed \
%D%/zpool.d/nvme_err \
%D%/zpool.d/off_ucor \
%D%/zpool.d/pend_sec \
%D%/zpool.d/pwr_cyc \
%D%/zpool.d/r_proc \
%D%/zpool.d/r_ucor \
%D%/zpool.d/realloc \
%D%/zpool.d/rep_ucor \
%D%/zpool.d/serial \
%D%/zpool.d/ses \
%D%/zpool.d/size \
%D%/zpool.d/slot \
%D%/zpool.d/smart \
%D%/zpool.d/smart_test \
%D%/zpool.d/smartx \
%D%/zpool.d/temp \
%D%/zpool.d/test_ended \
%D%/zpool.d/test_progress \
%D%/zpool.d/test_status \
%D%/zpool.d/test_type \
%D%/zpool.d/upath \
%D%/zpool.d/vendor \
%D%/zpool.d/w_proc \
%D%/zpool.d/w_ucor
zpoolconfdefaults = \
dm-deps \
enc \
encdev \
fault_led \
iostat \
iostat-1s \
iostat-10s \
label \
locate_led \
lsblk \
media \
model \
serial \
ses \
size \
slot \
smart \
smartx \
temp \
health \
r_proc \
w_proc \
r_ucor \
w_ucor \
nonmed \
defect \
hours_on \
realloc \
rep_ucor \
cmd_to \
pend_sec \
off_ucor \
ata_err \
nvme_err \
pwr_cyc \
upath \
vendor \
smart_test \
test_type \
test_status \
test_progress \
test_ended
zpoolcompatdir = $(pkgdatadir)/compatibility.d
dist_zpoolcompat_DATA = \
%D%/compatibility.d/compat-2018 \
%D%/compatibility.d/compat-2019 \
%D%/compatibility.d/compat-2020 \
%D%/compatibility.d/compat-2021 \
%D%/compatibility.d/freebsd-11.0 \
%D%/compatibility.d/freebsd-11.2 \
%D%/compatibility.d/freebsd-11.3 \
%D%/compatibility.d/freenas-9.10.2 \
%D%/compatibility.d/grub2 \
%D%/compatibility.d/openzfs-2.0-freebsd \
%D%/compatibility.d/openzfs-2.0-linux \
%D%/compatibility.d/openzfs-2.1-freebsd \
%D%/compatibility.d/openzfs-2.1-linux \
+ %D%/compatibility.d/openzfs-2.2 \
%D%/compatibility.d/openzfsonosx-1.7.0 \
%D%/compatibility.d/openzfsonosx-1.8.1 \
%D%/compatibility.d/openzfsonosx-1.9.3 \
%D%/compatibility.d/zol-0.6.1 \
%D%/compatibility.d/zol-0.6.4 \
%D%/compatibility.d/zol-0.6.5 \
%D%/compatibility.d/zol-0.7 \
%D%/compatibility.d/zol-0.8
# canonical <- alias symbolic link pairs
# eg: "2018" is a link to "compat-2018"
zpoolcompatlinks = \
"compat-2018 2018" \
"compat-2019 2019" \
"compat-2020 2020" \
"compat-2021 2021" \
"freebsd-11.0 freebsd-11.1" \
"freebsd-11.0 freenas-11.0" \
"freebsd-11.2 freenas-11.2" \
"freebsd-11.3 freebsd-11.4" \
"freebsd-11.3 freebsd-12.0" \
"freebsd-11.3 freebsd-12.1" \
"freebsd-11.3 freebsd-12.2" \
+ "freebsd-11.3 freebsd-12.3" \
+ "freebsd-11.3 freebsd-12.4" \
+ "openzfs-2.1-freebsd freebsd-13.0" \
+ "openzfs-2.1-freebsd freebsd-13.1" \
+ "openzfs-2.1-freebsd freebsd-13.2" \
"freebsd-11.3 freenas-11.3" \
"freenas-11.0 freenas-11.1" \
"openzfsonosx-1.9.3 openzfsonosx-1.9.4" \
"openzfs-2.0-freebsd truenas-12.0" \
"zol-0.7 ubuntu-18.04" \
- "zol-0.8 ubuntu-20.04"
+ "zol-0.8 ubuntu-20.04" \
+ "openzfs-2.1-linux ubuntu-22.04" \
+ "openzfs-2.2 openzfs-2.2-linux" \
+ "openzfs-2.2 openzfs-2.2-freebsd"
zpoolconfdir = $(sysconfdir)/zfs/zpool.d
INSTALL_DATA_HOOKS += zpool-install-data-hook
zpool-install-data-hook:
$(MKDIR_P) "$(DESTDIR)$(zpoolconfdir)"
set -x; for f in $(zpoolconfdefaults); do \
[ -f "$(DESTDIR)$(zpoolconfdir)/$${f}" ] || \
[ -L "$(DESTDIR)$(zpoolconfdir)/$${f}" ] || \
$(LN_S) "$(zpoolexecdir)/$${f}" "$(DESTDIR)$(zpoolconfdir)"; \
done
set -x; printf '%s\n' $(zpoolcompatlinks) | \
while read -r canon alias; do \
$(LN_S) -f "$${canon}" "$(DESTDIR)$(zpoolcompatdir)/$${alias}"; \
done
diff --git a/sys/contrib/openzfs/cmd/zpool/compatibility.d/grub2 b/sys/contrib/openzfs/cmd/zpool/compatibility.d/grub2
index 4e8f21362554..fec73a269a78 100644
--- a/sys/contrib/openzfs/cmd/zpool/compatibility.d/grub2
+++ b/sys/contrib/openzfs/cmd/zpool/compatibility.d/grub2
@@ -1,12 +1,14 @@
# Features which are supported by GRUB2
async_destroy
bookmarks
embedded_data
empty_bpobj
enabled_txg
extensible_dataset
filesystem_limits
hole_birth
large_blocks
+livelist
lz4_compress
spacemap_histogram
+zpool_checkpoint
diff --git a/sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.2 b/sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.2
new file mode 100644
index 000000000000..c9491cd8dc42
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.2
@@ -0,0 +1,40 @@
+# Features supported by OpenZFS 2.2 on Linux and FreeBSD
+allocation_classes
+async_destroy
+blake3
+block_cloning
+bookmark_v2
+bookmark_written
+bookmarks
+device_rebuild
+device_removal
+draid
+edonr
+embedded_data
+empty_bpobj
+enabled_txg
+encryption
+extensible_dataset
+filesystem_limits
+head_errlog
+hole_birth
+large_blocks
+large_dnode
+livelist
+log_spacemap
+lz4_compress
+multi_vdev_crash_dump
+obsolete_counts
+project_quota
+redacted_datasets
+redaction_bookmarks
+resilver_defer
+sha512
+skein
+spacemap_histogram
+spacemap_v2
+userobj_accounting
+vdev_zaps_v2
+zilsaxattr
+zpool_checkpoint
+zstd_compress
diff --git a/sys/contrib/openzfs/config/kernel-reclaim_state.m4 b/sys/contrib/openzfs/config/kernel-reclaim_state.m4
new file mode 100644
index 000000000000..9936b3c1001f
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-reclaim_state.m4
@@ -0,0 +1,26 @@
+AC_DEFUN([ZFS_AC_KERNEL_SRC_RECLAIMED], [
+ dnl #
+ dnl # 6.4 API change
+ dnl # The reclaimed_slab of struct reclaim_state
+ dnl # is renamed to reclaimed
+ dnl #
+ ZFS_LINUX_TEST_SRC([reclaim_state_reclaimed], [
+ #include <linux/swap.h>
+ static const struct reclaim_state
+ rs __attribute__ ((unused)) = {
+ .reclaimed = 100,
+ };
+ ],[])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_RECLAIMED], [
+ AC_MSG_CHECKING([whether struct reclaim_state has reclaimed field])
+ ZFS_LINUX_TEST_RESULT([reclaim_state_reclaimed], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_RECLAIM_STATE_RECLAIMED, 1,
+ [struct reclaim_state has reclaimed])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
+
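The macro above only records the check's result as HAVE_RECLAIM_STATE_RECLAIMED in the generated config header. As a minimal, illustrative C sketch of how such a configure define is typically consumed (the helper name is invented and this is not the actual OpenZFS call site):

#include <linux/swap.h>

/*
 * Illustrative only: account freed pages against whichever field name the
 * kernel provides (Linux 6.4 renamed reclaimed_slab to reclaimed).
 */
static inline void
zfs_account_reclaimed(struct reclaim_state *rs, unsigned long pages)
{
#ifdef HAVE_RECLAIM_STATE_RECLAIMED
	rs->reclaimed += pages;
#else
	rs->reclaimed_slab += pages;
#endif
}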
diff --git a/sys/contrib/openzfs/config/kernel.m4 b/sys/contrib/openzfs/config/kernel.m4
index 439ffdf5a898..cb7e736c9a43 100644
--- a/sys/contrib/openzfs/config/kernel.m4
+++ b/sys/contrib/openzfs/config/kernel.m4
@@ -1,1010 +1,1012 @@
dnl #
dnl # Default ZFS kernel configuration
dnl #
AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
AM_COND_IF([BUILD_LINUX], [
dnl # Setup the kernel build environment.
ZFS_AC_KERNEL
ZFS_AC_QAT
dnl # Sanity checks for module building and CONFIG_* defines
ZFS_AC_KERNEL_CONFIG_DEFINED
ZFS_AC_MODULE_SYMVERS
dnl # Sequential ZFS_LINUX_TRY_COMPILE tests
ZFS_AC_KERNEL_FPU_HEADER
ZFS_AC_KERNEL_OBJTOOL_HEADER
ZFS_AC_KERNEL_WAIT_QUEUE_ENTRY_T
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
dnl # Parallel ZFS_LINUX_TEST_SRC / ZFS_LINUX_TEST_RESULT tests
ZFS_AC_KERNEL_TEST_SRC
ZFS_AC_KERNEL_TEST_RESULT
AS_IF([test "$LINUX_OBJ" != "$LINUX"], [
KERNEL_MAKE="$KERNEL_MAKE O=$LINUX_OBJ"
])
AC_SUBST(KERNEL_MAKE)
])
])
dnl #
dnl # Generate and compile all of the kernel API test cases to determine
dnl # which interfaces are available. By invoking the kernel build system
dnl # only once the compilation can be done in parallel significantly
dnl # speeding up the process.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_OBJTOOL
ZFS_AC_KERNEL_SRC_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_SRC_ACCESS_OK_TYPE
ZFS_AC_KERNEL_SRC_PDE_DATA
ZFS_AC_KERNEL_SRC_FALLOCATE
ZFS_AC_KERNEL_SRC_FADVISE
ZFS_AC_KERNEL_SRC_GENERIC_FADVISE
ZFS_AC_KERNEL_SRC_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_SRC_RWSEM
ZFS_AC_KERNEL_SRC_SCHED
ZFS_AC_KERNEL_SRC_USLEEP_RANGE
ZFS_AC_KERNEL_SRC_KMEM_CACHE
ZFS_AC_KERNEL_SRC_KVMALLOC
ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_SRC_WAIT
ZFS_AC_KERNEL_SRC_INODE_TIMES
ZFS_AC_KERNEL_SRC_INODE_LOCK
ZFS_AC_KERNEL_SRC_GROUP_INFO_GID
ZFS_AC_KERNEL_SRC_RW
ZFS_AC_KERNEL_SRC_TIMER_SETUP
ZFS_AC_KERNEL_SRC_SUPER_USER_NS
ZFS_AC_KERNEL_SRC_PROC_OPERATIONS
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_SRC_BIO
ZFS_AC_KERNEL_SRC_BLKDEV
ZFS_AC_KERNEL_SRC_BLK_QUEUE
ZFS_AC_KERNEL_SRC_GENHD_FLAGS
ZFS_AC_KERNEL_SRC_REVALIDATE_DISK
ZFS_AC_KERNEL_SRC_GET_DISK_RO
ZFS_AC_KERNEL_SRC_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_SRC_DISCARD_GRANULARITY
ZFS_AC_KERNEL_SRC_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_SRC_XATTR
ZFS_AC_KERNEL_SRC_ACL
ZFS_AC_KERNEL_SRC_INODE_SETATTR
ZFS_AC_KERNEL_SRC_INODE_GETATTR
ZFS_AC_KERNEL_SRC_INODE_SET_FLAGS
ZFS_AC_KERNEL_SRC_INODE_SET_IVERSION
ZFS_AC_KERNEL_SRC_SHOW_OPTIONS
ZFS_AC_KERNEL_SRC_FILE_INODE
ZFS_AC_KERNEL_SRC_FILE_DENTRY
ZFS_AC_KERNEL_SRC_FSYNC
ZFS_AC_KERNEL_SRC_AIO_FSYNC
ZFS_AC_KERNEL_SRC_EVICT_INODE
ZFS_AC_KERNEL_SRC_DIRTY_INODE
ZFS_AC_KERNEL_SRC_SHRINKER
ZFS_AC_KERNEL_SRC_MKDIR
ZFS_AC_KERNEL_SRC_LOOKUP_FLAGS
ZFS_AC_KERNEL_SRC_CREATE
ZFS_AC_KERNEL_SRC_PERMISSION
ZFS_AC_KERNEL_SRC_GET_LINK
ZFS_AC_KERNEL_SRC_PUT_LINK
ZFS_AC_KERNEL_SRC_TMPFILE
ZFS_AC_KERNEL_SRC_AUTOMOUNT
ZFS_AC_KERNEL_SRC_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_SRC_COMMIT_METADATA
ZFS_AC_KERNEL_SRC_CLEAR_INODE
ZFS_AC_KERNEL_SRC_SETATTR_PREPARE
ZFS_AC_KERNEL_SRC_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_SRC_DENTRY
ZFS_AC_KERNEL_SRC_DENTRY_ALIAS_D_U
ZFS_AC_KERNEL_SRC_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SRC_SECURITY_INODE
ZFS_AC_KERNEL_SRC_FST_MOUNT
ZFS_AC_KERNEL_SRC_BDI
ZFS_AC_KERNEL_SRC_SET_NLINK
ZFS_AC_KERNEL_SRC_SGET
ZFS_AC_KERNEL_SRC_LSEEK_EXECUTE
ZFS_AC_KERNEL_SRC_VFS_FILEMAP_DIRTY_FOLIO
ZFS_AC_KERNEL_SRC_VFS_READ_FOLIO
ZFS_AC_KERNEL_SRC_VFS_GETATTR
ZFS_AC_KERNEL_SRC_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_SRC_VFS_ITERATE
ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO
ZFS_AC_KERNEL_SRC_VFS_READPAGES
ZFS_AC_KERNEL_SRC_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE
ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_SRC_VFS_IOV_ITER
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT
ZFS_AC_KERNEL_SRC_FPU
ZFS_AC_KERNEL_SRC_FMODE_T
ZFS_AC_KERNEL_SRC_KUIDGID_T
ZFS_AC_KERNEL_SRC_KUID_HELPERS
ZFS_AC_KERNEL_SRC_RENAME
ZFS_AC_KERNEL_SRC_CURRENT_TIME
ZFS_AC_KERNEL_SRC_USERNS_CAPABILITIES
ZFS_AC_KERNEL_SRC_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_SRC_KTIME
ZFS_AC_KERNEL_SRC_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
ZFS_AC_KERNEL_SRC_KSTRTOUL
ZFS_AC_KERNEL_SRC_PERCPU
ZFS_AC_KERNEL_SRC_CPU_HOTPLUG
ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR
ZFS_AC_KERNEL_SRC_MKNOD
ZFS_AC_KERNEL_SRC_SYMLINK
ZFS_AC_KERNEL_SRC_BIO_MAX_SEGS
ZFS_AC_KERNEL_SRC_SIGNAL_STOP
ZFS_AC_KERNEL_SRC_SIGINFO
ZFS_AC_KERNEL_SRC_SYSFS
ZFS_AC_KERNEL_SRC_SET_SPECIAL_STATE
ZFS_AC_KERNEL_SRC_STANDALONE_LINUX_STDARG
ZFS_AC_KERNEL_SRC_PAGEMAP_FOLIO_WAIT_BIT
ZFS_AC_KERNEL_SRC_ADD_DISK
ZFS_AC_KERNEL_SRC_KTHREAD
ZFS_AC_KERNEL_SRC_ZERO_PAGE
ZFS_AC_KERNEL_SRC___COPY_FROM_USER_INATOMIC
ZFS_AC_KERNEL_SRC_USER_NS_COMMON_INUM
ZFS_AC_KERNEL_SRC_IDMAP_MNT_API
ZFS_AC_KERNEL_SRC_IATTR_VFSID
ZFS_AC_KERNEL_SRC_FILEMAP
ZFS_AC_KERNEL_SRC_WRITEPAGE_T
+ ZFS_AC_KERNEL_SRC_RECLAIMED
case "$host_cpu" in
powerpc*)
ZFS_AC_KERNEL_SRC_CPU_HAS_FEATURE
ZFS_AC_KERNEL_SRC_FLUSH_DCACHE_PAGE
;;
esac
AC_MSG_CHECKING([for available kernel interfaces])
ZFS_LINUX_TEST_COMPILE_ALL([kabi])
AC_MSG_RESULT([done])
])
dnl #
dnl # Check results of kernel interface tests.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_ACCESS_OK_TYPE
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_OBJTOOL
ZFS_AC_KERNEL_PDE_DATA
ZFS_AC_KERNEL_FALLOCATE
ZFS_AC_KERNEL_FADVISE
ZFS_AC_KERNEL_GENERIC_FADVISE
ZFS_AC_KERNEL_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_RWSEM
ZFS_AC_KERNEL_SCHED
ZFS_AC_KERNEL_USLEEP_RANGE
ZFS_AC_KERNEL_KMEM_CACHE
ZFS_AC_KERNEL_KVMALLOC
ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_WAIT
ZFS_AC_KERNEL_INODE_TIMES
ZFS_AC_KERNEL_INODE_LOCK
ZFS_AC_KERNEL_GROUP_INFO_GID
ZFS_AC_KERNEL_RW
ZFS_AC_KERNEL_TIMER_SETUP
ZFS_AC_KERNEL_SUPER_USER_NS
ZFS_AC_KERNEL_PROC_OPERATIONS
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_BIO
ZFS_AC_KERNEL_BLKDEV
ZFS_AC_KERNEL_BLK_QUEUE
ZFS_AC_KERNEL_GENHD_FLAGS
ZFS_AC_KERNEL_REVALIDATE_DISK
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_DISCARD_GRANULARITY
ZFS_AC_KERNEL_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_XATTR
ZFS_AC_KERNEL_ACL
ZFS_AC_KERNEL_INODE_SETATTR
ZFS_AC_KERNEL_INODE_GETATTR
ZFS_AC_KERNEL_INODE_SET_FLAGS
ZFS_AC_KERNEL_INODE_SET_IVERSION
ZFS_AC_KERNEL_SHOW_OPTIONS
ZFS_AC_KERNEL_FILE_INODE
ZFS_AC_KERNEL_FILE_DENTRY
ZFS_AC_KERNEL_FSYNC
ZFS_AC_KERNEL_AIO_FSYNC
ZFS_AC_KERNEL_EVICT_INODE
ZFS_AC_KERNEL_DIRTY_INODE
ZFS_AC_KERNEL_SHRINKER
ZFS_AC_KERNEL_MKDIR
ZFS_AC_KERNEL_LOOKUP_FLAGS
ZFS_AC_KERNEL_CREATE
ZFS_AC_KERNEL_PERMISSION
ZFS_AC_KERNEL_GET_LINK
ZFS_AC_KERNEL_PUT_LINK
ZFS_AC_KERNEL_TMPFILE
ZFS_AC_KERNEL_AUTOMOUNT
ZFS_AC_KERNEL_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_COMMIT_METADATA
ZFS_AC_KERNEL_CLEAR_INODE
ZFS_AC_KERNEL_SETATTR_PREPARE
ZFS_AC_KERNEL_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_DENTRY
ZFS_AC_KERNEL_DENTRY_ALIAS_D_U
ZFS_AC_KERNEL_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SECURITY_INODE
ZFS_AC_KERNEL_FST_MOUNT
ZFS_AC_KERNEL_BDI
ZFS_AC_KERNEL_SET_NLINK
ZFS_AC_KERNEL_SGET
ZFS_AC_KERNEL_LSEEK_EXECUTE
ZFS_AC_KERNEL_VFS_FILEMAP_DIRTY_FOLIO
ZFS_AC_KERNEL_VFS_READ_FOLIO
ZFS_AC_KERNEL_VFS_GETATTR
ZFS_AC_KERNEL_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_VFS_ITERATE
ZFS_AC_KERNEL_VFS_DIRECT_IO
ZFS_AC_KERNEL_VFS_READPAGES
ZFS_AC_KERNEL_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_VFS_RW_ITERATE
ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_VFS_IOV_ITER
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
ZFS_AC_KERNEL_GENERIC_IO_ACCT
ZFS_AC_KERNEL_FPU
ZFS_AC_KERNEL_FMODE_T
ZFS_AC_KERNEL_KUIDGID_T
ZFS_AC_KERNEL_KUID_HELPERS
ZFS_AC_KERNEL_RENAME
ZFS_AC_KERNEL_CURRENT_TIME
ZFS_AC_KERNEL_USERNS_CAPABILITIES
ZFS_AC_KERNEL_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_KTIME
ZFS_AC_KERNEL_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_TOTALHIGH_PAGES
ZFS_AC_KERNEL_KSTRTOUL
ZFS_AC_KERNEL_PERCPU
ZFS_AC_KERNEL_CPU_HOTPLUG
ZFS_AC_KERNEL_GENERIC_FILLATTR
ZFS_AC_KERNEL_MKNOD
ZFS_AC_KERNEL_SYMLINK
ZFS_AC_KERNEL_BIO_MAX_SEGS
ZFS_AC_KERNEL_SIGNAL_STOP
ZFS_AC_KERNEL_SIGINFO
ZFS_AC_KERNEL_SYSFS
ZFS_AC_KERNEL_SET_SPECIAL_STATE
ZFS_AC_KERNEL_STANDALONE_LINUX_STDARG
ZFS_AC_KERNEL_PAGEMAP_FOLIO_WAIT_BIT
ZFS_AC_KERNEL_ADD_DISK
ZFS_AC_KERNEL_KTHREAD
ZFS_AC_KERNEL_ZERO_PAGE
ZFS_AC_KERNEL___COPY_FROM_USER_INATOMIC
ZFS_AC_KERNEL_USER_NS_COMMON_INUM
ZFS_AC_KERNEL_IDMAP_MNT_API
ZFS_AC_KERNEL_IATTR_VFSID
ZFS_AC_KERNEL_FILEMAP
ZFS_AC_KERNEL_WRITEPAGE_T
+ ZFS_AC_KERNEL_RECLAIMED
case "$host_cpu" in
powerpc*)
ZFS_AC_KERNEL_CPU_HAS_FEATURE
ZFS_AC_KERNEL_FLUSH_DCACHE_PAGE
;;
esac
])
dnl #
dnl # Detect name used for Module.symvers file in kernel
dnl #
AC_DEFUN([ZFS_AC_MODULE_SYMVERS], [
modpost=$LINUX/scripts/Makefile.modpost
AC_MSG_CHECKING([kernel file name for module symbols])
AS_IF([test "x$enable_linux_builtin" != xyes -a -f "$modpost"], [
AS_IF([grep -q Modules.symvers $modpost], [
LINUX_SYMBOLS=Modules.symvers
], [
LINUX_SYMBOLS=Module.symvers
])
AS_IF([test ! -f "$LINUX_OBJ/$LINUX_SYMBOLS"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed. If you are building with a custom kernel, make sure
*** the kernel is configured, built, and the '--with-linux=PATH'
*** configure option refers to the location of the kernel source.
])
])
], [
LINUX_SYMBOLS=NONE
])
AC_MSG_RESULT($LINUX_SYMBOLS)
AC_SUBST(LINUX_SYMBOLS)
])
dnl #
dnl # Detect the kernel to be built against
dnl #
dnl # Most modern Linux distributions have separate locations for bare
dnl # source (source) and prebuilt (build) files. Additionally, there are
dnl # `source` and `build` symlinks in `/lib/modules/$(KERNEL_VERSION)`
dnl # pointing to them. The directory search order is now:
dnl #
dnl # - `configure` command line values if both `--with-linux` and
dnl # `--with-linux-obj` were defined
dnl #
dnl # - If only `--with-linux` was defined, `--with-linux-obj` is assumed
dnl # to have the same value as `--with-linux`
dnl #
dnl # - If neither `--with-linux` nor `--with-linux-obj` were defined
dnl # autodetection is used:
dnl #
dnl # - `/lib/modules/$(uname -r)/{source,build}` respectively, if exist.
dnl #
dnl # - If only `/lib/modules/$(uname -r)/build` exists, it is assumed
dnl # to be both source and build directory.
dnl #
dnl # - The first directory in `/lib/modules` with the highest version
dnl # number according to `sort -V` which contains both `source` and
dnl # `build` symlinks/directories. If module directory contains only
dnl # `build` component, it is assumed to be both source and build
dnl # directory.
dnl #
dnl # - Last resort: the first directory matching `/usr/src/kernels/*`
dnl # and `/usr/src/linux-*` with the highest version number according
dnl # to `sort -V` is assumed to be both source and build directory.
dnl #
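dnl # For example (paths are illustrative only), a split source/build layout
dnl # can be selected explicitly with:
dnl #
dnl #   ./configure --with-linux=/usr/src/linux-6.1.38 \
dnl #       --with-linux-obj=/usr/lib/modules/6.1.38/build
dnl #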
AC_DEFUN([ZFS_AC_KERNEL], [
AC_ARG_WITH([linux],
AS_HELP_STRING([--with-linux=PATH],
[Path to kernel source]),
[kernelsrc="$withval"])
AC_ARG_WITH(linux-obj,
AS_HELP_STRING([--with-linux-obj=PATH],
[Path to kernel build objects]),
[kernelbuild="$withval"])
AC_MSG_CHECKING([kernel source and build directories])
AS_IF([test -n "$kernelsrc" && test -z "$kernelbuild"], [
kernelbuild="$kernelsrc"
], [test -z "$kernelsrc"], [
AS_IF([test -e "/lib/modules/$(uname -r)/source" && \
test -e "/lib/modules/$(uname -r)/build"], [
src="/lib/modules/$(uname -r)/source"
build="/lib/modules/$(uname -r)/build"
], [test -e "/lib/modules/$(uname -r)/build"], [
build="/lib/modules/$(uname -r)/build"
src="$build"
], [
src=
for d in $(ls -1d /lib/modules/* 2>/dev/null | sort -Vr); do
if test -e "$d/source" && test -e "$d/build"; then
src="$d/source"
build="$d/build"
break
fi
if test -e "$d/build"; then
src="$d/build"
build="$d/build"
break
fi
done
# the least reliable method
if test -z "$src"; then
src=$(ls -1d /usr/src/kernels/* /usr/src/linux-* \
2>/dev/null | grep -v obj | sort -Vr | head -1)
build="$src"
fi
])
AS_IF([test -n "$src" && test -e "$src"], [
kernelsrc=$(readlink -e "$src")
], [
kernelsrc="[Not found]"
])
AS_IF([test -n "$build" && test -e "$build"], [
kernelbuild=$(readlink -e "$build")
], [
kernelbuild="[Not found]"
])
], [
AS_IF([test "$kernelsrc" = "NONE"], [
kernsrcver=NONE
])
withlinux=yes
])
AC_MSG_RESULT([done])
AC_MSG_CHECKING([kernel source directory])
AC_MSG_RESULT([$kernelsrc])
AC_MSG_CHECKING([kernel build directory])
AC_MSG_RESULT([$kernelbuild])
AS_IF([test ! -d "$kernelsrc" || test ! -d "$kernelbuild"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed and then try again. If that fails, you can specify the
*** location of the kernel source and build with the '--with-linux=PATH' and
*** '--with-linux-obj=PATH' options respectively.])
])
AC_MSG_CHECKING([kernel source version])
utsrelease1=$kernelbuild/include/linux/version.h
utsrelease2=$kernelbuild/include/linux/utsrelease.h
utsrelease3=$kernelbuild/include/generated/utsrelease.h
AS_IF([test -r $utsrelease1 && grep -qF UTS_RELEASE $utsrelease1], [
utsrelease=$utsrelease1
], [test -r $utsrelease2 && grep -qF UTS_RELEASE $utsrelease2], [
utsrelease=$utsrelease2
], [test -r $utsrelease3 && grep -qF UTS_RELEASE $utsrelease3], [
utsrelease=$utsrelease3
])
AS_IF([test -n "$utsrelease"], [
kernsrcver=$($AWK '/UTS_RELEASE/ { gsub(/"/, "", $[3]); print $[3] }' $utsrelease)
AS_IF([test -z "$kernsrcver"], [
AC_MSG_RESULT([Not found])
AC_MSG_ERROR([
*** Cannot determine kernel version.
])
])
], [
AC_MSG_RESULT([Not found])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
])
else
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
*** Please run 'make prepare' inside the kernel source tree.])
fi
])
AC_MSG_RESULT([$kernsrcver])
AS_VERSION_COMPARE([$kernsrcver], [$ZFS_META_KVER_MIN], [
AC_MSG_ERROR([
*** Cannot build against kernel version $kernsrcver.
*** The minimum supported kernel version is $ZFS_META_KVER_MIN.
])
])
LINUX=${kernelsrc}
LINUX_OBJ=${kernelbuild}
LINUX_VERSION=${kernsrcver}
AC_SUBST(LINUX)
AC_SUBST(LINUX_OBJ)
AC_SUBST(LINUX_VERSION)
])
dnl #
dnl # Detect the QAT module to be built against, QAT provides hardware
dnl # acceleration for data compression:
dnl #
dnl # https://01.org/intel-quickassist-technology
dnl #
dnl # 1) Download and install QAT driver from the above link
dnl # 2) Start QAT driver in your system:
dnl # service qat_service start
dnl # 3) Enable QAT in ZFS, e.g.:
dnl # ./configure --with-qat=<qat-driver-path>/QAT1.6
dnl # make
dnl # 4) Set GZIP compression in ZFS dataset:
dnl # zfs set compression=gzip <dataset>
dnl #
dnl # Then the data written to this ZFS pool is compressed by QAT accelerator
dnl # automatically, and de-compressed by QAT when read from the pool.
dnl #
dnl # 1) Get QAT hardware statistics with:
dnl # cat /proc/icp_dh895xcc_dev/qat
dnl # 2) To disable QAT:
dnl # insmod zfs.ko zfs_qat_disable=1
dnl #
AC_DEFUN([ZFS_AC_QAT], [
AC_ARG_WITH([qat],
AS_HELP_STRING([--with-qat=PATH],
[Path to qat source]),
AS_IF([test "$withval" = "yes"],
AC_MSG_ERROR([--with-qat=PATH requires a PATH]),
[qatsrc="$withval"]))
AC_ARG_WITH([qat-obj],
AS_HELP_STRING([--with-qat-obj=PATH],
[Path to qat build objects]),
[qatbuild="$withval"])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat source directory])
AC_MSG_RESULT([$qatsrc])
QAT_SRC="${qatsrc}/quickassist"
AS_IF([ test ! -e "$QAT_SRC/include/cpa.h"], [
AC_MSG_ERROR([
*** Please make sure the qat driver package is installed
*** and specify the location of the qat source with the
*** '--with-qat=PATH' option then try again. Failed to
*** find cpa.h in:
${QAT_SRC}/include])
])
])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat build directory])
AS_IF([test -z "$qatbuild"], [
qatbuild="${qatsrc}/build"
])
AC_MSG_RESULT([$qatbuild])
QAT_OBJ=${qatbuild}
AS_IF([ ! test -e "$QAT_OBJ/icp_qa_al.ko" && ! test -e "$QAT_OBJ/qat_api.ko"], [
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find icp_qa_al.ko or qat_api.ko in:
$QAT_OBJ])
])
AC_SUBST(QAT_SRC)
AC_SUBST(QAT_OBJ)
AC_DEFINE(HAVE_QAT, 1,
[qat is enabled and exists])
])
dnl #
dnl # Detect the name used for the QAT Module.symvers file.
dnl #
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat file for module symbols])
QAT_SYMBOLS=$QAT_SRC/lookaside/access_layer/src/Module.symvers
AS_IF([test -r $QAT_SYMBOLS], [
AC_MSG_RESULT([$QAT_SYMBOLS])
AC_SUBST(QAT_SYMBOLS)
],[
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find Module.symvers in:
$QAT_SYMBOLS
])
])
])
])
dnl #
dnl # ZFS_LINUX_CONFTEST_H
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_H], [
test -d build/$2 || mkdir -p build/$2
cat - <<_ACEOF >build/$2/$2.h
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_C
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_C], [
test -d build/$2 || mkdir -p build/$2
cat confdefs.h - <<_ACEOF >build/$2/$2.c
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_MAKEFILE
dnl #
dnl # $1 - test case name
dnl # $2 - add to top-level Makefile
dnl # $3 - additional build flags
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_MAKEFILE], [
test -d build || mkdir -p build
test -d build/$1 || mkdir -p build/$1
file=build/$1/Makefile
dnl # Example command line to manually build source.
cat - <<_ACEOF >$file
# Example command line to manually build source
# make modules -C $LINUX_OBJ $ARCH_UM M=$PWD/build/$1
ccflags-y := -Werror $FRAME_LARGER_THAN
_ACEOF
dnl # Additional custom CFLAGS as requested.
m4_ifval($3, [echo "ccflags-y += $3" >>$file], [])
dnl # Test case source
echo "obj-m := $1.o" >>$file
AS_IF([test "x$2" = "xyes"], [echo "obj-m += $1/" >>build/Makefile], [])
])
dnl #
dnl # ZFS_LINUX_TEST_PROGRAM(C)([PROLOGUE], [BODY])
dnl #
m4_define([ZFS_LINUX_TEST_PROGRAM], [
#include <linux/module.h>
$1
int
main (void)
{
$2
;
return 0;
}
MODULE_DESCRIPTION("conftest");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
MODULE_LICENSE($3);
])
dnl #
dnl # ZFS_LINUX_TEST_REMOVE
dnl #
dnl # Removes the specified test source and results.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_REMOVE], [
test -d build/$1 && rm -Rf build/$1
test -f build/Makefile && sed '/$1/d' build/Makefile
])
dnl #
dnl # ZFS_LINUX_COMPILE
dnl #
dnl # $1 - build dir
dnl # $2 - test command
dnl # $3 - pass command
dnl # $4 - fail command
dnl # $5 - set KBUILD_MODPOST_NOFINAL='yes'
dnl # $6 - set KBUILD_MODPOST_WARN='yes'
dnl #
dnl # Used internally by ZFS_LINUX_TEST_{COMPILE,MODPOST}
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE], [
AC_ARG_VAR([KERNEL_CC], [C compiler for
building kernel modules])
AC_ARG_VAR([KERNEL_LD], [Linker for
building kernel modules])
AC_ARG_VAR([KERNEL_LLVM], [Binary option to
build kernel modules with LLVM/CLANG toolchain])
AC_TRY_COMMAND([
KBUILD_MODPOST_NOFINAL="$5" KBUILD_MODPOST_WARN="$6"
make modules -k -j$TEST_JOBS ${KERNEL_CC:+CC=$KERNEL_CC}
${KERNEL_LD:+LD=$KERNEL_LD} ${KERNEL_LLVM:+LLVM=$KERNEL_LLVM}
CONFIG_MODULES=y CFLAGS_MODULE=-DCONFIG_MODULES
-C $LINUX_OBJ $ARCH_UM M=$PWD/$1 >$1/build.log 2>&1])
AS_IF([AC_TRY_COMMAND([$2])], [$3], [$4])
])
dnl #
dnl # ZFS_LINUX_TEST_COMPILE
dnl #
dnl # Perform a full compile excluding the final modpost phase.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.compile.$1
mv $2/build.log $2/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to compile test source to determine kernel interfaces.])
], [yes], [])
])
dnl #
dnl # ZFS_LINUX_TEST_MODPOST
dnl #
dnl # Perform a full compile including the modpost phase. This may
dnl # be an incremental build if the objects have already been built.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_MODPOST], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.modpost.$1
cat $2/build.log >>build/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to modpost test source to determine kernel interfaces.])
], [], [yes])
])
dnl #
dnl # Perform the compilation of the test cases in two phases.
dnl #
dnl # Phase 1) attempt to build the object files for all of the tests
dnl # defined by the ZFS_LINUX_TEST_SRC macro. But do not
dnl # perform the final modpost stage.
dnl #
dnl # Phase 2) disable all tests which failed the initial compilation,
dnl # then invoke the final modpost step for the remaining tests.
dnl #
dnl # This allows us to efficiently build the test cases in parallel while
dnl # remaining resilient to build failures which are expected when
dnl # detecting the available kernel interfaces.
dnl #
dnl # The maximum allowed parallelism can be controlled by setting the
dnl # TEST_JOBS environment variable. Otherwise, it defaults to $(nproc).
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE_ALL], [
dnl # Phase 1 - Compilation only, final linking is skipped.
ZFS_LINUX_TEST_COMPILE([$1], [build])
dnl #
dnl # Phase 2 - When building external modules disable test cases
dnl # which failed to compile and invoke modpost to verify the
dnl # final linking.
dnl #
dnl # Test names suffixed with '_license' call modpost independently
dnl # to ensure that a single incompatibility does not result in the
dnl # modpost phase exiting early. This check is not performed on
dnl # every symbol since the majority are compatible and doing so
dnl # would significantly slow down this phase.
dnl #
dnl # When configuring for builtin (--enable-linux-builtin)
dnl # fake the linking step and artificially create the expected .ko
dnl # files for tests which did compile. This is required for
dnl # kernels which do not have loadable module support or have
dnl # not yet been built.
dnl #
AS_IF([test "x$enable_linux_builtin" = "xno"], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
AS_IF([test "${name##*_}" = "license"], [
ZFS_LINUX_TEST_MODPOST([$1],
[build/$name])
echo "obj-n += $dir" >>build/Makefile
], [
echo "obj-m += $dir" >>build/Makefile
])
], [
echo "obj-n += $dir" >>build/Makefile
])
done
ZFS_LINUX_TEST_MODPOST([$1], [build])
], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
touch build/$name/$name.ko
])
done
])
])
dnl #
dnl # ZFS_LINUX_TEST_SRC
dnl #
dnl # $1 - name
dnl # $2 - global
dnl # $3 - source
dnl # $4 - extra cflags
dnl # $5 - check license-compatibility
dnl #
dnl # Check if the test source is buildable at all and then if it is
dnl # license compatible.
dnl #
dnl # N.B. Because all of the test cases are compiled in parallel, they
dnl # must never depend on the results of previous tests. Each test
dnl # needs to be entirely independent.
dnl #
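dnl # A concrete caller appears in kernel-reclaim_state.m4 earlier in this
dnl # change, for example:
dnl #
dnl #   ZFS_LINUX_TEST_SRC([reclaim_state_reclaimed], [
dnl #           #include <linux/swap.h>
dnl #           ...
dnl #   ],[])
dnl #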
AC_DEFUN([ZFS_LINUX_TEST_SRC], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[$2]], [[$3]],
[["Dual BSD/GPL"]])], [$1])
ZFS_LINUX_CONFTEST_MAKEFILE([$1], [yes], [$4])
AS_IF([ test -n "$5" ], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM(
[[$2]], [[$3]], [[$5]])], [$1_license])
ZFS_LINUX_CONFTEST_MAKEFILE([$1_license], [yes], [$4])
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT
dnl #
dnl # $1 - name of a test source (ZFS_LINUX_TEST_SRC)
dnl # $2 - run on success (valid .ko generated)
dnl # $3 - run on failure (unable to compile)
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT], [
AS_IF([test -d build/$1], [
AS_IF([test -f build/$1/$1.ko], [$2], [$3])
], [
AC_MSG_ERROR([
*** No matching source for the "$1" test, check that
*** both the test source and result macros refer to the same name.
])
])
])
dnl #
dnl # ZFS_LINUX_TEST_ERROR
dnl #
dnl # Generic error message which can be used when none of the expected
dnl # kernel interfaces were detected.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_ERROR], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TEST_RESULT except ZFS_CHECK_SYMBOL_EXPORT is called to
dnl # verify symbol exports, unless --enable-linux-builtin was provided to
dnl # configure.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT_SYMBOL], [
AS_IF([ ! test -f build/$1/$1.ko], [
$5
], [
AS_IF([test "x$enable_linux_builtin" != "xyes"], [
ZFS_CHECK_SYMBOL_EXPORT([$2], [$3], [$4], [$5])
], [
$4
])
])
])
dnl #
dnl # ZFS_LINUX_COMPILE_IFELSE
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE_IFELSE], [
ZFS_LINUX_TEST_REMOVE([conftest])
m4_ifvaln([$1], [ZFS_LINUX_CONFTEST_C([$1], [conftest])])
m4_ifvaln([$5], [ZFS_LINUX_CONFTEST_H([$5], [conftest])],
[ZFS_LINUX_CONFTEST_H([], [conftest])])
ZFS_LINUX_CONFTEST_MAKEFILE([conftest], [no],
[m4_ifvaln([$5], [-I$PWD/build/conftest], [])])
ZFS_LINUX_COMPILE([build/conftest], [$2], [$3], [$4], [], [])
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE
dnl #
dnl # $1 - global
dnl # $2 - source
dnl # $3 - run on success (valid .ko generated)
dnl # $4 - run on failure (unable to compile)
dnl #
dnl # When configuring as builtin (--enable-linux-builtin) for kernels
dnl # without loadable module support (CONFIG_MODULES=n) only the object
dnl # file is created. See ZFS_LINUX_TEST_COMPILE_ALL for details.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4])
])
])
dnl #
dnl # ZFS_CHECK_SYMBOL_EXPORT
dnl #
dnl # Check if a symbol is exported or not by consulting the symbols
dnl # file, or optionally the source code.
dnl #
AC_DEFUN([ZFS_CHECK_SYMBOL_EXPORT], [
grep -q -E '[[[:space:]]]$1[[[:space:]]]' \
$LINUX_OBJ/$LINUX_SYMBOLS 2>/dev/null
rc=$?
if test $rc -ne 0; then
export=0
for file in $2; do
grep -q -E "EXPORT_SYMBOL.*($1)" \
"$LINUX/$file" 2>/dev/null
rc=$?
if test $rc -eq 0; then
export=1
break;
fi
done
if test $export -eq 0; then :
$4
else :
$3
fi
else :
$3
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TRY_COMPILE except ZFS_CHECK_SYMBOL_EXPORT is called
dnl # to verify symbol exports, unless --enable-linux-builtin was provided
dnl # to configure.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_SYMBOL], [
ZFS_LINUX_TRY_COMPILE([$1], [$2], [rc=0], [rc=1])
if test $rc -ne 0; then :
$6
else
if test "x$enable_linux_builtin" != xyes; then
ZFS_CHECK_SYMBOL_EXPORT([$3], [$4], [rc=0], [rc=1])
fi
if test $rc -ne 0; then :
$6
else :
$5
fi
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_HEADER
dnl # like ZFS_LINUX_TRY_COMPILE, except the contents of conftest.h are
dnl # provided via the fifth parameter
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_HEADER], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4], [$5])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4], [$5])
])
])
dnl #
dnl # AS_VERSION_COMPARE_LE
dnl # like AS_VERSION_COMPARE, but runs $3 if (and only if) $1 <= $2
dnl # AS_VERSION_COMPARE_LE (version-1, version-2, [action-if-less-or-equal], [action-if-greater])
dnl #
AC_DEFUN([AS_VERSION_COMPARE_LE], [
AS_VERSION_COMPARE([$1], [$2], [$3], [$3], [$4])
])
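dnl # For example, ZFS_LINUX_REQUIRE_API below calls
dnl # AS_VERSION_COMPARE_LE([$2], [$kernsrcver], [...]), so its error action
dnl # only runs when the kernel being built against is at least version $2.
dnl #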
dnl #
dnl # ZFS_LINUX_REQUIRE_API
dnl # like ZFS_LINUX_TEST_ERROR, except only fails if the kernel is
dnl # at least some specified version.
dnl #
AC_DEFUN([ZFS_LINUX_REQUIRE_API], [
AS_VERSION_COMPARE_LE([$2], [$kernsrcver], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected. This
*** interface is expected for kernel version "$2" and above.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications. Newer kernels may have incompatible
*** APIs.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
], [
AC_MSG_RESULT(no)
])
])
diff --git a/sys/contrib/openzfs/contrib/debian/rules.in b/sys/contrib/openzfs/contrib/debian/rules.in
index 63892c6ca243..f0791cfabd38 100755
--- a/sys/contrib/openzfs/contrib/debian/rules.in
+++ b/sys/contrib/openzfs/contrib/debian/rules.in
@@ -1,223 +1,223 @@
#!/usr/bin/make -f
include /usr/share/dpkg/default.mk
LSB_DISTRIBUTOR := $(shell lsb_release -is)
NAME := $(shell awk '$$1 == "Name:" { print $$2; }' META)
LINUX_MIN := $(shell awk '/Linux-Minimum:/{print $$2}' META)
LINUX_NEXT := $(shell awk -F'[ .]' '/Linux-Maximum:/{print $$2 "." $$3+1}' META)
-DKMSFILES := module include config zfs.release.in autogen.sh META AUTHORS \
- COPYRIGHT LICENSE README.md
+DKMSFILES := module include config zfs.release.in autogen.sh copy-builtin META AUTHORS \
+ COPYRIGHT LICENSE README.md CODE_OF_CONDUCT.md NEWS NOTICE RELEASES.md
ifndef KVERS
KVERS=$(shell uname -r)
endif
non_epoch_version=$(shell echo $(KVERS) | perl -pe 's/^\d+://')
PACKAGE=openzfs-zfs
pmodules = $(PACKAGE)-modules-$(non_epoch_version)
export DEB_BUILD_MAINT_OPTIONS = hardening=+all
NUM_CPUS = $(shell nproc 2>/dev/null)
PARALLEL = $(subst parallel=,,$(filter parallel=%,$(DEB_BUILD_OPTIONS)))
NJOBS = -j$(or $(PARALLEL),$(NUM_CPUS),1)
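# NJOBS prefers an explicit parallel=N from DEB_BUILD_OPTIONS, then the CPU
# count reported by nproc, and finally falls back to a single job.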
%:
dh $@ --with autoreconf,dkms,python3,sphinxdoc
override_dh_autoreconf:
@# Embed the downstream version in the module.
@sed -e 's/^Version:.*/Version: $(DEB_VERSION_UPSTREAM)/' -i.orig META
dh_autoreconf
override_dh_auto_configure:
@# Build the userland, but don't build the kernel modules.
dh_auto_configure -- @CFGOPTS@ \
--bindir=/usr/bin \
--sbindir=/sbin \
--libdir=/lib/"$(DEB_HOST_MULTIARCH)" \
--with-udevdir=/lib/udev \
--with-zfsexecdir=/usr/lib/zfs-linux \
--enable-systemd \
--enable-pyzfs \
--with-python=python3 \
--with-pammoduledir='/lib/$(DEB_HOST_MULTIARCH)/security' \
--with-pkgconfigdir='/usr/lib/$(DEB_HOST_MULTIARCH)/pkgconfig' \
--with-systemdunitdir=/lib/systemd/system \
--with-systemdpresetdir=/lib/systemd/system-preset \
--with-systemdgeneratordir=/lib/systemd/system-generators \
--with-config=user
for i in $(wildcard $(CURDIR)/debian/*.install.in) ; do \
basename "$$i" | grep _KVERS_ && continue ; \
sed 's/@DEB_HOST_MULTIARCH@/$(DEB_HOST_MULTIARCH)/g' "$$i" > "$${i%%.in}" ; \
done
override_dh_gencontrol:
dh_gencontrol -- -Vlinux:Recommends="linux-libc-dev (<< $(LINUX_NEXT)~), linux-libc-dev (>= $(LINUX_MIN)~),"
override_dh_auto_build:
@# Get a bare copy of the source code for DKMS.
@# This creates the $(CURDIR)/$(NAME)-$(DEB_VERSION_UPSTREAM)/ tree, which does not
@# contain the userland sources. NB: Remove-userland-dist-rules.patch
$(MAKE) distdir
dh_auto_build
override_dh_auto_install:
@# Install the utilities.
$(MAKE) install DESTDIR='$(CURDIR)/debian/tmp'
# Use upstream's bash completion
install -D -t '$(CURDIR)/debian/tmp/usr/share/bash-completion/completions/' \
'$(CURDIR)/contrib/bash_completion.d/zfs'
# Move from bin_dir to /usr/sbin
# Remove suffix (.py) as per policy 10.4 - Scripts
# https://www.debian.org/doc/debian-policy/ch-files.html#s-scripts
mkdir -p '$(CURDIR)/debian/tmp/usr/sbin/'
mv '$(CURDIR)/debian/tmp/usr/bin/arc_summary' '$(CURDIR)/debian/tmp/usr/sbin/arc_summary'
mv '$(CURDIR)/debian/tmp/usr/bin/arcstat' '$(CURDIR)/debian/tmp/usr/sbin/arcstat'
mv '$(CURDIR)/debian/tmp/usr/bin/dbufstat' '$(CURDIR)/debian/tmp/usr/sbin/dbufstat'
mv '$(CURDIR)/debian/tmp/usr/bin/zilstat' '$(CURDIR)/debian/tmp/usr/sbin/zilstat'
@# Zed has dependencies outside of the system root.
mv '$(CURDIR)/debian/tmp/sbin/zed' '$(CURDIR)/debian/tmp/usr/sbin/zed'
@# Install the DKMS source.
@# We only want the files needed to build the modules
install -D -t '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/scripts' \
'$(CURDIR)/scripts/enum-extract.pl' \
'$(CURDIR)/scripts/dkms.postbuild'
$(foreach file,$(DKMSFILES),mv '$(CURDIR)/$(NAME)-$(DEB_VERSION_UPSTREAM)/$(file)' '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)' || exit 1;)
@# Only ever build Linux modules
echo 'SUBDIRS = linux' > '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/include/os/Makefile.am'
@# Hellish awk line:
@# * Deletes from configure.ac the parts not needed for building the kernel module
@# * It deletes from inside AC_CONFIG_FILES([]) everything except:
@# - Makefile$
@# - include/(Makefile|sys|os/(Makefile|linux))
@# - module/
@# - zfs.release$
@# * Takes care of spaces and tabs
@# * Remove reference to ZFS_AC_PACKAGE
awk '/^AC_CONFIG_FILES\(\[/,/^\]\)/ {\
if ($$0 !~ /^(AC_CONFIG_FILES\(\[([ \t]+)?$$|\]\)([ \t]+)?$$|([ \t]+)?(include\/(Makefile|sys|os\/(Makefile|linux))|module\/|Makefile([ \t]+)?$$|zfs\.release([ \t]+)?$$))/) \
{next} } {print}' \
'$(CURDIR)/$(NAME)-$(DEB_VERSION_UPSTREAM)/configure.ac' | sed '/ZFS_AC_PACKAGE/d' > '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/configure.ac'
@# Set "SUBDIRS = module include" for CONFIG_KERNEL and remove SUBDIRS for all other configs.
@# Do not regenerate zfs_gitrev.h during dkms build
sed '1,/CONFIG_KERNEL/s/SUBDIRS.*=.*//g;s/SUBDIRS.*=.*/SUBDIRS = module include/g;/make_gitrev.sh/d' \
'$(CURDIR)/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am' > '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am'
@# Sanity test
grep -q 'SUBDIRS = module include' '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am'
sed -i '/rpm.Makefile/d' $(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am
sed -i '/cmd.Makefile/d' $(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am
sed -i '/contrib.Makefile/d' $(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am
sed -i '/etc.Makefile/d' $(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am
sed -i '/lib.Makefile/d' $(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am
sed -i '/man.Makefile/d' $(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am
sed -i '/scripts.Makefile/d' $(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am
sed -i '/tests.Makefile/d' $(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am
sed -i '/udev.Makefile/d' $(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/Makefile.am
@# Run autogen on the stripped source tree
cd '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)'; ./autogen.sh
rm -fr '$(CURDIR)/debian/tmp/usr/src/$(NAME)-$(DEB_VERSION_UPSTREAM)/autom4te.cache'
for i in `ls $(CURDIR)/debian/tmp/lib/$(DEB_HOST_MULTIARCH)/*.so`; do \
ln -s '/lib/$(DEB_HOST_MULTIARCH)/'`readlink $${i}` '$(CURDIR)/debian/tmp/usr/lib/$(DEB_HOST_MULTIARCH)/'`basename $${i}`; \
rm $${i}; \
done
chmod a-x '$(CURDIR)/debian/tmp/etc/zfs/zfs-functions'
chmod a-x '$(CURDIR)/debian/tmp/etc/default/zfs'
chmod a-x '$(CURDIR)/debian/tmp/usr/share/bash-completion/completions/zfs'
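# Illustration (hypothetical configure.ac excerpt) of the awk/sed filtering
# performed earlier in this rule: given a block such as
#   AC_CONFIG_FILES([
#           Makefile
#           cmd/Makefile
#           include/Makefile
#           module/Makefile
#           zfs.release
#   ])
# only Makefile, include/Makefile, module/Makefile and zfs.release are kept,
# and any line referencing ZFS_AC_PACKAGE is dropped from the result.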
override_dh_python3:
dh_python3 -p openzfs-python3-pyzfs
override_dh_dkms:
'$(CURDIR)/scripts/dkms.mkconf' -n $(NAME) -v $(DEB_VERSION_UPSTREAM) -f '$(CURDIR)/scripts/zfs-dkms.dkms'
dh_dkms
rm -f '$(CURDIR)/scripts/zfs-dkms.dkms'
override_dh_makeshlibs:
dh_makeshlibs -a -V
override_dh_strip:
dh_strip
override_dh_auto_clean:
rm -rf zfs-$(DEB_VERSION_UPSTREAM)
dh_auto_clean
@if test -e META.orig; then mv META.orig META; fi
override_dh_install:
find debian/tmp/lib -name '*.la' -delete
dh_install
override_dh_missing:
dh_missing --fail-missing
override_dh_installinit:
dh_installinit -r --no-restart-after-upgrade --name zfs-import
dh_installinit -r --no-restart-after-upgrade --name zfs-mount
dh_installinit -r --no-restart-after-upgrade --name zfs-load-key
dh_installinit -R --name zfs-share
dh_installinit -R --name zfs-zed
override_dh_installsystemd:
mkdir -p debian/openzfs-zfsutils/lib/systemd/system
ln -sr /dev/null debian/openzfs-zfsutils/lib/systemd/system/zfs-import.service
dh_installsystemd --no-stop-on-upgrade -X zfs-zed.service
dh_installsystemd --name zfs-zed
override_dh_installdocs:
dh_installdocs -A
ifeq (,$(findstring nodoc, $(DEB_BUILD_OPTIONS)))
http_proxy='127.0.0.1:9' sphinx-build -N -bhtml "$(CURDIR)/contrib/pyzfs/docs/source/" debian/openzfs-pyzfs-doc/usr/share/doc/openzfs-pyzfs-doc/html/
endif
# ------------
override_dh_prep-deb-files:
for templ in $(wildcard $(CURDIR)/debian/*_KVERS_*.in); do \
sed -e 's/##KVERS##/$(KVERS)/g ; s/#KVERS#/$(KVERS)/g ; s/_KVERS_/$(KVERS)/g ; s/##KDREV##/$(KDREV)/g ; s/#KDREV#/$(KDREV)/g ; s/_KDREV_/$(KDREV)/g ; s/_ARCH_/$(DEB_HOST_ARCH)/' \
< $$templ > `echo $$templ | sed -e 's/_KVERS_/$(KVERS)/g ; s/_ARCH_/$(DEB_HOST_ARCH)/g ; s/\.in$$//'` ; \
done
sed -e 's/##KVERS##/$(KVERS)/g ; s/#KVERS#/$(KVERS)/g ; s/_KVERS_/$(KVERS)/g ; s/##KDREV##/$(KDREV)/g ; s/#KDREV#/$(KDREV)/g ; s/_KDREV_/$(KDREV)/g ; s/_ARCH_/$(DEB_HOST_ARCH)/g' \
< debian/control.modules.in > debian/control
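# A minimal sketch of the template-name expansion above, with hypothetical
# values for KVERS, DEB_HOST_ARCH and the template filename (real values
# come from the build environment):
#   KVERS=6.1.0-13-amd64 DEB_HOST_ARCH=amd64
#   echo 'debian/zfs-modules-_KVERS_.postinst.in' | \
#       sed -e "s/_KVERS_/$KVERS/g ; s/_ARCH_/$DEB_HOST_ARCH/g ; s/\.in$//"
#   # -> debian/zfs-modules-6.1.0-13-amd64.postinst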
override_dh_configure_modules: override_dh_configure_modules_stamp
override_dh_configure_modules_stamp:
./configure @CFGOPTS@ \
--with-config=kernel \
--with-linux=$(KSRC) \
--with-linux-obj=$(KOBJ)
touch override_dh_configure_modules_stamp
override_dh_binary-modules: override_dh_prep-deb-files override_dh_configure_modules
dh_testdir
dh_testroot
dh_prep
$(MAKE) $(NJOBS) -C $(CURDIR)/module modules
dh_install -p${pmodules}
dh_installdocs -p${pmodules}
dh_installchangelogs -p${pmodules}
dh_compress -p${pmodules}
dh_strip -p${pmodules}
dh_fixperms -p${pmodules}
dh_installdeb -p${pmodules}
dh_gencontrol -p${pmodules}
dh_md5sums -p${pmodules}
dh_builddeb -p${pmodules}
debian-copyright:
cme update dpkg-copyright -file debian/copyright.cme
diff --git a/sys/contrib/openzfs/contrib/initramfs/scripts/zfs b/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
index 7f977a30f75b..0a2bd2efda7a 100644
--- a/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
+++ b/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
@@ -1,1022 +1,1022 @@
# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
#
# $quiet, $root, $rpool, $bootfs come from the cmdline:
# shellcheck disable=SC2154
# Source the common functions
. /etc/zfs/zfs-functions
# Start interactive shell.
# Use Debian's panic() if defined, because it makes it possible to prevent shell access
# by setting panic in cmdline (e.g. panic=0 or panic=15).
# See "4.5 Disable root prompt on the initramfs" of Securing Debian Manual:
# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
shell() {
if command -v panic > /dev/null 2>&1; then
panic
else
/bin/sh
fi
}
# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
if command -v run_scripts > /dev/null 2>&1
then
if [ -f "/scripts/local-top" ] || [ -d "/scripts/local-top" ]
then
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Running /scripts/local-top"
run_scripts /scripts/local-top
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
if [ -f "/scripts/local-premount" ] || [ -d "/scripts/local-premount" ]
then
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Running /scripts/local-premount"
run_scripts /scripts/local-premount
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
fi
}
# If plymouth is available, hide the splash image.
disable_plymouth()
{
if [ -x /bin/plymouth ] && /bin/plymouth --ping
then
/bin/plymouth hide-splash >/dev/null 2>&1
fi
}
# Get a ZFS filesystem property value.
get_fs_value()
{
fs="$1"
value=$2
"${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null
}
# Find the 'bootfs' property on pool $1.
# If the property does not contain '/', then ignore this
# pool by exporting it again.
find_rootfs()
{
pool="$1"
# If 'POOL_IMPORTED' isn't set, no pool has been imported and therefore
# we won't be able to find a root fs.
[ -z "${POOL_IMPORTED}" ] && return 1
# If it's already specified, just keep it mounted and exit;
# the user (kernel command line) must be correct.
if [ -n "${ZFS_BOOTFS}" ] && [ "${ZFS_BOOTFS}" != "zfs:AUTO" ]; then
return 0
fi
# Not set, try to find it in the 'bootfs' property of the pool.
# NOTE: zpool does not support 'get -H -ovalue bootfs'...
ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")
# Make sure it's not '-' and that it starts with /.
if [ "${ZFS_BOOTFS}" != "-" ] && \
get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
then
# Keep it mounted
POOL_IMPORTED=1
return 0
fi
# Not the boot fs here; export it and try again later.
"${ZPOOL}" export "$pool"
POOL_IMPORTED=
ZFS_BOOTFS=
return 1
}
# Support function to get a list of all pools, separated with ';'
find_pools()
{
pools=$("$@" 2> /dev/null | \
sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | \
tr '\n' ';')
echo "${pools%%;}" # Return without the last ';'.
}
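# A minimal, never-called sketch of what find_pools() does, using
# hypothetical "zpool import" output (real output has more fields):
find_pools_example()
{
	printf '   pool: rpool\n  state: ONLINE\n   pool: tank\n' | \
		sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | \
		tr '\n' ';'
	# Prints "rpool;tank;"; find_pools() then strips the trailing ';'.
}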
# Get a list of all available pools
get_pools()
{
if [ -n "${ZFS_POOL_IMPORT}" ]; then
echo "$ZFS_POOL_IMPORT"
return 0
fi
# Get the base list of available pools.
available_pools=$(find_pools "$ZPOOL" import)
# Just in case - it has happened that a pool isn't visible/found
# with a simple "zpool import", but only shows up when using the "-d"
# option or setting ZPOOL_IMPORT_PATH.
if [ -d "/dev/disk/by-id" ]
then
npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
if [ -n "$npools" ]
then
# Because we have found extra pool(s) here which weren't
# found 'normally', we need to force USE_DISK_BY_ID to
# make sure we're able to actually import them later.
USE_DISK_BY_ID='yes'
if [ -n "$available_pools" ]
then
# Filter out duplicates (pools found with the simple
# "zpool import" but which are also found with the
# "zpool import -d ...").
npools=$(echo "$npools" | sed "s,$available_pools,,")
# Add the list to the existing list of
# available pools
available_pools="$available_pools;$npools"
else
available_pools="$npools"
fi
fi
fi
# Filter out any exceptions...
if [ -n "$ZFS_POOL_EXCEPTIONS" ]
then
found=""
apools=""
OLD_IFS="$IFS" ; IFS=";"
for pool in $available_pools
do
for exception in $ZFS_POOL_EXCEPTIONS
do
[ "$pool" = "$exception" ] && continue 2
found="$pool"
done
if [ -n "$found" ]
then
if [ -n "$apools" ]
then
apools="$apools;$pool"
else
apools="$pool"
fi
fi
done
IFS="$OLD_IFS"
available_pools="$apools"
fi
# Return list of available pools.
echo "$available_pools"
}
# Import given pool $1
import_pool()
{
pool="$1"
# Verify that the pool isn't already imported.
# Try as hard as we can to avoid requiring '-f' to import.
"${ZPOOL}" get -H -o value name,guid 2>/dev/null | grep -Fxq "$pool" && return 0
# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
# to something we can use later with the real import(s). We want to
# make sure we find all by* dirs, BUT by-vdev should be first (if it
# exists).
if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ]
then
dirs="$(for dir in /dev/disk/by-*
do
# Ignore by-vdev here - we want it first!
echo "$dir" | grep -q /by-vdev && continue
[ ! -d "$dir" ] && continue
printf "%s" "$dir:"
done | sed 's,:$,,g')"
if [ -d "/dev/disk/by-vdev" ]
then
# Add by-vdev at the beginning.
ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
fi
# ... and /dev at the very end, just for good measure.
ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
fi
# Needs to be exported for "zpool" to catch it.
[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH
[ "$quiet" != "y" ] && zfs_log_begin_msg \
"Importing pool '${pool}' using defaults"
ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
ZFS_ERROR="$?"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
if [ -f "${ZPOOL_CACHE}" ]
then
[ "$quiet" != "y" ] && zfs_log_begin_msg \
"Importing pool '${pool}' using cachefile."
ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
ZFS_ERROR="$?"
fi
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: ${ZFS_CMD} '$pool'"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to import pool '$pool'."
echo "Manually import the pool and exit."
shell
fi
fi
[ "$quiet" != "y" ] && zfs_log_end_msg
POOL_IMPORTED=1
return 0
}
# Load ZFS modules
# Loading a module in an initrd requires a slightly different approach,
# with more logging etc.
load_module_initrd()
{
ZFS_INITRD_PRE_MOUNTROOT_SLEEP=${ROOTDELAY:-0}
if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ]; then
[ "$quiet" != "y" ] && zfs_log_begin_msg "Delaying for up to '${ZFS_INITRD_PRE_MOUNTROOT_SLEEP}' seconds."
fi
START=$(/bin/date -u +%s)
END=$((START+ZFS_INITRD_PRE_MOUNTROOT_SLEEP))
while true; do
# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
if command -v wait_for_udev > /dev/null 2>&1 ; then
wait_for_udev 10
elif command -v wait_for_dev > /dev/null 2>&1 ; then
wait_for_dev
fi
#
# zpool import refuses to import without a valid
# /proc/self/mounts
#
[ ! -f /proc/self/mounts ] && mount proc /proc
# Load the module
if load_module "zfs"; then
ret=0
break
else
ret=1
fi
[ "$(/bin/date -u +%s)" -gt "$END" ] && break
sleep 1
done
if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ]; then
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
[ "$ret" -ne 0 ] && return 1
if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null
then
if [ "$quiet" != "y" ]; then
zfs_log_begin_msg "Sleeping for" \
"$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
fi
sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
# Mount a given filesystem
mount_fs()
{
fs="$1"
# Check that the filesystem exists
"${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || return 1
# Skip filesystems with canmount=off. The root fs should not have
# canmount=off, but ignore it for backwards compatibility just in case.
if [ "$fs" != "${ZFS_BOOTFS}" ]
then
canmount=$(get_fs_value "$fs" canmount)
[ "$canmount" = "off" ] && return 0
fi
# Need the _original_ datasets mountpoint!
mountpoint=$(get_fs_value "$fs" mountpoint)
- ZFS_CMD="mount.zfs -o zfsutil"
+ ZFS_CMD="mount -o zfsutil -t zfs"
if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ]; then
# Can't use the mountpoint property. Might be one of our
# clones. Check the 'org.zol:mountpoint' property set in
# clone_snap() if that's usable.
mountpoint1=$(get_fs_value "$fs" org.zol:mountpoint)
if [ "$mountpoint1" = "legacy" ] ||
[ "$mountpoint1" = "none" ] ||
[ "$mountpoint1" = "-" ]
then
if [ "$fs" != "${ZFS_BOOTFS}" ]; then
# We don't have a proper mountpoint and this
# isn't the root fs.
return 0
fi
# Don't use '-o zfsutil' for a legacy mountpoint
if [ "$mountpoint" = "legacy" ]; then
- ZFS_CMD="mount.zfs"
+ ZFS_CMD="mount -t zfs"
fi
# Last hail-mary: Hope 'rootmnt' is set!
mountpoint=""
else
mountpoint="$mountpoint1"
fi
fi
# Possibly decrypt a filesystem using native encryption.
decrypt_fs "$fs"
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
[ -n "${ZFS_DEBUG}" ] && \
zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"
ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
ZFS_ERROR=$?
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
echo "Manually mount the filesystem and exit."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
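# For reference, manual equivalents of the command built above, with
# hypothetical dataset and mount-point names:
#   mount -o zfsutil -t zfs rpool/ROOT/debian "${rootmnt}"       # regular mountpoint
#   mount -t zfs rpool/ROOT/debian/legacyfs "${rootmnt}/srv"     # legacy mountpoint (no zfsutil)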
# Unlock a ZFS native encrypted filesystem.
decrypt_fs()
{
fs="$1"
# If pool encryption is active and the zfs command understands '-o encryption'
if [ "$(zpool list -H -o feature@encryption "${fs%%/*}")" = 'active' ]; then
# Determine dataset that holds key for root dataset
ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"
echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name
# If root dataset is encrypted...
if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
# Continue only if the key needs to be loaded
[ "$KEYSTATUS" = "unavailable" ] || return 0
# Try extensions first
for f in "/etc/zfs/initramfs-tools-load-key" "/etc/zfs/initramfs-tools-load-key.d/"*; do
[ -r "$f" ] || continue
(. "$f") && {
# Successful return and actually-loaded key: we're done
KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
[ "$KEYSTATUS" = "unavailable" ] || return 0
}
done
# Do not prompt if the key is stored noninteractively.
if ! [ "${KEYLOCATION}" = "prompt" ]; then
$ZFS load-key "${ENCRYPTIONROOT}"
# Prompt with plymouth, if active
elif /bin/plymouth --ping 2>/dev/null; then
echo "plymouth" > /run/zfs_console_askpwd_cmd
for _ in 1 2 3; do
plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
$ZFS load-key "${ENCRYPTIONROOT}" && break
done
# Prompt with systemd, if active
elif [ -e /run/systemd/system ]; then
echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
for _ in 1 2 3; do
systemd-ask-password --no-tty "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
$ZFS load-key "${ENCRYPTIONROOT}" && break
done
# Prompt with ZFS tty, otherwise
else
# Temporarily setting "printk" to "7" allows the prompt to appear even when the "quiet" kernel option has been used
echo "load-key" > /run/zfs_console_askpwd_cmd
read -r storeprintk _ < /proc/sys/kernel/printk
echo 7 > /proc/sys/kernel/printk
$ZFS load-key "${ENCRYPTIONROOT}"
echo "$storeprintk" > /proc/sys/kernel/printk
fi
fi
fi
return 0
}
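# A never-called sketch of the manual fallback for decrypt_fs(), with a
# hypothetical dataset name; useful from the rescue shell if unlocking fails:
manual_unlock_example()
{
	encroot=$(get_fs_value rpool/ROOT/debian encryptionroot)
	"${ZFS}" load-key "$encroot"	# prompts for the passphrase on the console
}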
# Destroy a given filesystem.
destroy_fs()
{
fs="$1"
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Destroying '$fs'"
ZFS_CMD="${ZFS} destroy $fs"
ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
ZFS_ERROR="$?"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
echo "Hint: Try: zfs destroy -Rfn $fs"
echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
# Clone snapshot $1 to destination filesystem $2
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
snap="$1"
destfs="$2"
mountpoint="$3"
[ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"
# Clone the snapshot into a dataset we can boot from
# + We don't want this filesystem to be automatically mounted, we
# want control over this here and nowhere else.
# + We don't need any mountpoint set for the same reason.
# We use the 'org.zol:mountpoint' property to remember the mountpoint.
ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
ZFS_CMD="${ZFS_CMD} $snap $destfs"
ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
ZFS_ERROR="$?"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to clone snapshot."
echo "Make sure that any problems are corrected and then make sure"
echo "that the dataset '$destfs' exists and is bootable."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
# Rollback a given snapshot.
rollback_snap()
{
snap="$1"
[ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"
ZFS_CMD="${ZFS} rollback -Rf $snap"
ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
ZFS_ERROR="$?"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to rollback snapshot."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
# Get a list of snapshots, give them as a numbered list
# to the user to choose from.
ask_user_snap()
{
fs="$1"
# We need to temporarily disable debugging. Set 'debug' so we
# remember to enable it again.
if [ -n "${ZFS_DEBUG}" ]; then
unset ZFS_DEBUG
set +x
debug=1
fi
# Because we need the resulting snapshot, which is sent on
# stdout to the caller, we use stderr for our questions.
echo "What snapshot do you want to boot from?" > /dev/stderr
# shellcheck disable=SC2046
IFS="
" set -- $("${ZFS}" list -H -oname -tsnapshot -r "${fs}")
i=1
for snap in "$@"; do
echo " $i: $snap"
i=$((i + 1))
done > /dev/stderr
# expr instead of test here because [ a -lt 0 ] errors out,
# but expr falls back to lexicographical comparison, which works out right
snapnr=0
while expr "$snapnr" "<" 1 > /dev/null ||
expr "$snapnr" ">" "$#" > /dev/null
do
printf "%s" "Snap nr [1-$#]? " > /dev/stderr
read -r snapnr
done
# Re-enable debugging.
if [ -n "${debug}" ]; then
ZFS_DEBUG=1
set -x
fi
eval echo '$'"$snapnr"
}
setup_snapshot_booting()
{
snap="$1"
retval=0
# Make sure that the snapshot specified actually exists.
if [ -z "$(get_fs_value "${snap}" type)" ]
then
# Snapshot does not exist (...@<null> ?)
# ask the user for a snapshot to use.
snap="$(ask_user_snap "${snap%%@*}")"
fi
# Separate the full snapshot ('$snap') into its filesystem and
# snapshot names. A split() function would have been nice.
rootfs="${snap%%@*}"
snapname="${snap##*@}"
ZFS_BOOTFS="${rootfs}_${snapname}"
if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
then
# If the destination dataset for the clone
# already exists, destroy it (recursively).
if [ -n "$(get_fs_value "${rootfs}_${snapname}" type)" ]
then
filesystems=$("${ZFS}" list -oname -tfilesystem -H \
-r -Sname "${ZFS_BOOTFS}")
for fs in $filesystems; do
destroy_fs "${fs}"
done
fi
fi
# Get all snapshots, recursively (might need to clone /usr, /var etc
# as well).
for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
grep "${snapname}")
do
if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
then
# Rollback snapshot
rollback_snap "$s" || retval=$((retval + 1))
ZFS_BOOTFS="${rootfs}"
else
# Set up a destination filesystem name.
# Ex: Called with 'rpool/ROOT/debian@snap2'
# rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
# rpool/ROOT/debian/boot@snap2 => rpool/ROOT/debian_snap2/boot
# rpool/ROOT/debian/usr@snap2 => rpool/ROOT/debian_snap2/usr
# rpool/ROOT/debian/var@snap2 => rpool/ROOT/debian_snap2/var
subfs="${s##"$rootfs"}"
subfs="${subfs%%@"$snapname"}"
destfs="${rootfs}_${snapname}" # base fs.
[ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.
# Get the mountpoint of the filesystem, to be used
# with clone_snap(). If legacy or none, then use
# the sub fs value.
mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
if [ "$mountpoint" = "legacy" ] || \
[ "$mountpoint" = "none" ]
then
if [ -n "${subfs}" ]; then
mountpoint="${subfs}"
else
mountpoint="/"
fi
fi
# Clone the snapshot into its own
# filesystem
clone_snap "$s" "${destfs}" "${mountpoint}" || \
retval=$((retval + 1))
fi
done
# If we haven't returned yet, we have a problem...
return "${retval}"
}
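# A never-called sketch of the name mangling done above, with hypothetical
# dataset and snapshot names:
snapshot_naming_example()
{
	rootfs="rpool/ROOT/debian" snapname="snap2"
	s="rpool/ROOT/debian/usr@snap2"
	subfs="${s##"$rootfs"}"			# -> /usr@snap2
	subfs="${subfs%%@"$snapname"}"		# -> /usr
	echo "${rootfs}_${snapname}${subfs}"	# -> rpool/ROOT/debian_snap2/usr
}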
# ================================================================
# This is the main function.
mountroot()
{
# ----------------------------------------------------------------
# I N I T I A L S E T U P
# ------------
# Run the pre-mount scripts from /scripts/local-top.
pre_mountroot
# ------------
# Source the default setup variables.
[ -r '/etc/default/zfs' ] && . /etc/default/zfs
# ------------
# Support debug option
if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
then
ZFS_DEBUG=1
mkdir /var/log
#exec 2> /var/log/boot.debug
set -x
fi
# ------------
# Load ZFS module etc.
if ! load_module_initrd; then
disable_plymouth
echo ""
echo "Failed to load ZFS modules."
echo "Manually load the modules and exit."
shell
fi
# ------------
# Look for the cache file (if any).
[ -f "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE
[ -s "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE
# ------------
# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
# 'root' is for Redhat/Fedora (etc),
# 'REAL_ROOT' is for Gentoo
if [ -z "$ROOT" ]
then
[ -n "$root" ] && ROOT=${root}
[ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
fi
# ------------
# Where to mount the root fs in the initrd - set outside this script
# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
# 'NEWROOT' is for RedHat/Fedora (etc),
# 'NEW_ROOT' is for Gentoo
if [ -z "$rootmnt" ]
then
[ -n "$NEWROOT" ] && rootmnt=${NEWROOT}
[ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
fi
# ------------
# No longer set in the defaults file, but it could have been set in
# get_pools() in some circumstances. If it's set to something other
# than 'yes', it's no good to us.
[ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] && \
unset USE_DISK_BY_ID
# ----------------------------------------------------------------
# P A R S E C O M M A N D L I N E O P T I O N S
# This part is the really ugly part - there are so many options and permutations
# 'out there', and if this is to be the 'primary' source for ZFS initrd
# scripting, we should support them all.
#
# Supports the following kernel command line argument combinations
# (in this order - first match wins):
#
# rpool=<pool> (tries to find bootfs automatically)
# bootfs=<pool>/<dataset> (uses this for rpool - first part)
# rpool=<pool> bootfs=<pool>/<dataset>
# -B zfs-bootfs=<pool>/<fs> (uses this for rpool - first part)
# rpool=rpool (default if none of the above is used)
# root=<pool>/<dataset> (uses this for rpool - first part)
# root=ZFS=<pool>/<dataset> (uses this for rpool - first part, without 'ZFS=')
# root=zfs:AUTO (tries to detect both pool and rootfs)
# root=zfs:<pool>/<dataset> (uses this for rpool - first part, without 'zfs:')
#
# Option <dataset> could also be <snapshot>
# Option <pool> could also be <guid>
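#
# Examples of complete command lines handled by the cases above
# (pool/dataset names are hypothetical):
#   boot=zfs root=ZFS=rpool/ROOT/debian
#   boot=zfs root=zfs:AUTO
#   boot=zfs rpool=tank bootfs=tank/ROOT/debian@snap2 rollback=1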
# ------------
# Support force option
# In addition, setting one of zfs_force, zfs.force or zfsforce to
# 'yes', 'on' or '1' will make sure we force import the pool.
# This should (almost) never be needed, but it's here for
# completeness.
ZPOOL_FORCE=""
if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
then
ZPOOL_FORCE="-f"
fi
# ------------
# Look for 'rpool' and 'bootfs' parameter
[ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
[ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"
# ------------
# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
# 'ROOT'
[ -n "$ROOT" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"
# ------------
# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
# NOTE: Only use the pool name and dataset. The rest is not
# supported by OpenZFS (whatever it's for).
if [ -z "$ZFS_RPOOL" ]
then
# The ${zfs-bootfs} variable is set at the kernel command
# line, usually by GRUB, but it cannot be referenced here
# directly because Bourne variable names cannot contain a
# hyphen.
#
# Reassign the variable by dumping the environment and
# stripping the zfs-bootfs= prefix. Let the shell handle
# quoting through the eval command:
# shellcheck disable=SC2046
eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
fi
# ------------
# No root fs or pool specified - do auto detect.
if [ -z "$ZFS_RPOOL" ] && [ -z "${ZFS_BOOTFS}" ]
then
# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
# which will be caught later
ROOT='zfs:AUTO'
fi
# ----------------------------------------------------------------
# F I N D A N D I M P O R T C O R R E C T P O O L
# ------------
if [ "$ROOT" = "zfs:AUTO" ]
then
# Try to detect both pool and root fs.
# If we got here, that means we don't have a hint as to
# the root dataset, but with root=zfs:AUTO on the cmdline
# ZFS_BOOTFS may contain "zfs:AUTO" here, which would interfere with checks later.
ZFS_BOOTFS=
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Attempting to import additional pools."
# Get a list of pools available for import
if [ -n "$ZFS_RPOOL" ]
then
# We've specified a pool - check only that
POOLS=$ZFS_RPOOL
else
POOLS=$(get_pools)
fi
OLD_IFS="$IFS" ; IFS=";"
for pool in $POOLS
do
[ -z "$pool" ] && continue
IFS="$OLD_IFS" import_pool "$pool"
IFS="$OLD_IFS" find_rootfs "$pool" && break
done
IFS="$OLD_IFS"
[ "$quiet" != "y" ] && zfs_log_end_msg "$ZFS_ERROR"
else
# No auto - use value from the command line option.
# Strip 'zfs:' and 'ZFS='.
ZFS_BOOTFS="${ROOT#*[:=]}"
# Strip everything after the first slash.
ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
fi
# Import the pool (if not already done so in the AUTO check above).
if [ -n "$ZFS_RPOOL" ] && [ -z "${POOL_IMPORTED}" ]
then
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"
import_pool "${ZFS_RPOOL}"
find_rootfs "${ZFS_RPOOL}"
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
if [ -z "${POOL_IMPORTED}" ]
then
# No pool imported, this is serious!
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "No pool imported. Manually import the root pool"
echo "at the command prompt and then exit."
echo "Hint: Try: zpool import -N ${ZFS_RPOOL}"
shell
fi
# In case the pool was specified as guid, resolve guid to name
pool="$("${ZPOOL}" get -H -o name,value name,guid | \
awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
if [ -n "$pool" ]; then
# If $ZFS_BOOTFS contains guid, replace the guid portion with $pool
ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
sed -e "s/$("${ZPOOL}" get -H -o value guid "$pool")/$pool/g")
ZFS_RPOOL="${pool}"
fi
# ----------------------------------------------------------------
# P R E P A R E R O O T F I L E S Y S T E M
if [ -n "${ZFS_BOOTFS}" ]
then
# Booting from a snapshot?
# Will overwrite the ZFS_BOOTFS variable like so:
# rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
echo "${ZFS_BOOTFS}" | grep -q '@' && \
setup_snapshot_booting "${ZFS_BOOTFS}"
fi
if [ -z "${ZFS_BOOTFS}" ]
then
# Still nothing! Let the user sort this out.
disable_plymouth
echo ""
echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
echo " not specified on the kernel command line."
echo ""
echo "Manually mount the root filesystem on $rootmnt and then exit."
- echo "Hint: Try: mount.zfs -o zfsutil ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
+ echo "Hint: Try: mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
shell
fi
# ----------------------------------------------------------------
# M O U N T F I L E S Y S T E M S
# * Ideally, the root filesystem would be mounted like this:
#
# zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
# zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
#
# but the MOUNTPOINT prefix is preserved on descendant filesystems
# after the pivot into the regular root, which later breaks things
# like `zfs mount -a` and the /proc/self/mounts refresh.
#
# * Mount additional filesystems required
# Such as /usr, /var, /usr/local etc.
# NOTE: Mounted in the order specified in the
# ZFS_INITRD_ADDITIONAL_DATASETS variable so take care!
# Go through the complete list (recursively) of all filesystems below
# the real root dataset
filesystems="$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")"
OLD_IFS="$IFS" ; IFS="
"
for fs in $filesystems; do
IFS="$OLD_IFS" mount_fs "$fs"
done
IFS="$OLD_IFS"
for fs in $ZFS_INITRD_ADDITIONAL_DATASETS; do
mount_fs "$fs"
done
touch /run/zfs_unlock_complete
if [ -e /run/zfs_unlock_complete_notify ]; then
read -r < /run/zfs_unlock_complete_notify
fi
# ------------
# Debugging information
if [ -n "${ZFS_DEBUG}" ]
then
#exec 2>&1-
echo "DEBUG: imported pools:"
"${ZPOOL}" list -H
echo
echo "DEBUG: mounted ZFS filesystems:"
mount | grep zfs
echo
echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
printf "%s" " 'c' for shell, 'r' for reboot, 'ENTER' to continue. "
read -r b
[ "$b" = "c" ] && /bin/sh
[ "$b" = "r" ] && reboot -f
set +x
fi
# ------------
# Run local bottom script
if command -v run_scripts > /dev/null 2>&1
then
if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ]
then
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Running /scripts/local-bottom"
run_scripts /scripts/local-bottom
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
fi
}
diff --git a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
index 979546ab3090..08a8640669b3 100644
--- a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
+++ b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
@@ -1,883 +1,931 @@
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Copyright (c) 2020, Felix Dörre
* All rights reserved.
*/
#include <sys/dsl_crypt.h>
#include <sys/byteorder.h>
#include <libzfs.h>
#include <syslog.h>
#include <sys/zio_crypt.h>
#include <openssl/evp.h>
#define PAM_SM_AUTH
#define PAM_SM_PASSWORD
#define PAM_SM_SESSION
#include <security/pam_modules.h>
#if defined(__linux__)
#include <security/pam_ext.h>
#define MAP_FLAGS MAP_PRIVATE | MAP_ANONYMOUS
#elif defined(__FreeBSD__)
#include <security/pam_appl.h>
static void
pam_syslog(pam_handle_t *pamh, int loglevel, const char *fmt, ...)
{
(void) pamh;
va_list args;
va_start(args, fmt);
vsyslog(loglevel, fmt, args);
va_end(args);
}
#define MAP_FLAGS MAP_PRIVATE | MAP_ANON | MAP_NOCORE
#endif
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/wait.h>
#include <pwd.h>
#include <sys/mman.h>
static const char PASSWORD_VAR_NAME[] = "pam_zfs_key_authtok";
+static const char OLD_PASSWORD_VAR_NAME[] = "pam_zfs_key_oldauthtok";
static libzfs_handle_t *g_zfs;
static void destroy_pw(pam_handle_t *pamh, void *data, int errcode);
typedef int (*mlock_func_t) (const void *, size_t);
typedef struct {
size_t len;
char *value;
} pw_password_t;
/*
* Try to mlock(2) or munlock(2) addr while handling EAGAIN by retrying ten
* times and sleeping 10 milliseconds in between for a total of 0.1
* seconds. lock_func must point to either mlock(2) or munlock(2).
*/
static int
try_lock(mlock_func_t lock_func, const void *addr, size_t len)
{
int err;
int retries = 10;
useconds_t sleep_dur = 10 * 1000;
if ((err = (*lock_func)(addr, len)) != EAGAIN) {
return (err);
}
for (int i = retries; i > 0; --i) {
(void) usleep(sleep_dur);
if ((err = (*lock_func)(addr, len)) != EAGAIN) {
break;
}
}
return (err);
}
static pw_password_t *
alloc_pw_size(size_t len)
{
pw_password_t *pw = malloc(sizeof (pw_password_t));
if (!pw) {
return (NULL);
}
pw->len = len;
/*
* We use mmap(2) rather than malloc(3) since later on we mlock(2) the
* memory region. Since mlock(2) and munlock(2) operate on whole memory
* pages, we should allocate a whole page here, as mmap(2) does. Furthermore,
* this ensures that the addresses passed to mlock(2) and munlock(2) are
* on a page boundary as suggested by FreeBSD and required by some
* other implementations. Finally we avoid inadvertently munlocking
* memory mlocked by a concurrently running instance of us.
*/
pw->value = mmap(NULL, pw->len, PROT_READ | PROT_WRITE, MAP_FLAGS,
-1, 0);
if (pw->value == MAP_FAILED) {
free(pw);
return (NULL);
}
if (try_lock(mlock, pw->value, pw->len) != 0) {
(void) munmap(pw->value, pw->len);
free(pw);
return (NULL);
}
return (pw);
}
static pw_password_t *
alloc_pw_string(const char *source)
{
size_t len = strlen(source) + 1;
pw_password_t *pw = alloc_pw_size(len);
if (!pw) {
return (NULL);
}
memcpy(pw->value, source, pw->len);
return (pw);
}
static void
pw_free(pw_password_t *pw)
{
memset(pw->value, 0, pw->len);
if (try_lock(munlock, pw->value, pw->len) == 0) {
(void) munmap(pw->value, pw->len);
}
free(pw);
}
static pw_password_t *
-pw_fetch(pam_handle_t *pamh)
+pw_fetch(pam_handle_t *pamh, int tok)
{
const char *token;
- if (pam_get_authtok(pamh, PAM_AUTHTOK, &token, NULL) != PAM_SUCCESS) {
+ if (pam_get_authtok(pamh, tok, &token, NULL) != PAM_SUCCESS) {
pam_syslog(pamh, LOG_ERR,
"couldn't get password from PAM stack");
return (NULL);
}
if (!token) {
pam_syslog(pamh, LOG_ERR,
"token from PAM stack is null");
return (NULL);
}
return (alloc_pw_string(token));
}
static const pw_password_t *
-pw_fetch_lazy(pam_handle_t *pamh)
+pw_fetch_lazy(pam_handle_t *pamh, int tok, const char *var_name)
{
- pw_password_t *pw = pw_fetch(pamh);
+ pw_password_t *pw = pw_fetch(pamh, tok);
if (pw == NULL) {
return (NULL);
}
- int ret = pam_set_data(pamh, PASSWORD_VAR_NAME, pw, destroy_pw);
+ int ret = pam_set_data(pamh, var_name, pw, destroy_pw);
if (ret != PAM_SUCCESS) {
pw_free(pw);
pam_syslog(pamh, LOG_ERR, "pam_set_data failed");
return (NULL);
}
return (pw);
}
static const pw_password_t *
-pw_get(pam_handle_t *pamh)
+pw_get(pam_handle_t *pamh, int tok, const char *var_name)
{
const pw_password_t *authtok = NULL;
- int ret = pam_get_data(pamh, PASSWORD_VAR_NAME,
+ int ret = pam_get_data(pamh, var_name,
(const void**)(&authtok));
if (ret == PAM_SUCCESS)
return (authtok);
if (ret == PAM_NO_MODULE_DATA)
- return (pw_fetch_lazy(pamh));
+ return (pw_fetch_lazy(pamh, tok, var_name));
pam_syslog(pamh, LOG_ERR, "password not available");
return (NULL);
}
static int
-pw_clear(pam_handle_t *pamh)
+pw_clear(pam_handle_t *pamh, const char *var_name)
{
- int ret = pam_set_data(pamh, PASSWORD_VAR_NAME, NULL, NULL);
+ int ret = pam_set_data(pamh, var_name, NULL, NULL);
if (ret != PAM_SUCCESS) {
pam_syslog(pamh, LOG_ERR, "clearing password failed");
return (-1);
}
return (0);
}
static void
destroy_pw(pam_handle_t *pamh, void *data, int errcode)
{
(void) pamh, (void) errcode;
if (data != NULL) {
pw_free((pw_password_t *)data);
}
}
static int
pam_zfs_init(pam_handle_t *pamh)
{
int error = 0;
if ((g_zfs = libzfs_init()) == NULL) {
error = errno;
pam_syslog(pamh, LOG_ERR, "Zfs initialization error: %s",
libzfs_error_init(error));
}
return (error);
}
static void
pam_zfs_free(void)
{
libzfs_fini(g_zfs);
}
static pw_password_t *
prepare_passphrase(pam_handle_t *pamh, zfs_handle_t *ds,
const char *passphrase, nvlist_t *nvlist)
{
pw_password_t *key = alloc_pw_size(WRAPPING_KEY_LEN);
if (!key) {
return (NULL);
}
uint64_t salt;
uint64_t iters;
if (nvlist != NULL) {
int fd = open("/dev/urandom", O_RDONLY);
if (fd < 0) {
pw_free(key);
return (NULL);
}
int bytes_read = 0;
char *buf = (char *)&salt;
size_t bytes = sizeof (uint64_t);
while (bytes_read < bytes) {
ssize_t len = read(fd, buf + bytes_read, bytes
- bytes_read);
if (len < 0) {
close(fd);
pw_free(key);
return (NULL);
}
bytes_read += len;
}
close(fd);
if (nvlist_add_uint64(nvlist,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), salt)) {
pam_syslog(pamh, LOG_ERR,
"failed to add salt to nvlist");
pw_free(key);
return (NULL);
}
iters = DEFAULT_PBKDF2_ITERATIONS;
if (nvlist_add_uint64(nvlist, zfs_prop_to_name(
ZFS_PROP_PBKDF2_ITERS), iters)) {
pam_syslog(pamh, LOG_ERR,
"failed to add iters to nvlist");
pw_free(key);
return (NULL);
}
} else {
salt = zfs_prop_get_int(ds, ZFS_PROP_PBKDF2_SALT);
iters = zfs_prop_get_int(ds, ZFS_PROP_PBKDF2_ITERS);
}
salt = LE_64(salt);
if (!PKCS5_PBKDF2_HMAC_SHA1((char *)passphrase,
strlen(passphrase), (uint8_t *)&salt,
sizeof (uint64_t), iters, WRAPPING_KEY_LEN,
(uint8_t *)key->value)) {
pam_syslog(pamh, LOG_ERR, "pbkdf failed");
pw_free(key);
return (NULL);
}
return (key);
}
static int
is_key_loaded(pam_handle_t *pamh, const char *ds_name)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
int keystatus = zfs_prop_get_int(ds, ZFS_PROP_KEYSTATUS);
zfs_close(ds);
return (keystatus != ZFS_KEYSTATUS_UNAVAILABLE);
}
static int
change_key(pam_handle_t *pamh, const char *ds_name,
const char *passphrase)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
nvlist_t *nvlist = fnvlist_alloc();
pw_password_t *key = prepare_passphrase(pamh, ds, passphrase, nvlist);
if (key == NULL) {
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
if (nvlist_add_string(nvlist,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
"prompt")) {
pam_syslog(pamh, LOG_ERR, "nvlist_add failed for keylocation");
pw_free(key);
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
if (nvlist_add_uint64(nvlist,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT),
ZFS_KEYFORMAT_PASSPHRASE)) {
pam_syslog(pamh, LOG_ERR, "nvlist_add failed for keyformat");
pw_free(key);
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
int ret = lzc_change_key(ds_name, DCP_CMD_NEW_KEY, nvlist,
(uint8_t *)key->value, WRAPPING_KEY_LEN);
pw_free(key);
if (ret) {
pam_syslog(pamh, LOG_ERR, "change_key failed: %d", ret);
nvlist_free(nvlist);
zfs_close(ds);
return (-1);
}
nvlist_free(nvlist);
zfs_close(ds);
return (0);
}
static int
decrypt_mount(pam_handle_t *pamh, const char *ds_name,
const char *passphrase, boolean_t noop)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
pw_password_t *key = prepare_passphrase(pamh, ds, passphrase, NULL);
if (key == NULL) {
zfs_close(ds);
return (-1);
}
int ret = lzc_load_key(ds_name, noop, (uint8_t *)key->value,
WRAPPING_KEY_LEN);
pw_free(key);
- if (ret) {
+ if (ret && ret != EEXIST) {
pam_syslog(pamh, LOG_ERR, "load_key failed: %d", ret);
zfs_close(ds);
return (-1);
}
if (noop) {
goto out;
}
ret = zfs_mount(ds, NULL, 0);
if (ret) {
pam_syslog(pamh, LOG_ERR, "mount failed: %d", ret);
zfs_close(ds);
return (-1);
}
out:
zfs_close(ds);
return (0);
}
static int
-unmount_unload(pam_handle_t *pamh, const char *ds_name)
+unmount_unload(pam_handle_t *pamh, const char *ds_name, boolean_t force)
{
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) {
pam_syslog(pamh, LOG_ERR, "dataset %s not found", ds_name);
return (-1);
}
- int ret = zfs_unmount(ds, NULL, 0);
+ int ret = zfs_unmount(ds, NULL, force ? MS_FORCE : 0);
if (ret) {
pam_syslog(pamh, LOG_ERR, "zfs_unmount failed with: %d", ret);
zfs_close(ds);
return (-1);
}
ret = lzc_unload_key(ds_name);
if (ret) {
pam_syslog(pamh, LOG_ERR, "unload_key failed with: %d", ret);
zfs_close(ds);
return (-1);
}
zfs_close(ds);
return (0);
}
typedef struct {
char *homes_prefix;
char *runstatedir;
char *homedir;
char *dsname;
+ uid_t uid_min;
+ uid_t uid_max;
uid_t uid;
const char *username;
- int unmount_and_unload;
+ boolean_t unmount_and_unload;
+ boolean_t force_unmount;
+ boolean_t recursive_homes;
} zfs_key_config_t;
static int
zfs_key_config_load(pam_handle_t *pamh, zfs_key_config_t *config,
int argc, const char **argv)
{
config->homes_prefix = strdup("rpool/home");
if (config->homes_prefix == NULL) {
pam_syslog(pamh, LOG_ERR, "strdup failure");
return (PAM_SERVICE_ERR);
}
config->runstatedir = strdup(RUNSTATEDIR "/pam_zfs_key");
if (config->runstatedir == NULL) {
pam_syslog(pamh, LOG_ERR, "strdup failure");
free(config->homes_prefix);
return (PAM_SERVICE_ERR);
}
const char *name;
if (pam_get_user(pamh, &name, NULL) != PAM_SUCCESS) {
pam_syslog(pamh, LOG_ERR,
"couldn't get username from PAM stack");
free(config->runstatedir);
free(config->homes_prefix);
return (PAM_SERVICE_ERR);
}
struct passwd *entry = getpwnam(name);
if (!entry) {
free(config->runstatedir);
free(config->homes_prefix);
return (PAM_USER_UNKNOWN);
}
+ config->uid_min = 1000;
+ config->uid_max = MAXUID;
config->uid = entry->pw_uid;
config->username = name;
- config->unmount_and_unload = 1;
+ config->unmount_and_unload = B_TRUE;
+ config->force_unmount = B_FALSE;
+ config->recursive_homes = B_FALSE;
config->dsname = NULL;
config->homedir = NULL;
for (int c = 0; c < argc; c++) {
if (strncmp(argv[c], "homes=", 6) == 0) {
free(config->homes_prefix);
config->homes_prefix = strdup(argv[c] + 6);
} else if (strncmp(argv[c], "runstatedir=", 12) == 0) {
free(config->runstatedir);
config->runstatedir = strdup(argv[c] + 12);
+ } else if (strncmp(argv[c], "uid_min=", 8) == 0) {
+ sscanf(argv[c] + 8, "%u", &config->uid_min);
+ } else if (strncmp(argv[c], "uid_max=", 8) == 0) {
+ sscanf(argv[c] + 8, "%u", &config->uid_max);
} else if (strcmp(argv[c], "nounmount") == 0) {
- config->unmount_and_unload = 0;
+ config->unmount_and_unload = B_FALSE;
+ } else if (strcmp(argv[c], "forceunmount") == 0) {
+ config->force_unmount = B_TRUE;
+ } else if (strcmp(argv[c], "recursive_homes") == 0) {
+ config->recursive_homes = B_TRUE;
} else if (strcmp(argv[c], "prop_mountpoint") == 0) {
if (config->homedir == NULL)
config->homedir = strdup(entry->pw_dir);
}
}
return (PAM_SUCCESS);
}
static void
zfs_key_config_free(zfs_key_config_t *config)
{
free(config->homes_prefix);
free(config->runstatedir);
free(config->homedir);
free(config->dsname);
}
static int
find_dsname_by_prop_value(zfs_handle_t *zhp, void *data)
{
zfs_type_t type = zfs_get_type(zhp);
zfs_key_config_t *target = data;
char mountpoint[ZFS_MAXPROPLEN];
/* Skip any datasets whose type does not match */
if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
zfs_close(zhp);
return (0);
}
/* Skip any datasets whose mountpoint does not match */
(void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE);
if (strcmp(target->homedir, mountpoint) != 0) {
+ if (target->recursive_homes) {
+ (void) zfs_iter_filesystems_v2(zhp, 0,
+ find_dsname_by_prop_value, target);
+ }
zfs_close(zhp);
- return (0);
+ return (target->dsname != NULL);
}
target->dsname = strdup(zfs_get_name(zhp));
zfs_close(zhp);
return (1);
}
static char *
zfs_key_config_get_dataset(zfs_key_config_t *config)
{
if (config->homedir != NULL &&
config->homes_prefix != NULL) {
- zfs_handle_t *zhp = zfs_open(g_zfs, config->homes_prefix,
- ZFS_TYPE_FILESYSTEM);
- if (zhp == NULL) {
- pam_syslog(NULL, LOG_ERR, "dataset %s not found",
- config->homes_prefix);
- return (NULL);
- }
+ if (strcmp(config->homes_prefix, "*") == 0) {
+ (void) zfs_iter_root(g_zfs,
+ find_dsname_by_prop_value, config);
+ } else {
+ zfs_handle_t *zhp = zfs_open(g_zfs,
+ config->homes_prefix, ZFS_TYPE_FILESYSTEM);
+ if (zhp == NULL) {
+ pam_syslog(NULL, LOG_ERR,
+ "dataset %s not found",
+ config->homes_prefix);
+ return (NULL);
+ }
- (void) zfs_iter_filesystems_v2(zhp, 0,
- find_dsname_by_prop_value, config);
- zfs_close(zhp);
+ (void) zfs_iter_filesystems_v2(zhp, 0,
+ find_dsname_by_prop_value, config);
+ zfs_close(zhp);
+ }
char *dsname = config->dsname;
config->dsname = NULL;
return (dsname);
}
if (config->homes_prefix == NULL) {
return (NULL);
}
size_t len = ZFS_MAX_DATASET_NAME_LEN;
size_t total_len = strlen(config->homes_prefix) + 1
+ strlen(config->username);
if (total_len > len) {
return (NULL);
}
char *ret = malloc(len + 1);
if (!ret) {
return (NULL);
}
ret[0] = 0;
(void) snprintf(ret, len + 1, "%s/%s", config->homes_prefix,
config->username);
return (ret);
}
static int
zfs_key_config_modify_session_counter(pam_handle_t *pamh,
zfs_key_config_t *config, int delta)
{
const char *runtime_path = config->runstatedir;
if (mkdir(runtime_path, S_IRWXU) != 0 && errno != EEXIST) {
pam_syslog(pamh, LOG_ERR, "Can't create runtime path: %d",
errno);
return (-1);
}
if (chown(runtime_path, 0, 0) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't chown runtime path: %d",
errno);
return (-1);
}
if (chmod(runtime_path, S_IRWXU) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't chmod runtime path: %d",
errno);
return (-1);
}
char *counter_path;
if (asprintf(&counter_path, "%s/%u", runtime_path, config->uid) == -1)
return (-1);
const int fd = open(counter_path,
O_RDWR | O_CLOEXEC | O_CREAT | O_NOFOLLOW,
S_IRUSR | S_IWUSR);
free(counter_path);
if (fd < 0) {
pam_syslog(pamh, LOG_ERR, "Can't open counter file: %d", errno);
return (-1);
}
if (flock(fd, LOCK_EX) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't lock counter file: %d", errno);
close(fd);
return (-1);
}
char counter[20];
char *pos = counter;
int remaining = sizeof (counter) - 1;
int ret;
counter[sizeof (counter) - 1] = 0;
while (remaining > 0 && (ret = read(fd, pos, remaining)) > 0) {
remaining -= ret;
pos += ret;
}
*pos = 0;
long int counter_value = strtol(counter, NULL, 10);
counter_value += delta;
if (counter_value < 0) {
counter_value = 0;
}
lseek(fd, 0, SEEK_SET);
if (ftruncate(fd, 0) != 0) {
pam_syslog(pamh, LOG_ERR, "Can't truncate counter file: %d",
errno);
close(fd);
return (-1);
}
snprintf(counter, sizeof (counter), "%ld", counter_value);
remaining = strlen(counter);
pos = counter;
while (remaining > 0 && (ret = write(fd, pos, remaining)) > 0) {
remaining -= ret;
pos += ret;
}
close(fd);
return (counter_value);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_authenticate(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) flags;
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_SERVICE_ERR);
}
zfs_key_config_t config;
int config_err = zfs_key_config_load(pamh, &config, argc, argv);
if (config_err != PAM_SUCCESS) {
return (config_err);
}
+ if (config.uid < config.uid_min || config.uid > config.uid_max) {
+ zfs_key_config_free(&config);
+ return (PAM_SERVICE_ERR);
+ }
- const pw_password_t *token = pw_fetch_lazy(pamh);
+ const pw_password_t *token = pw_fetch_lazy(pamh,
+ PAM_AUTHTOK, PASSWORD_VAR_NAME);
if (token == NULL) {
zfs_key_config_free(&config);
return (PAM_AUTH_ERR);
}
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
if (decrypt_mount(pamh, dataset, token->value, B_TRUE) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_AUTH_ERR);
}
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_setcred(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) pamh, (void) flags, (void) argc, (void) argv;
return (PAM_SUCCESS);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_chauthtok(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_PERM_DENIED);
}
zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) != PAM_SUCCESS) {
return (PAM_SERVICE_ERR);
}
- if (config.uid < 1000) {
+ if (config.uid < config.uid_min || config.uid > config.uid_max) {
zfs_key_config_free(&config);
- return (PAM_SUCCESS);
+ return (PAM_SERVICE_ERR);
}
+ const pw_password_t *old_token = pw_get(pamh,
+ PAM_OLDAUTHTOK, OLD_PASSWORD_VAR_NAME);
{
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
- int key_loaded = is_key_loaded(pamh, dataset);
- if (key_loaded == -1) {
+ if (!old_token) {
+ pam_syslog(pamh, LOG_ERR,
+ "old password from PAM stack is null");
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
- free(dataset);
- pam_zfs_free();
- if (! key_loaded) {
+ if (decrypt_mount(pamh, dataset,
+ old_token->value, B_TRUE) == -1) {
pam_syslog(pamh, LOG_ERR,
- "key not loaded, returning try_again");
+ "old token mismatch");
+ free(dataset);
+ pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_PERM_DENIED);
}
}
if ((flags & PAM_UPDATE_AUTHTOK) != 0) {
- const pw_password_t *token = pw_get(pamh);
+ const pw_password_t *token = pw_get(pamh, PAM_AUTHTOK,
+ PASSWORD_VAR_NAME);
if (token == NULL) {
+ pam_syslog(pamh, LOG_ERR, "new password unavailable");
+ pam_zfs_free();
zfs_key_config_free(&config);
- return (PAM_SERVICE_ERR);
- }
- if (pam_zfs_init(pamh) != 0) {
- zfs_key_config_free(&config);
+ pw_clear(pamh, OLD_PASSWORD_VAR_NAME);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
+ pw_clear(pamh, OLD_PASSWORD_VAR_NAME);
+ pw_clear(pamh, PASSWORD_VAR_NAME);
return (PAM_SERVICE_ERR);
}
- if (change_key(pamh, dataset, token->value) == -1) {
+ int was_loaded = is_key_loaded(pamh, dataset);
+ if (!was_loaded && decrypt_mount(pamh, dataset,
+ old_token->value, B_FALSE) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
+ pw_clear(pamh, OLD_PASSWORD_VAR_NAME);
+ pw_clear(pamh, PASSWORD_VAR_NAME);
return (PAM_SERVICE_ERR);
}
+ int changed = change_key(pamh, dataset, token->value);
+ if (!was_loaded) {
+ unmount_unload(pamh, dataset, config.force_unmount);
+ }
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
- if (pw_clear(pamh) == -1) {
+ if (pw_clear(pamh, OLD_PASSWORD_VAR_NAME) == -1 ||
+ pw_clear(pamh, PASSWORD_VAR_NAME) == -1 || changed == -1) {
return (PAM_SERVICE_ERR);
}
} else {
zfs_key_config_free(&config);
}
return (PAM_SUCCESS);
}
PAM_EXTERN int
pam_sm_open_session(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) flags;
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_SUCCESS);
}
zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) != PAM_SUCCESS) {
return (PAM_SESSION_ERR);
}
- if (config.uid < 1000) {
+ if (config.uid < config.uid_min || config.uid > config.uid_max) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
int counter = zfs_key_config_modify_session_counter(pamh, &config, 1);
if (counter != 1) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
- const pw_password_t *token = pw_get(pamh);
+ const pw_password_t *token = pw_get(pamh,
+ PAM_AUTHTOK, PASSWORD_VAR_NAME);
if (token == NULL) {
zfs_key_config_free(&config);
return (PAM_SESSION_ERR);
}
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
if (decrypt_mount(pamh, dataset, token->value, B_FALSE) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
- if (pw_clear(pamh) == -1) {
+ if (pw_clear(pamh, PASSWORD_VAR_NAME) == -1) {
return (PAM_SERVICE_ERR);
}
return (PAM_SUCCESS);
}
__attribute__((visibility("default")))
PAM_EXTERN int
pam_sm_close_session(pam_handle_t *pamh, int flags,
int argc, const char **argv)
{
(void) flags;
if (geteuid() != 0) {
pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_SUCCESS);
}
zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) != PAM_SUCCESS) {
return (PAM_SESSION_ERR);
}
- if (config.uid < 1000) {
+ if (config.uid < config.uid_min || config.uid > config.uid_max) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
int counter = zfs_key_config_modify_session_counter(pamh, &config, -1);
if (counter != 0) {
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
if (config.unmount_and_unload) {
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SESSION_ERR);
}
- if (unmount_unload(pamh, dataset) == -1) {
+ if (unmount_unload(pamh, dataset, config.force_unmount) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SESSION_ERR);
}
free(dataset);
pam_zfs_free();
}
zfs_key_config_free(&config);
return (PAM_SUCCESS);
}
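For context, a minimal sketch (hypothetical pool, user and mountpoint names) of how a per-user encrypted home dataset could be prepared so that the pam_zfs_key changes above can unlock and mount it at login; the dataset's passphrase is assumed to match the user's login password:
# Create an encrypted home for user "alice" under the module's default
# homes= prefix (rpool/home); zfs will prompt for the passphrase.
zfs create -o encryption=on -o keyformat=passphrase -o keylocation=prompt \
    -o mountpoint=/home/alice rpool/home/alice
# A PAM stack would then reference the module with options such as:
#   session optional pam_zfs_key.so homes=rpool/home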
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/kmem.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/kmem.h
index 27d290863c0b..c633799318d5 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/kmem.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/kmem.h
@@ -1,113 +1,114 @@
/*
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_KMEM_H_
#define _OPENSOLARIS_SYS_KMEM_H_
#ifdef _KERNEL
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/vmem.h>
#include <sys/counter.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
MALLOC_DECLARE(M_SOLARIS);
#define POINTER_IS_VALID(p) (!((uintptr_t)(p) & 0x3))
#define POINTER_INVALIDATE(pp) (*(pp) = (void *)((uintptr_t)(*(pp)) | 0x1))
#define KM_SLEEP M_WAITOK
#define KM_PUSHPAGE M_WAITOK
#define KM_NOSLEEP M_NOWAIT
#define KM_NORMALPRI 0
#define KMC_NODEBUG UMA_ZONE_NODUMP
typedef struct vmem vmem_t;
extern char *kmem_asprintf(const char *, ...)
__attribute__((format(printf, 1, 2)));
extern char *kmem_vasprintf(const char *fmt, va_list ap)
__attribute__((format(printf, 1, 0)));
extern int kmem_scnprintf(char *restrict str, size_t size,
const char *restrict fmt, ...);
typedef struct kmem_cache {
char kc_name[32];
#if !defined(KMEM_DEBUG)
uma_zone_t kc_zone;
#else
size_t kc_size;
#endif
int (*kc_constructor)(void *, void *, int);
void (*kc_destructor)(void *, void *);
void *kc_private;
} kmem_cache_t;
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
-__attribute__((alloc_size(1)))
+__attribute__((malloc, alloc_size(1)))
void *zfs_kmem_alloc(size_t size, int kmflags);
void zfs_kmem_free(void *buf, size_t size);
uint64_t kmem_size(void);
kmem_cache_t *kmem_cache_create(const char *name, size_t bufsize, size_t align,
int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags);
void kmem_cache_destroy(kmem_cache_t *cache);
+__attribute__((malloc))
void *kmem_cache_alloc(kmem_cache_t *cache, int flags);
void kmem_cache_free(kmem_cache_t *cache, void *buf);
boolean_t kmem_cache_reap_active(void);
void kmem_cache_reap_soon(kmem_cache_t *);
void kmem_reap(void);
int kmem_debugging(void);
void *calloc(size_t n, size_t s);
#define kmem_cache_reap_now kmem_cache_reap_soon
#define freemem vm_free_count()
#define minfree vm_cnt.v_free_min
#define kmem_alloc(size, kmflags) zfs_kmem_alloc((size), (kmflags))
#define kmem_zalloc(size, kmflags) \
zfs_kmem_alloc((size), (kmflags) | M_ZERO)
#define kmem_free(buf, size) zfs_kmem_free((buf), (size))
#endif /* _KERNEL */
#ifdef _STANDALONE
/*
* At the moment, we just need it for the type. We redirect the alloc/free
* routines to the usual Free and Malloc in that environment.
*/
typedef int kmem_cache_t;
#endif /* _STANDALONE */
#endif /* _OPENSOLARIS_SYS_KMEM_H_ */
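As a point of reference for the attribute changes above, the kmem_alloc()/kmem_zalloc()/kmem_free() wrappers are consumed as sketched below; the caller and its struct are illustrative and not part of the patch.
/*
 * Minimal usage sketch for the wrappers declared above.  The struct and
 * function names are illustrative only; KM_SLEEP maps to M_WAITOK here,
 * so the allocation cannot fail.
 */
typedef struct my_ctx {
	uint64_t	mc_generation;
	char		mc_name[32];
} my_ctx_t;

static my_ctx_t *
my_ctx_create(void)
{
	my_ctx_t *ctx = kmem_zalloc(sizeof (my_ctx_t), KM_SLEEP);

	ctx->mc_generation = 1;
	return (ctx);
}

static void
my_ctx_destroy(my_ctx_t *ctx)
{
	/* The caller tracks the size; it is not stored with the buffer. */
	kmem_free(ctx, sizeof (my_ctx_t));
}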
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/mod_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/mod_compat.h
index 09d109d191bf..8e20a9613539 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/mod_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/mod_compat.h
@@ -1,205 +1,204 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Neskovic <neskovic@gmail.com>.
* Copyright (c) 2020 by Delphix. All rights reserved.
*/
#ifndef _MOD_COMPAT_H
#define _MOD_COMPAT_H
#include <linux/module.h>
#include <linux/moduleparam.h>
/*
* Despite constifying struct kernel_param_ops, some older kernels define a
* `__check_old_set_param()` function in their headers that checks for a
* non-constified `->set()`. This has long been fixed in Linux mainline, but
* since we support older kernels, we workaround it by using a preprocessor
* definition to disable it.
*/
#define __check_old_set_param(_) (0)
typedef const struct kernel_param zfs_kernel_param_t;
#define ZMOD_RW 0644
#define ZMOD_RD 0444
enum scope_prefix_types {
zfs,
zfs_arc,
zfs_brt,
zfs_condense,
zfs_dbuf,
zfs_dbuf_cache,
zfs_deadman,
zfs_dedup,
zfs_l2arc,
zfs_livelist,
zfs_livelist_condense,
zfs_lua,
zfs_metaslab,
zfs_mg,
zfs_multihost,
zfs_prefetch,
zfs_reconstruct,
zfs_recv,
zfs_send,
zfs_spa,
zfs_trim,
zfs_txg,
zfs_vdev,
- zfs_vdev_cache,
zfs_vdev_file,
zfs_vdev_mirror,
zfs_vnops,
zfs_zevent,
zfs_zio,
zfs_zil
};
/*
* While we define our own s64/u64 types, there is no reason to reimplement the
* existing Linux kernel types, so we use the preprocessor to remap our
* "custom" implementations to the kernel ones. This is done because the CPP
* does not allow us to write conditional definitions. The fourth definition
* exists because the CPP will not allow us to replace things like INT with int
* before string concatenation.
*/
#define spl_param_set_int param_set_int
#define spl_param_get_int param_get_int
#define spl_param_ops_int param_ops_int
#define spl_param_ops_INT param_ops_int
#define spl_param_set_long param_set_long
#define spl_param_get_long param_get_long
#define spl_param_ops_long param_ops_long
#define spl_param_ops_LONG param_ops_long
#define spl_param_set_uint param_set_uint
#define spl_param_get_uint param_get_uint
#define spl_param_ops_uint param_ops_uint
#define spl_param_ops_UINT param_ops_uint
#define spl_param_set_ulong param_set_ulong
#define spl_param_get_ulong param_get_ulong
#define spl_param_ops_ulong param_ops_ulong
#define spl_param_ops_ULONG param_ops_ulong
#define spl_param_set_charp param_set_charp
#define spl_param_get_charp param_get_charp
#define spl_param_ops_charp param_ops_charp
#define spl_param_ops_STRING param_ops_charp
int spl_param_set_s64(const char *val, zfs_kernel_param_t *kp);
extern int spl_param_get_s64(char *buffer, zfs_kernel_param_t *kp);
extern const struct kernel_param_ops spl_param_ops_s64;
#define spl_param_ops_S64 spl_param_ops_s64
extern int spl_param_set_u64(const char *val, zfs_kernel_param_t *kp);
extern int spl_param_get_u64(char *buffer, zfs_kernel_param_t *kp);
extern const struct kernel_param_ops spl_param_ops_u64;
#define spl_param_ops_U64 spl_param_ops_u64
/*
* Declare a module parameter / sysctl node
*
* "scope_prefix" the part of the sysctl / sysfs tree the node resides under
* (currently a no-op on Linux)
* "name_prefix" the part of the variable name that will be excluded from the
* exported names on platforms with a hierarchical namespace
* "name" the part of the variable that will be exposed on platforms with a
* hierarchical namespace, or as name_prefix ## name on Linux
* "type" the variable type
* "perm" the permissions (read/write or read only)
* "desc" a brief description of the option
*
* Examples:
* ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_inc, UINT,
* ZMOD_RW, "Rotating media load increment for non-seeking I/O's");
* on FreeBSD:
* vfs.zfs.vdev.mirror.rotating_inc
* on Linux:
* zfs_vdev_mirror_rotating_inc
*
* ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW,
* "Limit one prefetch call to this size");
* on FreeBSD:
* vfs.zfs.dmu_prefetch_max
* on Linux:
* dmu_prefetch_max
*/
#define ZFS_MODULE_PARAM(scope_prefix, name_prefix, name, type, perm, desc) \
_Static_assert( \
sizeof (scope_prefix) == sizeof (enum scope_prefix_types), \
"" #scope_prefix " size mismatch with enum scope_prefix_types"); \
module_param_cb(name_prefix ## name, &spl_param_ops_ ## type, \
&name_prefix ## name, perm); \
MODULE_PARM_DESC(name_prefix ## name, desc)
/*
* Declare a module parameter / sysctl node
*
* "scope_prefix" the part of the the sysctl / sysfs tree the node resides under
* (currently a no-op on Linux)
* "name_prefix" the part of the variable name that will be excluded from the
* exported names on platforms with a hierarchical namespace
* "name" the part of the variable that will be exposed on platforms with a
* hierarchical namespace, or as name_prefix ## name on Linux
* "setfunc" setter function
* "getfunc" getter function
* "perm" the permissions (read/write or read only)
* "desc" a brief description of the option
*
* Examples:
* ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
* param_get_int, ZMOD_RW, "Reserved free space in pool");
* on FreeBSD:
* vfs.zfs.spa_slop_shift
* on Linux:
* spa_slop_shift
*/
#define ZFS_MODULE_PARAM_CALL( \
scope_prefix, name_prefix, name, setfunc, getfunc, perm, desc) \
_Static_assert( \
sizeof (scope_prefix) == sizeof (enum scope_prefix_types), \
"" #scope_prefix " size mismatch with enum scope_prefix_types"); \
module_param_call(name_prefix ## name, setfunc, getfunc, \
&name_prefix ## name, perm); \
MODULE_PARM_DESC(name_prefix ## name, desc)
/*
* As above, but there is no variable with the name name_prefix ## name,
* so NULL is passed to module_param_call instead.
*/
#define ZFS_MODULE_VIRTUAL_PARAM_CALL( \
scope_prefix, name_prefix, name, setfunc, getfunc, perm, desc) \
_Static_assert( \
sizeof (scope_prefix) == sizeof (enum scope_prefix_types), \
"" #scope_prefix " size mismatch with enum scope_prefix_types"); \
module_param_call(name_prefix ## name, setfunc, getfunc, NULL, perm); \
MODULE_PARM_DESC(name_prefix ## name, desc)
#define ZFS_MODULE_PARAM_ARGS const char *buf, zfs_kernel_param_t *kp
#endif /* _MOD_COMPAT_H */
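To make the macro contract above concrete, here is a hedged sketch of declaring a tunable with ZFS_MODULE_PARAM(); the variable, its default, and the description are hypothetical, only the declaration pattern comes from the header.
/* Hypothetical tunable, shown only to illustrate the declaration pattern. */
static uint_t zfs_example_prefetch_limit = 64;

/*
 * Exposed as vfs.zfs.example_prefetch_limit on FreeBSD and as the module
 * option zfs_example_prefetch_limit on Linux.
 */
ZFS_MODULE_PARAM(zfs, zfs_, example_prefetch_limit, UINT, ZMOD_RW,
	"Maximum number of example prefetch requests");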
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/kmem.h b/sys/contrib/openzfs/include/os/linux/spl/sys/kmem.h
index 594425f7b297..8a203f7bb8e2 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/kmem.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/kmem.h
@@ -1,219 +1,219 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H
#include <sys/debug.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
extern int kmem_debugging(void);
-extern char *kmem_vasprintf(const char *fmt, va_list ap)
- __attribute__((format(printf, 1, 0)));
-extern char *kmem_asprintf(const char *fmt, ...)
- __attribute__((format(printf, 1, 2)));
+__attribute__((format(printf, 1, 0)))
+extern char *kmem_vasprintf(const char *fmt, va_list ap);
+__attribute__((format(printf, 1, 2)))
+extern char *kmem_asprintf(const char *fmt, ...);
extern char *kmem_strdup(const char *str);
extern void kmem_strfree(char *str);
#define kmem_scnprintf scnprintf
#define POINTER_IS_VALID(p) (!((uintptr_t)(p) & 0x3))
#define POINTER_INVALIDATE(pp) (*(pp) = (void *)((uintptr_t)(*(pp)) | 0x1))
/*
* Memory allocation interfaces
*/
#define KM_SLEEP 0x0000 /* can block for memory; success guaranteed */
#define KM_NOSLEEP 0x0001 /* cannot block for memory; may fail */
#define KM_PUSHPAGE 0x0004 /* can block for memory; may use reserve */
#define KM_ZERO 0x1000 /* zero the allocation */
#define KM_VMEM 0x2000 /* caller is vmem_* wrapper */
#define KM_PUBLIC_MASK (KM_SLEEP | KM_NOSLEEP | KM_PUSHPAGE)
static int spl_fstrans_check(void);
void *spl_kvmalloc(size_t size, gfp_t flags);
/*
* Convert a KM_* flags mask to its Linux GFP_* counterpart. The conversion
* function is context aware which means that KM_SLEEP allocations can be
* safely used in syncing contexts which have set PF_FSTRANS.
*/
static inline gfp_t
kmem_flags_convert(int flags)
{
gfp_t lflags = __GFP_NOWARN | __GFP_COMP;
if (flags & KM_NOSLEEP) {
lflags |= GFP_ATOMIC | __GFP_NORETRY;
} else {
lflags |= GFP_KERNEL;
if (spl_fstrans_check())
lflags &= ~(__GFP_IO|__GFP_FS);
}
if (flags & KM_PUSHPAGE)
lflags |= __GFP_HIGH;
if (flags & KM_ZERO)
lflags |= __GFP_ZERO;
return (lflags);
}
typedef struct {
struct task_struct *fstrans_thread;
unsigned int saved_flags;
} fstrans_cookie_t;
/*
* Introduced in Linux 3.9, however this cannot be solely relied on before
* Linux 3.18 as it doesn't turn off __GFP_FS as it should.
*/
#ifdef PF_MEMALLOC_NOIO
#define __SPL_PF_MEMALLOC_NOIO (PF_MEMALLOC_NOIO)
#else
#define __SPL_PF_MEMALLOC_NOIO (0)
#endif
/*
* PF_FSTRANS is removed from Linux 4.12
*/
#ifdef PF_FSTRANS
#define __SPL_PF_FSTRANS (PF_FSTRANS)
#else
#define __SPL_PF_FSTRANS (0)
#endif
#define SPL_FSTRANS (__SPL_PF_FSTRANS|__SPL_PF_MEMALLOC_NOIO)
static inline fstrans_cookie_t
spl_fstrans_mark(void)
{
fstrans_cookie_t cookie;
BUILD_BUG_ON(SPL_FSTRANS == 0);
cookie.fstrans_thread = current;
cookie.saved_flags = current->flags & SPL_FSTRANS;
current->flags |= SPL_FSTRANS;
return (cookie);
}
static inline void
spl_fstrans_unmark(fstrans_cookie_t cookie)
{
ASSERT3P(cookie.fstrans_thread, ==, current);
ASSERT((current->flags & SPL_FSTRANS) == SPL_FSTRANS);
current->flags &= ~SPL_FSTRANS;
current->flags |= cookie.saved_flags;
}
static inline int
spl_fstrans_check(void)
{
return (current->flags & SPL_FSTRANS);
}
/*
* specifically used to check PF_FSTRANS flag, cannot be relied on for
* checking spl_fstrans_mark().
*/
static inline int
__spl_pf_fstrans_check(void)
{
return (current->flags & __SPL_PF_FSTRANS);
}
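The mark/unmark pair above is what allocation-heavy code running in syncing context uses so that direct reclaim triggered by its own allocations cannot re-enter the filesystem; a sketch of the usual calling pattern follows, with an illustrative function body.
/*
 * Typical usage of the fstrans cookie API; the surrounding function is
 * illustrative and not part of this header.
 */
static void
example_sync_path_work(size_t sz)
{
	fstrans_cookie_t cookie;
	void *buf;

	cookie = spl_fstrans_mark();
	/* While marked, kmem_flags_convert() drops __GFP_IO|__GFP_FS. */
	buf = kmem_alloc(sz, KM_SLEEP);
	/* ... do the actual work with buf ... */
	kmem_free(buf, sz);
	spl_fstrans_unmark(cookie);
}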
/*
* Kernel compatibility for GFP flags
*/
/* < 4.13 */
#ifndef __GFP_RETRY_MAYFAIL
#define __GFP_RETRY_MAYFAIL __GFP_REPEAT
#endif
/* < 4.4 */
#ifndef __GFP_RECLAIM
#define __GFP_RECLAIM __GFP_WAIT
#endif
#ifdef HAVE_ATOMIC64_T
#define kmem_alloc_used_add(size) atomic64_add(size, &kmem_alloc_used)
#define kmem_alloc_used_sub(size) atomic64_sub(size, &kmem_alloc_used)
#define kmem_alloc_used_read() atomic64_read(&kmem_alloc_used)
#define kmem_alloc_used_set(size) atomic64_set(&kmem_alloc_used, size)
extern atomic64_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
#else /* HAVE_ATOMIC64_T */
#define kmem_alloc_used_add(size) atomic_add(size, &kmem_alloc_used)
#define kmem_alloc_used_sub(size) atomic_sub(size, &kmem_alloc_used)
#define kmem_alloc_used_read() atomic_read(&kmem_alloc_used)
#define kmem_alloc_used_set(size) atomic_set(&kmem_alloc_used, size)
extern atomic_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
#endif /* HAVE_ATOMIC64_T */
extern unsigned int spl_kmem_alloc_warn;
extern unsigned int spl_kmem_alloc_max;
#define kmem_alloc(sz, fl) spl_kmem_alloc((sz), (fl), __func__, __LINE__)
#define kmem_zalloc(sz, fl) spl_kmem_zalloc((sz), (fl), __func__, __LINE__)
#define kmem_free(ptr, sz) spl_kmem_free((ptr), (sz))
#define kmem_cache_reap_active spl_kmem_cache_reap_active
-extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
- __attribute__((alloc_size(1)));
-extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line)
- __attribute__((alloc_size(1)));
+__attribute__((malloc, alloc_size(1)))
+extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
+__attribute__((malloc, alloc_size(1)))
+extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
extern void spl_kmem_free(const void *ptr, size_t sz);
/*
* 5.8 API change, pgprot_t argument removed.
*/
#ifdef HAVE_VMALLOC_PAGE_KERNEL
#define spl_vmalloc(size, flags) __vmalloc(size, flags, PAGE_KERNEL)
#else
#define spl_vmalloc(size, flags) __vmalloc(size, flags)
#endif
/*
* The following functions are only available for internal use.
*/
extern void *spl_kmem_alloc_impl(size_t size, int flags, int node);
extern void *spl_kmem_alloc_debug(size_t size, int flags, int node);
extern void *spl_kmem_alloc_track(size_t size, int flags,
const char *func, int line, int node);
extern void spl_kmem_free_impl(const void *buf, size_t size);
extern void spl_kmem_free_debug(const void *buf, size_t size);
extern void spl_kmem_free_track(const void *buf, size_t size);
extern int spl_kmem_init(void);
extern void spl_kmem_fini(void);
extern int spl_kmem_cache_reap_active(void);
#endif /* _SPL_KMEM_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/vmem.h b/sys/contrib/openzfs/include/os/linux/spl/sys/vmem.h
index e77af2a7a48c..92585a17e263 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/vmem.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/vmem.h
@@ -1,101 +1,103 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_VMEM_H
#define _SPL_VMEM_H
#include <sys/kmem.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
typedef struct vmem { } vmem_t;
/*
* Memory allocation interfaces
*/
#define VMEM_ALLOC 0x01
#define VMEM_FREE 0x02
#ifndef VMALLOC_TOTAL
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#endif
/*
* vmem_* is an interface to a low level arena-based memory allocator on
* Illumos that is used to allocate virtual address space. The kmem SLAB
* allocator allocates slabs from it. Then the generic allocation functions
* kmem_{alloc,zalloc,free}() are layered on top of SLAB allocators.
*
* On Linux, the primary means of doing allocations is via kmalloc(), which
* is similarly layered on top of something called the buddy allocator. The
* buddy allocator is not available to kernel modules, it uses physical
* memory addresses rather than virtual memory addresses and is prone to
* fragmentation.
*
* Linux sets aside a relatively small address space for in-kernel virtual
* memory from which allocations can be done using vmalloc(). It might seem
* like a good idea to use vmalloc() to implement something similar to
* Illumos' allocator. However, this has the following problems:
*
* 1. Page directory table allocations are hard coded to use GFP_KERNEL.
* Consequently, any KM_PUSHPAGE or KM_NOSLEEP allocations done using
* vmalloc() will not have proper semantics.
*
* 2. Address space exhaustion is a real issue on 32-bit platforms where
* only a few 100MB are available. The kernel will handle it by spinning
* when it runs out of address space.
*
* 3. All vmalloc() allocations and frees are protected by a single global
* lock which serializes all allocations.
*
* 4. Accessing /proc/meminfo and /proc/vmallocinfo will iterate the entire
* list. The former will sum the allocations while the latter will print
* them to user space in a way that user space can keep the lock held
* indefinitely. When the total number of mapped allocations is large
* (several 100,000) a large amount of time will be spent waiting on locks.
*
* 5. Linux has a wait_on_bit() locking primitive that assumes physical
* memory is used, it simply does not work on virtual memory. Certain
* Linux structures (e.g. the superblock) use them and might be embedded
* into a structure from Illumos. This makes using Linux virtual memory
* unsafe in certain situations.
*
* It follows that we cannot obtain identical semantics to those on Illumos.
* Consequently, we implement the kmem_{alloc,zalloc,free}() functions in
* such a way that they can be used as drop-in replacements for small vmem_*
* allocations (8MB in size or smaller) and map vmem_{alloc,zalloc,free}()
* to them.
*/
#define vmem_alloc(sz, fl) spl_vmem_alloc((sz), (fl), __func__, __LINE__)
#define vmem_zalloc(sz, fl) spl_vmem_zalloc((sz), (fl), __func__, __LINE__)
#define vmem_free(ptr, sz) spl_vmem_free((ptr), (sz))
-extern void *spl_vmem_alloc(size_t sz, int fl, const char *func, int line);
-extern void *spl_vmem_zalloc(size_t sz, int fl, const char *func, int line);
+extern void *spl_vmem_alloc(size_t sz, int fl, const char *func, int line)
+ __attribute__((malloc, alloc_size(1)));
+extern void *spl_vmem_zalloc(size_t sz, int fl, const char *func, int line)
+ __attribute__((malloc, alloc_size(1)));
extern void spl_vmem_free(const void *ptr, size_t sz);
int spl_vmem_init(void);
void spl_vmem_fini(void);
#endif /* _SPL_VMEM_H */
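Given the mapping described above, the vmem_* wrappers read just like their kmem_* counterparts; a short sketch with an illustrative size, staying within the documented 8MB ceiling.
/* Illustrative caller of the vmem_* wrappers; not part of the patch. */
static void
example_large_table(void)
{
	size_t sz = 1024 * 1024;	/* 1 MiB, well under the 8MB limit */
	uint64_t *table = vmem_zalloc(sz, KM_SLEEP);

	/* ... populate and use the table ... */

	/* As with kmem_free(), the caller supplies the original size. */
	vmem_free(table, sz);
}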
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h
index 7bddd9d1f469..afa1a274e43c 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h
@@ -1,233 +1,267 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#if defined(_KERNEL)
#if defined(HAVE_DECLARE_EVENT_CLASS)
#undef TRACE_SYSTEM
#define TRACE_SYSTEM zfs
#undef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR zfs_zil
#if !defined(_TRACE_ZIL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ZIL_H
#include <linux/tracepoint.h>
#include <sys/types.h>
#define ZILOG_TP_STRUCT_ENTRY \
__field(uint64_t, zl_lr_seq) \
__field(uint64_t, zl_commit_lr_seq) \
__field(uint64_t, zl_destroy_txg) \
__field(uint64_t, zl_replaying_seq) \
__field(uint32_t, zl_suspend) \
__field(uint8_t, zl_suspending) \
__field(uint8_t, zl_keep_first) \
__field(uint8_t, zl_replay) \
__field(uint8_t, zl_stop_sync) \
__field(uint8_t, zl_logbias) \
__field(uint8_t, zl_sync) \
__field(int, zl_parse_error) \
__field(uint64_t, zl_parse_blk_seq) \
__field(uint64_t, zl_parse_lr_seq) \
__field(uint64_t, zl_parse_blk_count) \
__field(uint64_t, zl_parse_lr_count) \
__field(uint64_t, zl_cur_used) \
__field(clock_t, zl_replay_time) \
__field(uint64_t, zl_replay_blks)
#define ZILOG_TP_FAST_ASSIGN \
__entry->zl_lr_seq = zilog->zl_lr_seq; \
__entry->zl_commit_lr_seq = zilog->zl_commit_lr_seq; \
__entry->zl_destroy_txg = zilog->zl_destroy_txg; \
__entry->zl_replaying_seq = zilog->zl_replaying_seq; \
__entry->zl_suspend = zilog->zl_suspend; \
__entry->zl_suspending = zilog->zl_suspending; \
__entry->zl_keep_first = zilog->zl_keep_first; \
__entry->zl_replay = zilog->zl_replay; \
__entry->zl_stop_sync = zilog->zl_stop_sync; \
__entry->zl_logbias = zilog->zl_logbias; \
__entry->zl_sync = zilog->zl_sync; \
__entry->zl_parse_error = zilog->zl_parse_error; \
__entry->zl_parse_blk_seq = zilog->zl_parse_blk_seq; \
__entry->zl_parse_lr_seq = zilog->zl_parse_lr_seq; \
__entry->zl_parse_blk_count = zilog->zl_parse_blk_count;\
__entry->zl_parse_lr_count = zilog->zl_parse_lr_count; \
__entry->zl_cur_used = zilog->zl_cur_used; \
__entry->zl_replay_time = zilog->zl_replay_time; \
__entry->zl_replay_blks = zilog->zl_replay_blks;
#define ZILOG_TP_PRINTK_FMT \
"zl { lr_seq %llu commit_lr_seq %llu destroy_txg %llu " \
"replaying_seq %llu suspend %u suspending %u keep_first %u " \
"replay %u stop_sync %u logbias %u sync %u " \
"parse_error %u parse_blk_seq %llu parse_lr_seq %llu " \
"parse_blk_count %llu parse_lr_count %llu " \
"cur_used %llu replay_time %lu replay_blks %llu }"
#define ZILOG_TP_PRINTK_ARGS \
__entry->zl_lr_seq, __entry->zl_commit_lr_seq, \
__entry->zl_destroy_txg, __entry->zl_replaying_seq, \
__entry->zl_suspend, __entry->zl_suspending, \
__entry->zl_keep_first, __entry->zl_replay, \
__entry->zl_stop_sync, __entry->zl_logbias, __entry->zl_sync, \
__entry->zl_parse_error, __entry->zl_parse_blk_seq, \
__entry->zl_parse_lr_seq, __entry->zl_parse_blk_count, \
__entry->zl_parse_lr_count, __entry->zl_cur_used, \
__entry->zl_replay_time, __entry->zl_replay_blks
#define ITX_TP_STRUCT_ENTRY \
__field(itx_wr_state_t, itx_wr_state) \
__field(uint8_t, itx_sync) \
__field(zil_callback_t, itx_callback) \
__field(void *, itx_callback_data) \
__field(uint64_t, itx_oid) \
\
__field(uint64_t, lrc_txtype) \
__field(uint64_t, lrc_reclen) \
__field(uint64_t, lrc_txg) \
__field(uint64_t, lrc_seq)
#define ITX_TP_FAST_ASSIGN \
__entry->itx_wr_state = itx->itx_wr_state; \
__entry->itx_sync = itx->itx_sync; \
__entry->itx_callback = itx->itx_callback; \
__entry->itx_callback_data = itx->itx_callback_data; \
__entry->itx_oid = itx->itx_oid; \
\
__entry->lrc_txtype = itx->itx_lr.lrc_txtype; \
__entry->lrc_reclen = itx->itx_lr.lrc_reclen; \
__entry->lrc_txg = itx->itx_lr.lrc_txg; \
__entry->lrc_seq = itx->itx_lr.lrc_seq;
#define ITX_TP_PRINTK_FMT \
"itx { wr_state %u sync %u callback %p callback_data %p oid %llu" \
" { txtype %llu reclen %llu txg %llu seq %llu } }"
#define ITX_TP_PRINTK_ARGS \
__entry->itx_wr_state, __entry->itx_sync, __entry->itx_callback,\
__entry->itx_callback_data, __entry->itx_oid, \
__entry->lrc_txtype, __entry->lrc_reclen, __entry->lrc_txg, \
__entry->lrc_seq
#define ZCW_TP_STRUCT_ENTRY \
__field(lwb_t *, zcw_lwb) \
__field(boolean_t, zcw_done) \
__field(int, zcw_zio_error) \
#define ZCW_TP_FAST_ASSIGN \
__entry->zcw_lwb = zcw->zcw_lwb; \
__entry->zcw_done = zcw->zcw_done; \
__entry->zcw_zio_error = zcw->zcw_zio_error;
#define ZCW_TP_PRINTK_FMT \
"zcw { lwb %p done %u error %u }"
#define ZCW_TP_PRINTK_ARGS \
__entry->zcw_lwb, __entry->zcw_done, __entry->zcw_zio_error
/*
* Generic support for two argument tracepoints of the form:
*
* DTRACE_PROBE2(...,
* zilog_t *, ...,
* itx_t *, ...);
*/
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wordered-compare-function-pointers"
#endif
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_zil_process_itx_class,
TP_PROTO(zilog_t *zilog, itx_t *itx),
TP_ARGS(zilog, itx),
TP_STRUCT__entry(
ZILOG_TP_STRUCT_ENTRY
ITX_TP_STRUCT_ENTRY
),
TP_fast_assign(
ZILOG_TP_FAST_ASSIGN
ITX_TP_FAST_ASSIGN
),
TP_printk(
ZILOG_TP_PRINTK_FMT " " ITX_TP_PRINTK_FMT,
ZILOG_TP_PRINTK_ARGS, ITX_TP_PRINTK_ARGS)
);
/* END CSTYLED */
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
#define DEFINE_ZIL_PROCESS_ITX_EVENT(name) \
DEFINE_EVENT(zfs_zil_process_itx_class, name, \
TP_PROTO(zilog_t *zilog, itx_t *itx), \
TP_ARGS(zilog, itx))
DEFINE_ZIL_PROCESS_ITX_EVENT(zfs_zil__process__commit__itx);
DEFINE_ZIL_PROCESS_ITX_EVENT(zfs_zil__process__normal__itx);
/*
* Generic support for two argument tracepoints of the form:
*
* DTRACE_PROBE2(...,
* zilog_t *, ...,
* zil_commit_waiter_t *, ...);
*/
/* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_zil_commit_io_error_class,
TP_PROTO(zilog_t *zilog, zil_commit_waiter_t *zcw),
TP_ARGS(zilog, zcw),
TP_STRUCT__entry(
ZILOG_TP_STRUCT_ENTRY
ZCW_TP_STRUCT_ENTRY
),
TP_fast_assign(
ZILOG_TP_FAST_ASSIGN
ZCW_TP_FAST_ASSIGN
),
TP_printk(
ZILOG_TP_PRINTK_FMT " " ZCW_TP_PRINTK_FMT,
ZILOG_TP_PRINTK_ARGS, ZCW_TP_PRINTK_ARGS)
);
#define DEFINE_ZIL_COMMIT_IO_ERROR_EVENT(name) \
DEFINE_EVENT(zfs_zil_commit_io_error_class, name, \
TP_PROTO(zilog_t *zilog, zil_commit_waiter_t *zcw), \
TP_ARGS(zilog, zcw))
DEFINE_ZIL_COMMIT_IO_ERROR_EVENT(zfs_zil__commit__io__error);
+/*
+ * Generic support for three argument tracepoints of the form:
+ *
+ * DTRACE_PROBE3(...,
+ * zilog_t *, ...,
+ * uint64_t, ...,
+ * uint64_t, ...);
+ */
+/* BEGIN CSTYLED */
+DECLARE_EVENT_CLASS(zfs_zil_block_size_class,
+ TP_PROTO(zilog_t *zilog, uint64_t res, uint64_t s1),
+ TP_ARGS(zilog, res, s1),
+ TP_STRUCT__entry(
+ ZILOG_TP_STRUCT_ENTRY
+ __field(uint64_t, res)
+ __field(uint64_t, s1)
+ ),
+ TP_fast_assign(
+ ZILOG_TP_FAST_ASSIGN
+ __entry->res = res;
+ __entry->s1 = s1;
+ ),
+ TP_printk(
+ ZILOG_TP_PRINTK_FMT " res %llu s1 %llu",
+ ZILOG_TP_PRINTK_ARGS, __entry->res, __entry->s1)
+);
+
+#define DEFINE_ZIL_BLOCK_SIZE_EVENT(name) \
+DEFINE_EVENT(zfs_zil_block_size_class, name, \
+ TP_PROTO(zilog_t *zilog, uint64_t res, uint64_t s1), \
+ TP_ARGS(zilog, res, s1))
+DEFINE_ZIL_BLOCK_SIZE_EVENT(zfs_zil__block__size);
+
#endif /* _TRACE_ZIL_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH sys
#define TRACE_INCLUDE_FILE trace_zil
#include <trace/define_trace.h>
#else
DEFINE_DTRACE_PROBE2(zil__process__commit__itx);
DEFINE_DTRACE_PROBE2(zil__process__normal__itx);
DEFINE_DTRACE_PROBE2(zil__commit__io__error);
+DEFINE_DTRACE_PROBE3(zil__block__size);
#endif /* HAVE_DECLARE_EVENT_CLASS */
#endif /* _KERNEL */
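The new zfs_zil_block_size_class backs a three-argument probe; on the producer side it would typically be fired with DTRACE_PROBE3 roughly as sketched below. Only the probe name and argument types follow the event class above; the wrapper function and argument names are illustrative.
/* Sketch of firing the new probe from ZIL code. */
static void
example_report_zil_block_size(zilog_t *zilog, uint64_t chosen, uint64_t hint)
{
	DTRACE_PROBE3(zil__block__size, zilog_t *, zilog,
	    uint64_t, chosen, uint64_t, hint);
}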
diff --git a/sys/contrib/openzfs/include/sys/abd.h b/sys/contrib/openzfs/include/sys/abd.h
index 82c51cb05cbc..750f9986c1da 100644
--- a/sys/contrib/openzfs/include/sys/abd.h
+++ b/sys/contrib/openzfs/include/sys/abd.h
@@ -1,221 +1,226 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2016, 2019 by Delphix. All rights reserved.
*/
#ifndef _ABD_H
#define _ABD_H
#include <sys/isa_defs.h>
#include <sys/debug.h>
#include <sys/zfs_refcount.h>
#include <sys/uio.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef enum abd_flags {
ABD_FLAG_LINEAR = 1 << 0, /* is buffer linear (or scattered)? */
ABD_FLAG_OWNER = 1 << 1, /* does it own its data buffers? */
ABD_FLAG_META = 1 << 2, /* does this represent FS metadata? */
ABD_FLAG_MULTI_ZONE = 1 << 3, /* pages split over memory zones */
ABD_FLAG_MULTI_CHUNK = 1 << 4, /* pages split over multiple chunks */
ABD_FLAG_LINEAR_PAGE = 1 << 5, /* linear but allocd from page */
ABD_FLAG_GANG = 1 << 6, /* mult ABDs chained together */
ABD_FLAG_GANG_FREE = 1 << 7, /* gang ABD is responsible for mem */
ABD_FLAG_ZEROS = 1 << 8, /* ABD for zero-filled buffer */
ABD_FLAG_ALLOCD = 1 << 9, /* we allocated the abd_t */
} abd_flags_t;
typedef struct abd {
abd_flags_t abd_flags;
uint_t abd_size; /* excludes scattered abd_offset */
list_node_t abd_gang_link;
#ifdef ZFS_DEBUG
struct abd *abd_parent;
zfs_refcount_t abd_children;
#endif
kmutex_t abd_mtx;
union {
struct abd_scatter {
uint_t abd_offset;
#if defined(__FreeBSD__) && defined(_KERNEL)
void *abd_chunks[1]; /* actually variable-length */
#else
uint_t abd_nents;
struct scatterlist *abd_sgl;
#endif
} abd_scatter;
struct abd_linear {
void *abd_buf;
struct scatterlist *abd_sgl; /* for LINEAR_PAGE */
} abd_linear;
struct abd_gang {
list_t abd_gang_chain;
} abd_gang;
} abd_u;
} abd_t;
typedef int abd_iter_func_t(void *buf, size_t len, void *priv);
typedef int abd_iter_func2_t(void *bufa, void *bufb, size_t len, void *priv);
extern int zfs_abd_scatter_enabled;
/*
* Allocations and deallocations
*/
+__attribute__((malloc))
abd_t *abd_alloc(size_t, boolean_t);
+__attribute__((malloc))
abd_t *abd_alloc_linear(size_t, boolean_t);
+__attribute__((malloc))
abd_t *abd_alloc_gang(void);
+__attribute__((malloc))
abd_t *abd_alloc_for_io(size_t, boolean_t);
+__attribute__((malloc))
abd_t *abd_alloc_sametype(abd_t *, size_t);
boolean_t abd_size_alloc_linear(size_t);
void abd_gang_add(abd_t *, abd_t *, boolean_t);
void abd_free(abd_t *);
abd_t *abd_get_offset(abd_t *, size_t);
abd_t *abd_get_offset_size(abd_t *, size_t, size_t);
abd_t *abd_get_offset_struct(abd_t *, abd_t *, size_t, size_t);
abd_t *abd_get_zeros(size_t);
abd_t *abd_get_from_buf(void *, size_t);
void abd_cache_reap_now(void);
/*
* Conversion to and from a normal buffer
*/
void *abd_to_buf(abd_t *);
void *abd_borrow_buf(abd_t *, size_t);
void *abd_borrow_buf_copy(abd_t *, size_t);
void abd_return_buf(abd_t *, void *, size_t);
void abd_return_buf_copy(abd_t *, void *, size_t);
void abd_take_ownership_of_buf(abd_t *, boolean_t);
void abd_release_ownership_of_buf(abd_t *);
/*
* ABD operations
*/
int abd_iterate_func(abd_t *, size_t, size_t, abd_iter_func_t *, void *);
int abd_iterate_func2(abd_t *, abd_t *, size_t, size_t, size_t,
abd_iter_func2_t *, void *);
void abd_copy_off(abd_t *, abd_t *, size_t, size_t, size_t);
void abd_copy_from_buf_off(abd_t *, const void *, size_t, size_t);
void abd_copy_to_buf_off(void *, abd_t *, size_t, size_t);
int abd_cmp(abd_t *, abd_t *);
int abd_cmp_buf_off(abd_t *, const void *, size_t, size_t);
void abd_zero_off(abd_t *, size_t, size_t);
void abd_verify(abd_t *);
void abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
ssize_t csize, ssize_t dsize, const unsigned parity,
void (*func_raidz_gen)(void **, const void *, size_t, size_t));
void abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
ssize_t tsize, const unsigned parity,
void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
const unsigned *mul),
const unsigned *mul);
/*
* Wrappers for calls with offsets of 0
*/
static inline void
abd_copy(abd_t *dabd, abd_t *sabd, size_t size)
{
abd_copy_off(dabd, sabd, 0, 0, size);
}
static inline void
abd_copy_from_buf(abd_t *abd, const void *buf, size_t size)
{
abd_copy_from_buf_off(abd, buf, 0, size);
}
static inline void
abd_copy_to_buf(void* buf, abd_t *abd, size_t size)
{
abd_copy_to_buf_off(buf, abd, 0, size);
}
static inline int
abd_cmp_buf(abd_t *abd, const void *buf, size_t size)
{
return (abd_cmp_buf_off(abd, buf, 0, size));
}
static inline void
abd_zero(abd_t *abd, size_t size)
{
abd_zero_off(abd, 0, size);
}
/*
* ABD type check functions
*/
static inline boolean_t
abd_is_linear(abd_t *abd)
{
return ((abd->abd_flags & ABD_FLAG_LINEAR) ? B_TRUE : B_FALSE);
}
static inline boolean_t
abd_is_linear_page(abd_t *abd)
{
return ((abd->abd_flags & ABD_FLAG_LINEAR_PAGE) ? B_TRUE : B_FALSE);
}
static inline boolean_t
abd_is_gang(abd_t *abd)
{
return ((abd->abd_flags & ABD_FLAG_GANG) ? B_TRUE : B_FALSE);
}
static inline uint_t
abd_get_size(abd_t *abd)
{
return (abd->abd_size);
}
/*
* Module lifecycle
* Defined in each specific OS's abd_os.c
*/
void abd_init(void);
void abd_fini(void);
/*
* Linux ABD bio functions
*/
#if defined(__linux__) && defined(_KERNEL)
unsigned int abd_bio_map_off(struct bio *, abd_t *, unsigned int, size_t);
unsigned long abd_nr_pages_off(abd_t *, unsigned int, size_t);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ABD_H */
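For context on the allocation attributes added above, a minimal sketch of allocating an ABD, filling it from a flat buffer, and handing it back to the caller; the wrapper function is illustrative.
/* Illustrative ABD round-trip using the interfaces declared above. */
static abd_t *
example_wrap_buf(const void *src, size_t size)
{
	/* B_FALSE: this buffer holds data, not filesystem metadata. */
	abd_t *abd = abd_alloc(size, B_FALSE);

	abd_copy_from_buf(abd, src, size);
	return (abd);	/* the caller releases it with abd_free() */
}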
diff --git a/sys/contrib/openzfs/include/sys/btree.h b/sys/contrib/openzfs/include/sys/btree.h
index 883abb5181c9..6e05eee8f01d 100644
--- a/sys/contrib/openzfs/include/sys/btree.h
+++ b/sys/contrib/openzfs/include/sys/btree.h
@@ -1,252 +1,310 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
#ifndef _BTREE_H
#define _BTREE_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/zfs_context.h>
/*
* This file defines the interface for a B-Tree implementation for ZFS. The
* tree can be used to store arbitrary sortable data types with low overhead
* and good operation performance. In addition the tree intelligently
* optimizes bulk in-order insertions to improve memory use and performance.
*
* Note that for all B-Tree functions, the values returned are pointers to the
* internal copies of the data in the tree. The internal data can only be
* safely mutated if the changes cannot change the ordering of the element
* with respect to any other elements in the tree.
*
* The major drawback of the B-Tree is that any returned elements or indexes
* are only valid until a side-effectful operation occurs, since these can
* result in reallocation or relocation of data. Side effectful operations are
* defined as insertion, removal, and zfs_btree_destroy_nodes.
*
* The B-Tree has two types of nodes: core nodes, and leaf nodes. Core
* nodes have an array of children pointing to other nodes, and an array of
* elements that act as separators between the elements of the subtrees rooted
* at its children. Leaf nodes only contain data elements, and form the bottom
* layer of the tree. Unlike B+ Trees, in this B-Tree implementation the
* elements in the core nodes are not copies of or references to leaf node
* elements. Each element occurs only once in the tree, no matter what kind
* of node it is in.
*
* The tree's height is the same throughout, unlike many other forms of search
* tree. Each node (except for the root) must be between half minus one and
* completely full of elements (and children) at all times. Any operation that
* would put the node outside of that range results in a rebalancing operation
* (taking, merging, or splitting).
*
* This tree was implemented using descriptions from Wikipedia's articles on
* B-Trees and B+ Trees.
*/
/*
* Decreasing these values results in smaller memmove operations, but more of
* them, and increased memory overhead. Increasing these values results in
* higher variance in operation time, and reduces memory overhead.
*/
#define BTREE_CORE_ELEMS 126
#define BTREE_LEAF_SIZE 4096
extern kmem_cache_t *zfs_btree_leaf_cache;
typedef struct zfs_btree_hdr {
struct zfs_btree_core *bth_parent;
/*
* Set to -1 to indicate core nodes. Other values represent first
* valid element offset for leaf nodes.
*/
uint32_t bth_first;
/*
* For both leaf and core nodes, represents the number of elements in
* the node. For core nodes, they will have bth_count + 1 children.
*/
uint32_t bth_count;
} zfs_btree_hdr_t;
typedef struct zfs_btree_core {
zfs_btree_hdr_t btc_hdr;
zfs_btree_hdr_t *btc_children[BTREE_CORE_ELEMS + 1];
uint8_t btc_elems[];
} zfs_btree_core_t;
typedef struct zfs_btree_leaf {
zfs_btree_hdr_t btl_hdr;
uint8_t btl_elems[];
} zfs_btree_leaf_t;
typedef struct zfs_btree_index {
zfs_btree_hdr_t *bti_node;
uint32_t bti_offset;
/*
* True if the location is before the list offset, false if it's at
* the listed offset.
*/
boolean_t bti_before;
} zfs_btree_index_t;
-typedef struct btree {
+typedef struct btree zfs_btree_t;
+typedef void * (*bt_find_in_buf_f) (zfs_btree_t *, uint8_t *, uint32_t,
+ const void *, zfs_btree_index_t *);
+
+struct btree {
int (*bt_compar) (const void *, const void *);
+ bt_find_in_buf_f bt_find_in_buf;
size_t bt_elem_size;
size_t bt_leaf_size;
uint32_t bt_leaf_cap;
int32_t bt_height;
uint64_t bt_num_elems;
uint64_t bt_num_nodes;
zfs_btree_hdr_t *bt_root;
zfs_btree_leaf_t *bt_bulk; // non-null if bulk loading
-} zfs_btree_t;
+};
+
+/*
+ * Implementation of Shar's algorithm designed to accelerate binary search by
+ * eliminating impossible to predict branches.
+ *
+ * For optimality, this should be used to generate the search function in the
+ * same file as the comparator and the comparator should be marked
+ * `__attribute__((always_inline)) inline` so that the compiler will inline it.
+ *
+ * Arguments are:
+ *
+ * NAME - The function name for this instance of the search function. Use it
+ * in a subsequent call to zfs_btree_create().
+ * T - The element type stored inside the B-Tree.
+ * COMP - A comparator to compare two nodes, it must return exactly -1, 0,
+ * or +1: -1 for <, 0 for ==, and +1 for >. For trivial comparisons,
+ * TREE_CMP() from avl.h can be used in a boilerplate function.
+ */
+/* BEGIN CSTYLED */
+#define ZFS_BTREE_FIND_IN_BUF_FUNC(NAME, T, COMP) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"") \
+static void * \
+NAME(zfs_btree_t *tree, uint8_t *buf, uint32_t nelems, \
+ const void *value, zfs_btree_index_t *where) \
+{ \
+ T *i = (T *)buf; \
+ (void) tree; \
+ _Pragma("GCC unroll 9") \
+ while (nelems > 1) { \
+ uint32_t half = nelems / 2; \
+ nelems -= half; \
+ i += (COMP(&i[half - 1], value) < 0) * half; \
+ } \
+ \
+ int comp = COMP(i, value); \
+ where->bti_offset = (i - (T *)buf) + (comp < 0); \
+ where->bti_before = (comp != 0); \
+ \
+ if (comp == 0) { \
+ return (i); \
+ } \
+ \
+ return (NULL); \
+} \
+_Pragma("GCC diagnostic pop")
+/* END CSTYLED */
/*
* Allocate and deallocate caches for btree nodes.
*/
void zfs_btree_init(void);
void zfs_btree_fini(void);
/*
* Initialize a B-Tree. Arguments are:
*
* tree - the tree to be initialized
* compar - function to compare two nodes, it must return exactly: -1, 0, or +1
* -1 for <, 0 for ==, and +1 for >
+ * find - optional function to accelerate searches inside B-Tree nodes
+ * through Shar's algorithm and comparator inlining. Setting this to
+ * NULL will use a generic function. The function should be created
+ * using ZFS_BTREE_FIND_IN_BUF_FUNC() in the same file as compar.
+ * compar should be marked `__attribute__((always_inline)) inline` or
+ * performance is unlikely to improve very much.
* size - the value of sizeof(struct my_type)
* lsize - custom leaf size
*/
void zfs_btree_create(zfs_btree_t *, int (*) (const void *, const void *),
- size_t);
+ bt_find_in_buf_f, size_t);
void zfs_btree_create_custom(zfs_btree_t *, int (*)(const void *, const void *),
- size_t, size_t);
+ bt_find_in_buf_f, size_t, size_t);
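Tying the pieces together, here is a hedged sketch of pairing ZFS_BTREE_FIND_IN_BUF_FUNC() with an always-inlined comparator and the updated zfs_btree_create() signature; the element type and all names prefixed with example_ are illustrative.
/*
 * Illustrative element type and comparator; only the macro usage and the
 * zfs_btree_create() signature come from this header.
 */
typedef struct example_node {
	uint64_t	en_key;
} example_node_t;

static __attribute__((always_inline)) inline int
example_node_compare(const void *a, const void *b)
{
	const example_node_t *na = a, *nb = b;

	return (TREE_CMP(na->en_key, nb->en_key));
}

ZFS_BTREE_FIND_IN_BUF_FUNC(example_node_find, example_node_t,
    example_node_compare)

static void
example_tree_init(zfs_btree_t *tree)
{
	zfs_btree_create(tree, example_node_compare, example_node_find,
	    sizeof (example_node_t));
}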
/*
* Find a node with a matching value in the tree. Returns the matching node
* found. If not found, it returns NULL and then if "where" is not NULL it sets
* "where" for use with zfs_btree_add_idx() or zfs_btree_nearest().
*
* node - node that has the value being looked for
* where - position for use with zfs_btree_nearest() or zfs_btree_add_idx(),
* may be NULL
*/
void *zfs_btree_find(zfs_btree_t *, const void *, zfs_btree_index_t *);
/*
* Insert a node into the tree.
*
* node - the node to insert
* where - position as returned from zfs_btree_find()
*/
void zfs_btree_add_idx(zfs_btree_t *, const void *, const zfs_btree_index_t *);
/*
* Return the first or last valued node in the tree. Will return NULL if the
* tree is empty. The index can be NULL if the location of the first or last
* element isn't required.
*/
void *zfs_btree_first(zfs_btree_t *, zfs_btree_index_t *);
void *zfs_btree_last(zfs_btree_t *, zfs_btree_index_t *);
/*
* Return the next or previous valued node in the tree. The second index can
* safely be NULL, if the location of the next or previous value isn't
* required.
*/
void *zfs_btree_next(zfs_btree_t *, const zfs_btree_index_t *,
zfs_btree_index_t *);
void *zfs_btree_prev(zfs_btree_t *, const zfs_btree_index_t *,
zfs_btree_index_t *);
/*
* Get a value from a tree and an index.
*/
void *zfs_btree_get(zfs_btree_t *, zfs_btree_index_t *);
/*
* Add a single value to the tree. The value must not compare equal to any
* other node already in the tree. Note that the value will be copied out, not
* inserted directly. It is safe to free or destroy the value once this
* function returns.
*/
void zfs_btree_add(zfs_btree_t *, const void *);
/*
* Remove a single value from the tree. The value must be in the tree. The
* pointer passed in may be a pointer into a tree-controlled buffer, but it
* need not be.
*/
void zfs_btree_remove(zfs_btree_t *, const void *);
/*
* Remove the value at the given location from the tree.
*/
void zfs_btree_remove_idx(zfs_btree_t *, zfs_btree_index_t *);
/*
* Return the number of nodes in the tree
*/
ulong_t zfs_btree_numnodes(zfs_btree_t *);
/*
* Used to destroy any remaining nodes in a tree. The cookie argument should
* be initialized to NULL before the first call. Returns a node that has been
* removed from the tree and may be free()'d. Returns NULL when the tree is
* empty.
*
* Once you call zfs_btree_destroy_nodes(), you can only continue calling it
* and finally zfs_btree_destroy(). No other B-Tree routines will be valid.
*
* cookie - an index used to save state between calls to
* zfs_btree_destroy_nodes()
*
* EXAMPLE:
* zfs_btree_t *tree;
* struct my_data *node;
* zfs_btree_index_t *cookie;
*
* cookie = NULL;
* while ((node = zfs_btree_destroy_nodes(tree, &cookie)) != NULL)
* data_destroy(node);
* zfs_btree_destroy(tree);
*/
void *zfs_btree_destroy_nodes(zfs_btree_t *, zfs_btree_index_t **);
/*
* Destroys all nodes in the tree quickly. This doesn't give the caller an
* opportunity to iterate over each node and do its own cleanup; for that, use
* zfs_btree_destroy_nodes().
*/
void zfs_btree_clear(zfs_btree_t *);
/*
* Final destroy of a B-Tree. Arguments are:
*
* tree - the empty tree to destroy
*/
void zfs_btree_destroy(zfs_btree_t *tree);
/* Runs a variety of self-checks on the btree to verify integrity. */
void zfs_btree_verify(zfs_btree_t *tree);
#ifdef __cplusplus
}
#endif
#endif /* _BTREE_H */
diff --git a/sys/contrib/openzfs/include/sys/spa.h b/sys/contrib/openzfs/include/sys/spa.h
index ed752967cca6..1fa2044008dc 100644
--- a/sys/contrib/openzfs/include/sys/spa.h
+++ b/sys/contrib/openzfs/include/sys/spa.h
@@ -1,1247 +1,1243 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Datto Inc.
*/
#ifndef _SYS_SPA_H
#define _SYS_SPA_H
#include <sys/avl.h>
#include <sys/zfs_context.h>
#include <sys/kstat.h>
#include <sys/nvpair.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/spa_checksum.h>
#include <sys/dmu.h>
#include <sys/space_map.h>
#include <sys/bitops.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Forward references that lots of things need.
*/
typedef struct spa spa_t;
typedef struct vdev vdev_t;
typedef struct metaslab metaslab_t;
typedef struct metaslab_group metaslab_group_t;
typedef struct metaslab_class metaslab_class_t;
typedef struct zio zio_t;
typedef struct zilog zilog_t;
typedef struct spa_aux_vdev spa_aux_vdev_t;
typedef struct ddt ddt_t;
typedef struct ddt_entry ddt_entry_t;
typedef struct zbookmark_phys zbookmark_phys_t;
typedef struct zbookmark_err_phys zbookmark_err_phys_t;
struct bpobj;
struct bplist;
struct dsl_pool;
struct dsl_dataset;
struct dsl_crypto_params;
/*
* Alignment Shift (ashift) is an immutable, internal top-level vdev property
* which can only be set at vdev creation time. Physical writes are always done
* according to it, which makes 2^ashift the smallest possible IO on a vdev.
*
* We currently allow values ranging from 512 bytes (2^9 = 512) to 64 KiB
* (2^16 = 65,536).
*/
#define ASHIFT_MIN 9
#define ASHIFT_MAX 16
/*
* Size of block to hold the configuration data (a packed nvlist)
*/
#define SPA_CONFIG_BLOCKSIZE (1ULL << 14)
/*
* The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
* The ASIZE encoding should be at least 64 times larger (6 more bits)
* to support up to 4-way RAID-Z mirror mode with worst-case gang block
* overhead, three DVAs per bp, plus one more bit in case we do anything
* else that expands the ASIZE.
*/
#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */
#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */
#define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */
#define SPA_COMPRESSBITS 7
#define SPA_VDEVBITS 24
#define SPA_COMPRESSMASK ((1U << SPA_COMPRESSBITS) - 1)
/*
* All SPA data is represented by 128-bit data virtual addresses (DVAs).
* The members of the dva_t should be considered opaque outside the SPA.
*/
typedef struct dva {
uint64_t dva_word[2];
} dva_t;
/*
* Some checksums/hashes need a 256-bit initialization salt. This salt is kept
* secret and is suitable for use in MAC algorithms as the key.
*/
typedef struct zio_cksum_salt {
uint8_t zcs_bytes[32];
} zio_cksum_salt_t;
/*
* Each block is described by its DVAs, time of birth, checksum, etc.
* The word-by-word, bit-by-bit layout of the blkptr is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | pad | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | pad | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | pad | vdev3 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 |G| offset3 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | checksum[2] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | checksum[3] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* vdev virtual device ID
* offset offset into virtual device
* LSIZE logical size
* PSIZE physical size (after compression)
* ASIZE allocated size (including RAID-Z parity and gang block headers)
* GRID RAID-Z layout information (reserved for future use)
* cksum checksum function
* comp compression function
* G gang block indicator
* B byteorder (endianness)
* D dedup
* X encryption
* E blkptr_t contains embedded data (see below)
* lvl level of indirection
* type DMU object type
* phys birth txg when dva[0] was written; zero if same as logical birth txg
* note that typically all the dva's would be written in this
* txg, but they could be different if they were moved by
* device removal.
* log. birth transaction group in which the block was logically born
* fill count number of non-zero blocks under this bp
* checksum[4] 256-bit checksum of the data this bp describes
*/
/*
* The blkptr_t's of encrypted blocks also need to store the encryption
* parameters so that the block can be decrypted. This layout is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | salt |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 | IV1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | IV2 | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | MAC[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | MAC[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* salt Salt for generating encryption keys
* IV1 First 64 bits of encryption IV
* X Block requires encryption handling (set to 1)
* E blkptr_t contains embedded data (set to 0, see below)
* fill count number of non-zero blocks under this bp (truncated to 32 bits)
* IV2 Last 32 bits of encryption IV
* checksum[2] 128-bit checksum of the data this bp describes
* MAC[2] 128-bit message authentication code for this data
*
* The X bit being set indicates that this block is one of 3 types. If this is
* a level 0 block with an encrypted object type, the block is encrypted
* (see BP_IS_ENCRYPTED()). If this is a level 0 block with an unencrypted
* object type, this block is authenticated with an HMAC (see
* BP_IS_AUTHENTICATED()). Otherwise (if level > 0), this bp will use the MAC
* words to store a checksum-of-MACs from the level below (see
* BP_HAS_INDIRECT_MAC_CKSUM()). For convenience in the code, BP_IS_PROTECTED()
* refers to both encrypted and authenticated blocks and BP_USES_CRYPT()
* refers to any of these 3 kinds of blocks.
*
* The additional encryption parameters are the salt, IV, and MAC which are
* explained in greater detail in the block comment at the top of zio_crypt.c.
* The MAC occupies half of the checksum space since it serves a very similar
* purpose: to prevent data corruption on disk. The only functional difference
* is that the checksum is used to detect on-disk corruption whether or not the
* encryption key is loaded and the MAC provides additional protection against
* malicious disk tampering. We use the 3rd DVA to store the salt and first
* 64 bits of the IV. As a result encrypted blocks can only have 2 copies
* maximum instead of the normal 3. The last 32 bits of the IV are stored in
* the upper bits of what is usually the fill count. Note that only blocks at
* level 0 or -2 are ever encrypted, which allows us to guarantee that these
* 32 bits are not trampled over by other code (see zio_crypt.c for details).
* The salt and IV are not used for authenticated bps or bps with an indirect
* MAC checksum, so these blocks can utilize all 3 DVAs and the full 64 bits
* for the fill count.
*/
/*
* "Embedded" blkptr_t's don't actually point to a block, instead they
* have a data payload embedded in the blkptr_t itself. See the comment
* in blkptr.c for more details.
*
* The blkptr_t is laid out as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | payload |
* 1 | payload |
* 2 | payload |
* 3 | payload |
* 4 | payload |
* 5 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | etype |E| comp| PSIZE| LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | payload |
* 8 | payload |
* 9 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | payload |
* c | payload |
* d | payload |
* e | payload |
* f | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* payload contains the embedded data
* B (byteorder) byteorder (endianness)
* D (dedup) padding (set to zero)
* X encryption (set to zero)
* E (embedded) set to one
* lvl indirection level
* type DMU object type
* etype how to interpret embedded data (BP_EMBEDDED_TYPE_*)
* comp compression function of payload
* PSIZE size of payload after compression, in bytes
* LSIZE logical size of payload, in bytes
* note that 25 bits is enough to store the largest
* "normal" BP's LSIZE (2^16 * 2^9) in bytes
* log. birth transaction group in which the block was logically born
*
* Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
* bp's they are stored in units of SPA_MINBLOCKSHIFT.
* Generally, the generic BP_GET_*() macros can be used on embedded BP's.
* The B, D, X, lvl, type, and comp fields are stored the same as with normal
* BP's so the BP_SET_* macros can be used with them. etype, PSIZE, LSIZE must
* be set with the BPE_SET_* macros. BP_SET_EMBEDDED() should be called before
* other macros, as they assert that they are only used on BP's of the correct
* "embedded-ness". Encrypted blkptr_t's cannot be embedded because they use
* the payload space for encryption parameters (see the comment above on
* how encryption parameters are stored).
*/
#define BPE_GET_ETYPE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET((bp)->blk_prop, 40, 8))
#define BPE_SET_ETYPE(bp, t) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, t); \
} while (0)
#define BPE_GET_LSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
#define BPE_SET_LSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
} while (0)
#define BPE_GET_PSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
#define BPE_SET_PSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
} while (0)
typedef enum bp_embedded_type {
BP_EMBEDDED_TYPE_DATA,
BP_EMBEDDED_TYPE_RESERVED, /* Reserved for Delphix byteswap feature. */
BP_EMBEDDED_TYPE_REDACTED,
NUM_BP_EMBEDDED_TYPES
} bp_embedded_type_t;
#define BPE_NUM_WORDS 14
#define BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
#define BPE_IS_PAYLOADWORD(bp, wp) \
((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
#define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */
#define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */
#define SPA_SYNC_MIN_VDEVS 3 /* min vdevs to update during sync */
/*
 * A block is a hole when it either 1) has never been written to, or
 * 2) is zero-filled. In both cases, ZFS can return all zeroes for all reads
 * without physically allocating disk space. Holes are represented in the
 * blkptr_t structure by zeroed blk_dva. Correct checking for holes is
 * done through the BP_IS_HOLE macro. The logical size, level, DMU object
 * type, and birth times are also stored for holes that were written to at
 * some point (i.e. were punched after having been filled).
*/
typedef struct blkptr {
dva_t blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
uint64_t blk_prop; /* size, compression, type, etc */
uint64_t blk_pad[2]; /* Extra space for the future */
uint64_t blk_phys_birth; /* txg when block was allocated */
uint64_t blk_birth; /* transaction group at birth */
uint64_t blk_fill; /* fill count */
zio_cksum_t blk_cksum; /* 256-bit checksum */
} blkptr_t;
/*
* Macros to get and set fields in a bp or DVA.
*/
/*
* Note, for gang blocks, DVA_GET_ASIZE() is the total space allocated for
* this gang DVA including its children BP's. The space allocated at this
* DVA's vdev/offset is vdev_gang_header_asize(vdev).
*/
#define DVA_GET_ASIZE(dva) \
BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_ASIZE(dva, x) \
BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GRID(dva) BF64_GET((dva)->dva_word[0], 24, 8)
#define DVA_SET_GRID(dva, x) BF64_SET((dva)->dva_word[0], 24, 8, x)
#define DVA_GET_VDEV(dva) BF64_GET((dva)->dva_word[0], 32, SPA_VDEVBITS)
#define DVA_SET_VDEV(dva, x) \
BF64_SET((dva)->dva_word[0], 32, SPA_VDEVBITS, x)
#define DVA_GET_OFFSET(dva) \
BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_OFFSET(dva, x) \
BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GANG(dva) BF64_GET((dva)->dva_word[1], 63, 1)
#define DVA_SET_GANG(dva, x) BF64_SET((dva)->dva_word[1], 63, 1, x)
#define BP_GET_LSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? \
(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_LSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
} while (0)
#define BP_GET_PSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
BF64_GET_SB((bp)->blk_prop, 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_PSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
} while (0)
#define BP_GET_COMPRESS(bp) \
BF64_GET((bp)->blk_prop, 32, SPA_COMPRESSBITS)
#define BP_SET_COMPRESS(bp, x) \
BF64_SET((bp)->blk_prop, 32, SPA_COMPRESSBITS, x)
#define BP_IS_EMBEDDED(bp) BF64_GET((bp)->blk_prop, 39, 1)
#define BP_SET_EMBEDDED(bp, x) BF64_SET((bp)->blk_prop, 39, 1, x)
#define BP_GET_CHECKSUM(bp) \
(BP_IS_EMBEDDED(bp) ? ZIO_CHECKSUM_OFF : \
BF64_GET((bp)->blk_prop, 40, 8))
#define BP_SET_CHECKSUM(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, x); \
} while (0)
#define BP_GET_TYPE(bp) BF64_GET((bp)->blk_prop, 48, 8)
#define BP_SET_TYPE(bp, x) BF64_SET((bp)->blk_prop, 48, 8, x)
#define BP_GET_LEVEL(bp) BF64_GET((bp)->blk_prop, 56, 5)
#define BP_SET_LEVEL(bp, x) BF64_SET((bp)->blk_prop, 56, 5, x)
/* encrypted, authenticated, and MAC cksum bps use the same bit */
#define BP_USES_CRYPT(bp) BF64_GET((bp)->blk_prop, 61, 1)
#define BP_SET_CRYPT(bp, x) BF64_SET((bp)->blk_prop, 61, 1, x)
#define BP_IS_ENCRYPTED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_IS_AUTHENTICATED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
!DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_HAS_INDIRECT_MAC_CKSUM(bp) \
(BP_USES_CRYPT(bp) && BP_GET_LEVEL(bp) > 0)
#define BP_IS_PROTECTED(bp) \
(BP_IS_ENCRYPTED(bp) || BP_IS_AUTHENTICATED(bp))
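/*
 * Illustrative sketch, not part of the original header: how a caller might
 * classify a block pointer using the crypt macros above, mirroring the
 * "X bit" discussion in the encryption comment.  The function name is
 * hypothetical and the returned strings are arbitrary labels.
 */
static inline const char *
bp_crypt_class_example(const blkptr_t *bp)
{
	if (!BP_USES_CRYPT(bp))
		return ("unprotected");		/* X bit clear */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp))
		return ("indirect MAC");	/* level > 0: checksum-of-MACs */
	if (BP_IS_ENCRYPTED(bp))
		return ("encrypted");		/* level <= 0, encrypted type */
	return ("authenticated");		/* level <= 0, HMAC only */
}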
#define BP_GET_DEDUP(bp) BF64_GET((bp)->blk_prop, 62, 1)
#define BP_SET_DEDUP(bp, x) BF64_SET((bp)->blk_prop, 62, 1, x)
#define BP_GET_BYTEORDER(bp) BF64_GET((bp)->blk_prop, 63, 1)
#define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x)
#define BP_GET_FREE(bp) BF64_GET((bp)->blk_fill, 0, 1)
#define BP_SET_FREE(bp, x) BF64_SET((bp)->blk_fill, 0, 1, x)
#define BP_PHYSICAL_BIRTH(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)
#define BP_SET_BIRTH(bp, logical, physical) \
{ \
ASSERT(!BP_IS_EMBEDDED(bp)); \
(bp)->blk_birth = (logical); \
(bp)->blk_phys_birth = ((logical) == (physical) ? 0 : (physical)); \
}
#define BP_GET_FILL(bp) \
((BP_IS_ENCRYPTED(bp)) ? BF64_GET((bp)->blk_fill, 0, 32) : \
((BP_IS_EMBEDDED(bp)) ? 1 : (bp)->blk_fill))
#define BP_SET_FILL(bp, fill) \
{ \
if (BP_IS_ENCRYPTED(bp)) \
BF64_SET((bp)->blk_fill, 0, 32, fill); \
else \
(bp)->blk_fill = fill; \
}
#define BP_GET_IV2(bp) \
(ASSERT(BP_IS_ENCRYPTED(bp)), \
BF64_GET((bp)->blk_fill, 32, 32))
#define BP_SET_IV2(bp, iv2) \
{ \
ASSERT(BP_IS_ENCRYPTED(bp)); \
BF64_SET((bp)->blk_fill, 32, 32, iv2); \
}
#define BP_IS_METADATA(bp) \
(BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
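/*
 * Note: for encrypted bps the 3rd DVA holds the salt and IV (see the
 * encryption layout comment above), so it is excluded from the asize,
 * ndvas, and gang accounting below.
 */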
#define BP_GET_ASIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_GET_UCSIZE(bp) \
(BP_IS_METADATA(bp) ? BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))
#define BP_GET_NDVAS(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(!!DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_COUNT_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(DVA_GET_GANG(&(bp)->blk_dva[0]) + \
DVA_GET_GANG(&(bp)->blk_dva[1]) + \
(DVA_GET_GANG(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp))))
#define DVA_EQUAL(dva1, dva2) \
((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
(dva1)->dva_word[0] == (dva2)->dva_word[0])
#define BP_EQUAL(bp1, bp2) \
(BP_PHYSICAL_BIRTH(bp1) == BP_PHYSICAL_BIRTH(bp2) && \
(bp1)->blk_birth == (bp2)->blk_birth && \
DVA_EQUAL(&(bp1)->blk_dva[0], &(bp2)->blk_dva[0]) && \
DVA_EQUAL(&(bp1)->blk_dva[1], &(bp2)->blk_dva[1]) && \
DVA_EQUAL(&(bp1)->blk_dva[2], &(bp2)->blk_dva[2]))
#define DVA_IS_VALID(dva) (DVA_GET_ASIZE(dva) != 0)
#define BP_IDENTITY(bp) (ASSERT(!BP_IS_EMBEDDED(bp)), &(bp)->blk_dva[0])
#define BP_IS_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? B_FALSE : DVA_GET_GANG(BP_IDENTITY(bp)))
#define DVA_IS_EMPTY(dva) ((dva)->dva_word[0] == 0ULL && \
(dva)->dva_word[1] == 0ULL)
#define BP_IS_HOLE(bp) \
(!BP_IS_EMBEDDED(bp) && DVA_IS_EMPTY(BP_IDENTITY(bp)))
#define BP_SET_REDACTED(bp) \
{ \
BP_SET_EMBEDDED(bp, B_TRUE); \
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_REDACTED); \
}
#define BP_IS_REDACTED(bp) \
(BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_REDACTED)
/* BP_IS_RAIDZ(bp) assumes no block compression */
#define BP_IS_RAIDZ(bp) (DVA_GET_ASIZE(&(bp)->blk_dva[0]) > \
BP_GET_PSIZE(bp))
#define BP_ZERO(bp) \
{ \
(bp)->blk_dva[0].dva_word[0] = 0; \
(bp)->blk_dva[0].dva_word[1] = 0; \
(bp)->blk_dva[1].dva_word[0] = 0; \
(bp)->blk_dva[1].dva_word[1] = 0; \
(bp)->blk_dva[2].dva_word[0] = 0; \
(bp)->blk_dva[2].dva_word[1] = 0; \
(bp)->blk_prop = 0; \
(bp)->blk_pad[0] = 0; \
(bp)->blk_pad[1] = 0; \
(bp)->blk_phys_birth = 0; \
(bp)->blk_birth = 0; \
(bp)->blk_fill = 0; \
ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \
}
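/*
 * Illustrative sketch, not part of the original header: building an
 * embedded blkptr_t in the order the embedded-bp comment requires --
 * BP_SET_EMBEDDED() before any BPE_SET_*() macro.  Real callers (see
 * blkptr.c) also set the type, compression, byteorder, and birth txg;
 * the function and parameter names here are hypothetical.
 */
static inline void
bp_embed_payload_example(blkptr_t *bp, const uint64_t *payload,
    uint64_t lsize, uint64_t psize)
{
	uint64_t *wp = (uint64_t *)bp;
	int w, i;

	BP_ZERO(bp);
	BP_SET_EMBEDDED(bp, B_TRUE);	/* must precede the BPE_SET_*() calls */
	BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
	BPE_SET_LSIZE(bp, lsize);	/* byte units, unlike non-embedded bps */
	BPE_SET_PSIZE(bp, psize);

	/* Copy the payload into every word except blk_prop and blk_birth. */
	for (w = 0, i = 0; w < (int)(sizeof (blkptr_t) / sizeof (uint64_t)) &&
	    i < BPE_NUM_WORDS; w++) {
		if (BPE_IS_PAYLOADWORD(bp, &wp[w]))
			wp[w] = payload[i++];
	}
}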
#ifdef _ZFS_BIG_ENDIAN
#define ZFS_HOST_BYTEORDER (0ULL)
#else
#define ZFS_HOST_BYTEORDER (1ULL)
#endif
#define BP_SHOULD_BYTESWAP(bp) (BP_GET_BYTEORDER(bp) != ZFS_HOST_BYTEORDER)
#define BP_SPRINTF_LEN 400
/*
* This macro allows code sharing between zfs, libzpool, and mdb.
* 'func' is either kmem_scnprintf() or mdb_snprintf().
* 'ws' (whitespace) can be ' ' for single-line format, '\n' for multi-line.
*/
#define SNPRINTF_BLKPTR(func, ws, buf, size, bp, type, checksum, compress) \
{ \
static const char *const copyname[] = \
{ "zero", "single", "double", "triple" }; \
int len = 0; \
int copies = 0; \
const char *crypt_type; \
if (bp != NULL) { \
if (BP_IS_ENCRYPTED(bp)) { \
crypt_type = "encrypted"; \
/* LINTED E_SUSPICIOUS_COMPARISON */ \
} else if (BP_IS_AUTHENTICATED(bp)) { \
crypt_type = "authenticated"; \
} else if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) { \
crypt_type = "indirect-MAC"; \
} else { \
crypt_type = "unencrypted"; \
} \
} \
if (bp == NULL) { \
len += func(buf + len, size - len, "<NULL>"); \
} else if (BP_IS_HOLE(bp)) { \
len += func(buf + len, size - len, \
"HOLE [L%llu %s] " \
"size=%llxL birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else if (BP_IS_EMBEDDED(bp)) { \
len = func(buf + len, size - len, \
"EMBEDDED [L%llu %s] et=%u %s " \
"size=%llxL/%llxP birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(int)BPE_GET_ETYPE(bp), \
compress, \
(u_longlong_t)BPE_GET_LSIZE(bp), \
(u_longlong_t)BPE_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else if (BP_IS_REDACTED(bp)) { \
len += func(buf + len, size - len, \
"REDACTED [L%llu %s] size=%llxL birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else { \
for (int d = 0; d < BP_GET_NDVAS(bp); d++) { \
const dva_t *dva = &bp->blk_dva[d]; \
if (DVA_IS_VALID(dva)) \
copies++; \
len += func(buf + len, size - len, \
"DVA[%d]=<%llu:%llx:%llx>%c", d, \
(u_longlong_t)DVA_GET_VDEV(dva), \
(u_longlong_t)DVA_GET_OFFSET(dva), \
(u_longlong_t)DVA_GET_ASIZE(dva), \
ws); \
} \
ASSERT3S(copies, >, 0); \
if (BP_IS_ENCRYPTED(bp)) { \
len += func(buf + len, size - len, \
"salt=%llx iv=%llx:%llx%c", \
(u_longlong_t)bp->blk_dva[2].dva_word[0], \
(u_longlong_t)bp->blk_dva[2].dva_word[1], \
(u_longlong_t)BP_GET_IV2(bp), \
ws); \
} \
if (BP_IS_GANG(bp) && \
DVA_GET_ASIZE(&bp->blk_dva[2]) <= \
DVA_GET_ASIZE(&bp->blk_dva[1]) / 2) \
copies--; \
len += func(buf + len, size - len, \
"[L%llu %s] %s %s %s %s %s %s %s%c" \
"size=%llxL/%llxP birth=%lluL/%lluP fill=%llu%c" \
"cksum=%016llx:%016llx:%016llx:%016llx", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
checksum, \
compress, \
crypt_type, \
BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE", \
BP_IS_GANG(bp) ? "gang" : "contiguous", \
BP_GET_DEDUP(bp) ? "dedup" : "unique", \
copyname[copies], \
ws, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)BP_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth, \
(u_longlong_t)BP_PHYSICAL_BIRTH(bp), \
(u_longlong_t)BP_GET_FILL(bp), \
ws, \
(u_longlong_t)bp->blk_cksum.zc_word[0], \
(u_longlong_t)bp->blk_cksum.zc_word[1], \
(u_longlong_t)bp->blk_cksum.zc_word[2], \
(u_longlong_t)bp->blk_cksum.zc_word[3]); \
} \
ASSERT(len < size); \
}
#define BP_GET_BUFC_TYPE(bp) \
(BP_IS_METADATA(bp) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)
typedef enum spa_import_type {
SPA_IMPORT_EXISTING,
SPA_IMPORT_ASSEMBLE
} spa_import_type_t;
typedef enum spa_mode {
SPA_MODE_UNINIT = 0,
SPA_MODE_READ = 1,
SPA_MODE_WRITE = 2,
} spa_mode_t;
/*
* Send TRIM commands in-line during normal pool operation while deleting.
* OFF: no
* ON: yes
* NB: IN_FREEBSD_BASE is defined within the FreeBSD sources.
*/
typedef enum {
SPA_AUTOTRIM_OFF = 0, /* default */
SPA_AUTOTRIM_ON,
#ifdef IN_FREEBSD_BASE
SPA_AUTOTRIM_DEFAULT = SPA_AUTOTRIM_ON,
#else
SPA_AUTOTRIM_DEFAULT = SPA_AUTOTRIM_OFF,
#endif
} spa_autotrim_t;
/*
* Reason TRIM command was issued, used internally for accounting purposes.
*/
typedef enum trim_type {
TRIM_TYPE_MANUAL = 0,
TRIM_TYPE_AUTO = 1,
TRIM_TYPE_SIMPLE = 2
} trim_type_t;
/* state manipulation functions */
extern int spa_open(const char *pool, spa_t **, const void *tag);
extern int spa_open_rewind(const char *pool, spa_t **, const void *tag,
nvlist_t *policy, nvlist_t **config);
extern int spa_get_stats(const char *pool, nvlist_t **config, char *altroot,
size_t buflen);
extern int spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, struct dsl_crypto_params *dcp);
extern int spa_import(char *pool, nvlist_t *config, nvlist_t *props,
uint64_t flags);
extern nvlist_t *spa_tryimport(nvlist_t *tryconfig);
extern int spa_destroy(const char *pool);
extern int spa_checkpoint(const char *pool);
extern int spa_checkpoint_discard(const char *pool);
extern int spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce);
extern int spa_reset(const char *pool);
extern void spa_async_request(spa_t *spa, int flag);
extern void spa_async_unrequest(spa_t *spa, int flag);
extern void spa_async_suspend(spa_t *spa);
extern void spa_async_resume(spa_t *spa);
extern int spa_async_tasks(spa_t *spa);
extern spa_t *spa_inject_addref(char *pool);
extern void spa_inject_delref(spa_t *spa);
extern void spa_scan_stat_init(spa_t *spa);
extern int spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps);
extern int bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
extern int bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
#define SPA_ASYNC_CONFIG_UPDATE 0x01
#define SPA_ASYNC_REMOVE 0x02
#define SPA_ASYNC_PROBE 0x04
#define SPA_ASYNC_RESILVER_DONE 0x08
#define SPA_ASYNC_RESILVER 0x10
#define SPA_ASYNC_AUTOEXPAND 0x20
#define SPA_ASYNC_REMOVE_DONE 0x40
#define SPA_ASYNC_REMOVE_STOP 0x80
#define SPA_ASYNC_INITIALIZE_RESTART 0x100
#define SPA_ASYNC_TRIM_RESTART 0x200
#define SPA_ASYNC_AUTOTRIM_RESTART 0x400
#define SPA_ASYNC_L2CACHE_REBUILD 0x800
#define SPA_ASYNC_L2CACHE_TRIM 0x1000
#define SPA_ASYNC_REBUILD_DONE 0x2000
#define SPA_ASYNC_DETACH_SPARE 0x4000
/* device manipulation */
extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot);
extern int spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot,
int replacing, int rebuild);
extern int spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid,
int replace_done);
extern int spa_vdev_alloc(spa_t *spa, uint64_t guid);
extern int spa_vdev_noalloc(spa_t *spa, uint64_t guid);
extern boolean_t spa_vdev_remove_active(spa_t *spa);
extern int spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist);
extern int spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist);
extern int spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath);
extern int spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru);
extern int spa_vdev_split_mirror(spa_t *spa, const char *newname,
nvlist_t *config, nvlist_t *props, boolean_t exp);
/* spare state (which is global across all pools) */
extern void spa_spare_add(vdev_t *vd);
extern void spa_spare_remove(vdev_t *vd);
extern boolean_t spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt);
extern void spa_spare_activate(vdev_t *vd);
/* L2ARC state (which is global across all pools) */
extern void spa_l2cache_add(vdev_t *vd);
extern void spa_l2cache_remove(vdev_t *vd);
extern boolean_t spa_l2cache_exists(uint64_t guid, uint64_t *pool);
extern void spa_l2cache_activate(vdev_t *vd);
extern void spa_l2cache_drop(spa_t *spa);
/* scanning */
extern int spa_scan(spa_t *spa, pool_scan_func_t func);
extern int spa_scan_stop(spa_t *spa);
extern int spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t flag);
/* spa syncing */
extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */
extern void spa_sync_allpools(void);
extern uint_t zfs_sync_pass_deferred_free;
/* spa namespace global mutex */
extern kmutex_t spa_namespace_lock;
/*
* SPA configuration functions in spa_config.c
*/
#define SPA_CONFIG_UPDATE_POOL 0
#define SPA_CONFIG_UPDATE_VDEVS 1
extern void spa_write_cachefile(spa_t *, boolean_t, boolean_t, boolean_t);
extern void spa_config_load(void);
extern nvlist_t *spa_all_configs(uint64_t *);
extern void spa_config_set(spa_t *spa, nvlist_t *config);
extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg,
int getstats);
extern void spa_config_update(spa_t *spa, int what);
extern int spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv,
vdev_t *parent, uint_t id, int atype);
/*
* Miscellaneous SPA routines in spa_misc.c
*/
/* Namespace manipulation */
extern spa_t *spa_lookup(const char *name);
extern spa_t *spa_add(const char *name, nvlist_t *config, const char *altroot);
extern void spa_remove(spa_t *spa);
extern spa_t *spa_next(spa_t *prev);
/* Refcount functions */
extern void spa_open_ref(spa_t *spa, const void *tag);
extern void spa_close(spa_t *spa, const void *tag);
extern void spa_async_close(spa_t *spa, const void *tag);
extern boolean_t spa_refcount_zero(spa_t *spa);
#define SCL_NONE 0x00
#define SCL_CONFIG 0x01
#define SCL_STATE 0x02
#define SCL_L2ARC 0x04 /* hack until L2ARC 2.0 */
#define SCL_ALLOC 0x08
#define SCL_ZIO 0x10
#define SCL_FREE 0x20
#define SCL_VDEV 0x40
#define SCL_LOCKS 7
#define SCL_ALL ((1 << SCL_LOCKS) - 1)
#define SCL_STATE_ALL (SCL_STATE | SCL_L2ARC | SCL_ZIO)
/* Historical pool statistics */
typedef struct spa_history_kstat {
kmutex_t lock;
uint64_t count;
uint64_t size;
kstat_t *kstat;
void *priv;
list_t list;
} spa_history_kstat_t;
typedef struct spa_history_list {
uint64_t size;
procfs_list_t procfs_list;
} spa_history_list_t;
typedef struct spa_stats {
spa_history_list_t read_history;
spa_history_list_t txg_history;
spa_history_kstat_t tx_assign_histogram;
spa_history_list_t mmp_history;
spa_history_kstat_t state; /* pool state */
spa_history_kstat_t guid; /* pool guid */
spa_history_kstat_t iostats;
} spa_stats_t;
typedef enum txg_state {
TXG_STATE_BIRTH = 0,
TXG_STATE_OPEN = 1,
TXG_STATE_QUIESCED = 2,
TXG_STATE_WAIT_FOR_SYNC = 3,
TXG_STATE_SYNCED = 4,
TXG_STATE_COMMITTED = 5,
} txg_state_t;
typedef struct txg_stat {
vdev_stat_t vs1;
vdev_stat_t vs2;
uint64_t txg;
uint64_t ndirty;
} txg_stat_t;
/* Assorted pool IO kstats */
typedef struct spa_iostats {
kstat_named_t trim_extents_written;
kstat_named_t trim_bytes_written;
kstat_named_t trim_extents_skipped;
kstat_named_t trim_bytes_skipped;
kstat_named_t trim_extents_failed;
kstat_named_t trim_bytes_failed;
kstat_named_t autotrim_extents_written;
kstat_named_t autotrim_bytes_written;
kstat_named_t autotrim_extents_skipped;
kstat_named_t autotrim_bytes_skipped;
kstat_named_t autotrim_extents_failed;
kstat_named_t autotrim_bytes_failed;
kstat_named_t simple_trim_extents_written;
kstat_named_t simple_trim_bytes_written;
kstat_named_t simple_trim_extents_skipped;
kstat_named_t simple_trim_bytes_skipped;
kstat_named_t simple_trim_extents_failed;
kstat_named_t simple_trim_bytes_failed;
} spa_iostats_t;
extern void spa_stats_init(spa_t *spa);
extern void spa_stats_destroy(spa_t *spa);
extern void spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb,
uint32_t aflags);
extern void spa_txg_history_add(spa_t *spa, uint64_t txg, hrtime_t birth_time);
extern int spa_txg_history_set(spa_t *spa, uint64_t txg,
txg_state_t completed_state, hrtime_t completed_time);
extern txg_stat_t *spa_txg_history_init_io(spa_t *, uint64_t,
struct dsl_pool *);
extern void spa_txg_history_fini_io(spa_t *, txg_stat_t *);
extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs);
extern int spa_mmp_history_set_skip(spa_t *spa, uint64_t mmp_kstat_id);
extern int spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error,
hrtime_t duration);
extern void spa_mmp_history_add(spa_t *spa, uint64_t txg, uint64_t timestamp,
uint64_t mmp_delay, vdev_t *vd, int label, uint64_t mmp_kstat_id,
int error);
extern void spa_iostats_trim_add(spa_t *spa, trim_type_t type,
uint64_t extents_written, uint64_t bytes_written,
uint64_t extents_skipped, uint64_t bytes_skipped,
uint64_t extents_failed, uint64_t bytes_failed);
extern void spa_import_progress_add(spa_t *spa);
extern void spa_import_progress_remove(uint64_t spa_guid);
extern int spa_import_progress_set_mmp_check(uint64_t pool_guid,
uint64_t mmp_sec_remaining);
extern int spa_import_progress_set_max_txg(uint64_t pool_guid,
uint64_t max_txg);
extern int spa_import_progress_set_state(uint64_t pool_guid,
spa_load_state_t spa_load_state);
/* Pool configuration locks */
extern int spa_config_tryenter(spa_t *spa, int locks, const void *tag,
krw_t rw);
extern void spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw);
extern void spa_config_enter_mmp(spa_t *spa, int locks, const void *tag,
krw_t rw);
extern void spa_config_exit(spa_t *spa, int locks, const void *tag);
extern int spa_config_held(spa_t *spa, int locks, krw_t rw);
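/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * for the pool configuration locks declared above.  FTAG is the
 * conventional ZFS tag macro; the lock set (SCL_CONFIG here) and the
 * read/write mode depend on what the caller touches.
 */
static inline void
spa_config_read_lock_example(spa_t *spa)
{
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	/* ... read pool configuration state here ... */
	spa_config_exit(spa, SCL_CONFIG, FTAG);
}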
/* Pool vdev add/remove lock */
extern uint64_t spa_vdev_enter(spa_t *spa);
extern uint64_t spa_vdev_detach_enter(spa_t *spa, uint64_t guid);
extern uint64_t spa_vdev_config_enter(spa_t *spa);
extern void spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg,
int error, const char *tag);
extern int spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error);
/* Pool vdev state change lock */
extern void spa_vdev_state_enter(spa_t *spa, int oplock);
extern int spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error);
/* Log state */
typedef enum spa_log_state {
SPA_LOG_UNKNOWN = 0, /* unknown log state */
SPA_LOG_MISSING, /* missing log(s) */
SPA_LOG_CLEAR, /* clear the log(s) */
SPA_LOG_GOOD, /* log(s) are good */
} spa_log_state_t;
extern spa_log_state_t spa_get_log_state(spa_t *spa);
extern void spa_set_log_state(spa_t *spa, spa_log_state_t state);
extern int spa_reset_logs(spa_t *spa);
/* Log claim callback */
extern void spa_claim_notify(zio_t *zio);
extern void spa_deadman(void *);
/* Accessor functions */
extern boolean_t spa_shutting_down(spa_t *spa);
extern struct dsl_pool *spa_get_dsl(spa_t *spa);
extern boolean_t spa_is_initializing(spa_t *spa);
extern boolean_t spa_indirect_vdevs_loaded(spa_t *spa);
extern blkptr_t *spa_get_rootblkptr(spa_t *spa);
extern void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp);
extern void spa_altroot(spa_t *, char *, size_t);
extern uint32_t spa_sync_pass(spa_t *spa);
extern char *spa_name(spa_t *spa);
extern uint64_t spa_guid(spa_t *spa);
extern uint64_t spa_load_guid(spa_t *spa);
extern uint64_t spa_last_synced_txg(spa_t *spa);
extern uint64_t spa_first_txg(spa_t *spa);
extern uint64_t spa_syncing_txg(spa_t *spa);
extern uint64_t spa_final_dirty_txg(spa_t *spa);
extern uint64_t spa_version(spa_t *spa);
extern pool_state_t spa_state(spa_t *spa);
extern spa_load_state_t spa_load_state(spa_t *spa);
extern uint64_t spa_freeze_txg(spa_t *spa);
extern uint64_t spa_get_worst_case_asize(spa_t *spa, uint64_t lsize);
extern uint64_t spa_get_dspace(spa_t *spa);
extern uint64_t spa_get_checkpoint_space(spa_t *spa);
extern uint64_t spa_get_slop_space(spa_t *spa);
extern void spa_update_dspace(spa_t *spa);
extern boolean_t spa_deflate(spa_t *spa);
extern metaslab_class_t *spa_normal_class(spa_t *spa);
extern metaslab_class_t *spa_log_class(spa_t *spa);
extern metaslab_class_t *spa_embedded_log_class(spa_t *spa);
extern metaslab_class_t *spa_special_class(spa_t *spa);
extern metaslab_class_t *spa_dedup_class(spa_t *spa);
extern metaslab_class_t *spa_preferred_class(spa_t *spa, uint64_t size,
dmu_object_type_t objtype, uint_t level, uint_t special_smallblk);
extern void spa_evicting_os_register(spa_t *, objset_t *os);
extern void spa_evicting_os_deregister(spa_t *, objset_t *os);
extern void spa_evicting_os_wait(spa_t *spa);
extern int spa_max_replication(spa_t *spa);
extern int spa_prev_software_version(spa_t *spa);
extern uint64_t spa_get_failmode(spa_t *spa);
extern uint64_t spa_get_deadman_failmode(spa_t *spa);
extern void spa_set_deadman_failmode(spa_t *spa, const char *failmode);
extern boolean_t spa_suspended(spa_t *spa);
extern uint64_t spa_bootfs(spa_t *spa);
extern uint64_t spa_delegation(spa_t *spa);
extern objset_t *spa_meta_objset(spa_t *spa);
extern space_map_t *spa_syncing_log_sm(spa_t *spa);
extern uint64_t spa_deadman_synctime(spa_t *spa);
extern uint64_t spa_deadman_ziotime(spa_t *spa);
extern uint64_t spa_dirty_data(spa_t *spa);
extern spa_autotrim_t spa_get_autotrim(spa_t *spa);
/* Miscellaneous support routines */
extern void spa_load_failed(spa_t *spa, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
extern void spa_load_note(spa_t *spa, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
extern void spa_activate_mos_feature(spa_t *spa, const char *feature,
dmu_tx_t *tx);
extern void spa_deactivate_mos_feature(spa_t *spa, const char *feature);
extern spa_t *spa_by_guid(uint64_t pool_guid, uint64_t device_guid);
extern boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid);
extern char *spa_strdup(const char *);
extern void spa_strfree(char *);
extern uint64_t spa_generate_guid(spa_t *spa);
extern void snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp);
extern void spa_freeze(spa_t *spa);
extern int spa_change_guid(spa_t *spa);
extern void spa_upgrade(spa_t *spa, uint64_t version);
extern void spa_evict_all(void);
extern vdev_t *spa_lookup_by_guid(spa_t *spa, uint64_t guid,
boolean_t l2cache);
extern boolean_t spa_has_l2cache(spa_t *, uint64_t guid);
extern boolean_t spa_has_spare(spa_t *, uint64_t guid);
extern uint64_t dva_get_dsize_sync(spa_t *spa, const dva_t *dva);
extern uint64_t bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp);
extern uint64_t bp_get_dsize(spa_t *spa, const blkptr_t *bp);
extern boolean_t spa_has_slogs(spa_t *spa);
extern boolean_t spa_is_root(spa_t *spa);
extern boolean_t spa_writeable(spa_t *spa);
extern boolean_t spa_has_pending_synctask(spa_t *spa);
extern int spa_maxblocksize(spa_t *spa);
extern int spa_maxdnodesize(spa_t *spa);
extern boolean_t spa_has_checkpoint(spa_t *spa);
extern boolean_t spa_importing_readonly_checkpoint(spa_t *spa);
extern boolean_t spa_suspend_async_destroy(spa_t *spa);
extern uint64_t spa_min_claim_txg(spa_t *spa);
extern boolean_t zfs_dva_valid(spa_t *spa, const dva_t *dva,
const blkptr_t *bp);
typedef void (*spa_remap_cb_t)(uint64_t vdev, uint64_t offset, uint64_t size,
void *arg);
extern boolean_t spa_remap_blkptr(spa_t *spa, blkptr_t *bp,
spa_remap_cb_t callback, void *arg);
extern uint64_t spa_get_last_removal_txg(spa_t *spa);
extern boolean_t spa_trust_config(spa_t *spa);
extern uint64_t spa_missing_tvds_allowed(spa_t *spa);
extern void spa_set_missing_tvds(spa_t *spa, uint64_t missing);
extern boolean_t spa_top_vdevs_spacemap_addressable(spa_t *spa);
extern uint64_t spa_total_metaslabs(spa_t *spa);
extern boolean_t spa_multihost(spa_t *spa);
extern uint32_t spa_get_hostid(spa_t *spa);
extern void spa_activate_allocation_classes(spa_t *, dmu_tx_t *);
extern boolean_t spa_livelist_delete_check(spa_t *spa);
extern spa_mode_t spa_mode(spa_t *spa);
extern uint64_t zfs_strtonum(const char *str, char **nptr);
extern char *spa_his_ievent_table[];
extern void spa_history_create_obj(spa_t *spa, dmu_tx_t *tx);
extern int spa_history_get(spa_t *spa, uint64_t *offset, uint64_t *len_read,
char *his_buf);
extern int spa_history_log(spa_t *spa, const char *his_buf);
extern int spa_history_log_nvl(spa_t *spa, nvlist_t *nvl);
extern void spa_history_log_version(spa_t *spa, const char *operation,
dmu_tx_t *tx);
extern void spa_history_log_internal(spa_t *spa, const char *operation,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern void spa_history_log_internal_ds(struct dsl_dataset *ds, const char *op,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern void spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern const char *spa_state_to_name(spa_t *spa);
/* error handling */
struct zbookmark_phys;
extern void spa_log_error(spa_t *spa, const zbookmark_phys_t *zb,
const uint64_t *birth);
extern void spa_remove_error(spa_t *spa, zbookmark_phys_t *zb,
const uint64_t *birth);
extern int zfs_ereport_post(const char *clazz, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t state);
extern boolean_t zfs_ereport_is_valid(const char *clazz, spa_t *spa, vdev_t *vd,
zio_t *zio);
extern void zfs_ereport_taskq_fini(void);
extern void zfs_ereport_clear(spa_t *spa, vdev_t *vd);
extern nvlist_t *zfs_event_create(spa_t *spa, vdev_t *vd, const char *type,
const char *name, nvlist_t *aux);
extern void zfs_post_remove(spa_t *spa, vdev_t *vd);
extern void zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate);
extern void zfs_post_autoreplace(spa_t *spa, vdev_t *vd);
extern uint64_t spa_approx_errlog_size(spa_t *spa);
extern int spa_get_errlog(spa_t *spa, void *uaddr, uint64_t *count);
extern uint64_t spa_get_last_errlog_size(spa_t *spa);
extern void spa_errlog_rotate(spa_t *spa);
extern void spa_errlog_drain(spa_t *spa);
extern void spa_errlog_sync(spa_t *spa, uint64_t txg);
extern void spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub);
extern void spa_delete_dataset_errlog(spa_t *spa, uint64_t ds, dmu_tx_t *tx);
extern void spa_swap_errlog(spa_t *spa, uint64_t new_head_ds,
uint64_t old_head_ds, dmu_tx_t *tx);
extern void sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj,
dmu_tx_t *tx);
extern void spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx);
extern int find_top_affected_fs(spa_t *spa, uint64_t head_ds,
zbookmark_err_phys_t *zep, uint64_t *top_affected_fs);
extern int find_birth_txg(struct dsl_dataset *ds, zbookmark_err_phys_t *zep,
uint64_t *birth_txg);
extern void zep_to_zb(uint64_t dataset, zbookmark_err_phys_t *zep,
zbookmark_phys_t *zb);
extern void name_to_errphys(char *buf, zbookmark_err_phys_t *zep);
-/* vdev cache */
-extern void vdev_cache_stat_init(void);
-extern void vdev_cache_stat_fini(void);
-
/* vdev mirror */
extern void vdev_mirror_stat_init(void);
extern void vdev_mirror_stat_fini(void);
/* Initialization and termination */
extern void spa_init(spa_mode_t mode);
extern void spa_fini(void);
extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
extern int spa_prop_get(spa_t *spa, nvlist_t **nvp);
extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
/* asynchronous event notification */
extern void spa_event_notify(spa_t *spa, vdev_t *vdev, nvlist_t *hist_nvl,
const char *name);
extern void zfs_ereport_zvol_post(const char *subclass, const char *name,
const char *device_name, const char *raw_name);
/* waiting for pool activities to complete */
extern int spa_wait(const char *pool, zpool_wait_activity_t activity,
boolean_t *waited);
extern int spa_wait_tag(const char *name, zpool_wait_activity_t activity,
uint64_t tag, boolean_t *waited);
extern void spa_notify_waiters(spa_t *spa);
extern void spa_wake_waiters(spa_t *spa);
extern void spa_import_os(spa_t *spa);
extern void spa_export_os(spa_t *spa);
extern void spa_activate_os(spa_t *spa);
extern void spa_deactivate_os(spa_t *spa);
/* module param call functions */
int param_set_deadman_ziotime(ZFS_MODULE_PARAM_ARGS);
int param_set_deadman_synctime(ZFS_MODULE_PARAM_ARGS);
int param_set_slop_shift(ZFS_MODULE_PARAM_ARGS);
int param_set_deadman_failmode(ZFS_MODULE_PARAM_ARGS);
#ifdef ZFS_DEBUG
#define dprintf_bp(bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, (bp)); \
dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
} while (0)
#else
#define dprintf_bp(bp, fmt, ...)
#endif
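/*
 * Illustrative usage sketch, not part of the original header: dprintf_bp()
 * takes a printf-style format plus arguments and appends the rendered
 * block pointer.  The function name and object argument are hypothetical.
 */
static inline void
dprintf_bp_example(const blkptr_t *bp, uint64_t object)
{
	(void) bp, (void) object;	/* unused when ZFS_DEBUG is not set */
	dprintf_bp(bp, "loading bp for object %llu", (u_longlong_t)object);
}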
extern spa_mode_t spa_mode_global;
extern int zfs_deadman_enabled;
extern uint64_t zfs_deadman_synctime_ms;
extern uint64_t zfs_deadman_ziotime_ms;
extern uint64_t zfs_deadman_checktime_ms;
extern kmem_cache_t *zio_buf_cache[];
extern kmem_cache_t *zio_data_buf_cache[];
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPA_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev.h b/sys/contrib/openzfs/include/sys/vdev.h
index d529bbcdd9a4..26c834ff57cf 100644
--- a/sys/contrib/openzfs/include/sys/vdev.h
+++ b/sys/contrib/openzfs/include/sys/vdev.h
@@ -1,231 +1,225 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
*/
#ifndef _SYS_VDEV_H
#define _SYS_VDEV_H
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu.h>
#include <sys/space_map.h>
#include <sys/metaslab.h>
#include <sys/fs/zfs.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef enum vdev_dtl_type {
DTL_MISSING, /* 0% replication: no copies of the data */
DTL_PARTIAL, /* less than 100% replication: some copies missing */
DTL_SCRUB, /* unable to fully repair during scrub/resilver */
DTL_OUTAGE, /* temporarily missing (used to attempt detach) */
DTL_TYPES
} vdev_dtl_type_t;
extern int zfs_nocacheflush;
typedef boolean_t vdev_open_children_func_t(vdev_t *vd);
extern void vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
extern void vdev_dbgmsg_print_tree(vdev_t *, int);
extern int vdev_open(vdev_t *);
extern void vdev_open_children(vdev_t *);
extern void vdev_open_children_subset(vdev_t *, vdev_open_children_func_t *);
extern int vdev_validate(vdev_t *);
extern int vdev_copy_path_strict(vdev_t *, vdev_t *);
extern void vdev_copy_path_relaxed(vdev_t *, vdev_t *);
extern void vdev_close(vdev_t *);
extern int vdev_create(vdev_t *, uint64_t txg, boolean_t isreplace);
extern void vdev_reopen(vdev_t *);
extern int vdev_validate_aux(vdev_t *vd);
extern zio_t *vdev_probe(vdev_t *vd, zio_t *pio);
extern boolean_t vdev_is_concrete(vdev_t *vd);
extern boolean_t vdev_is_bootable(vdev_t *vd);
extern vdev_t *vdev_lookup_top(spa_t *spa, uint64_t vdev);
extern vdev_t *vdev_lookup_by_guid(vdev_t *vd, uint64_t guid);
extern int vdev_count_leaves(spa_t *spa);
extern void vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t d,
uint64_t txg, uint64_t size);
extern boolean_t vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t d,
uint64_t txg, uint64_t size);
extern boolean_t vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t d);
extern boolean_t vdev_default_need_resilver(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
extern boolean_t vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
extern void vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done);
extern boolean_t vdev_dtl_required(vdev_t *vd);
extern boolean_t vdev_resilver_needed(vdev_t *vd,
uint64_t *minp, uint64_t *maxp);
extern void vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj,
dmu_tx_t *tx);
extern uint64_t vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset,
uint64_t size);
extern void spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev,
uint64_t offset, uint64_t size, dmu_tx_t *tx);
extern boolean_t vdev_replace_in_progress(vdev_t *vdev);
extern void vdev_hold(vdev_t *);
extern void vdev_rele(vdev_t *);
extern int vdev_metaslab_init(vdev_t *vd, uint64_t txg);
extern void vdev_metaslab_fini(vdev_t *vd);
extern void vdev_metaslab_set_size(vdev_t *);
extern void vdev_expand(vdev_t *vd, uint64_t txg);
extern void vdev_split(vdev_t *vd);
extern void vdev_deadman(vdev_t *vd, const char *tag);
typedef void vdev_xlate_func_t(void *arg, range_seg64_t *physical_rs);
extern boolean_t vdev_xlate_is_empty(range_seg64_t *rs);
extern void vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs);
extern void vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg);
extern void vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx);
extern metaslab_group_t *vdev_get_mg(vdev_t *vd, metaslab_class_t *mc);
extern void vdev_get_stats(vdev_t *vd, vdev_stat_t *vs);
extern void vdev_clear_stats(vdev_t *vd);
extern void vdev_stat_update(zio_t *zio, uint64_t psize);
extern void vdev_scan_stat_init(vdev_t *vd);
extern void vdev_propagate_state(vdev_t *vd);
extern void vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state,
vdev_aux_t aux);
extern boolean_t vdev_children_are_offline(vdev_t *vd);
extern void vdev_space_update(vdev_t *vd,
int64_t alloc_delta, int64_t defer_delta, int64_t space_delta);
extern int64_t vdev_deflated_space(vdev_t *vd, int64_t space);
extern uint64_t vdev_psize_to_asize(vdev_t *vd, uint64_t psize);
/*
* Return the amount of space allocated for a gang block header.
*/
static inline uint64_t
vdev_gang_header_asize(vdev_t *vd)
{
return (vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE));
}
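/*
 * Illustrative sketch, not part of the original header: per the gang-block
 * note on DVA_GET_ASIZE() in spa.h, the asize of a gang DVA covers its
 * children as well; only vdev_gang_header_asize() bytes are allocated at
 * the DVA's own vdev/offset.  The function name is hypothetical.
 */
static inline uint64_t
vdev_gang_children_asize_example(vdev_t *vd, const dva_t *dva)
{
	return (DVA_GET_ASIZE(dva) - vdev_gang_header_asize(vd));
}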
extern int vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux);
extern int vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux);
extern int vdev_online(spa_t *spa, uint64_t guid, uint64_t flags,
vdev_state_t *);
extern int vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags);
extern int vdev_remove_wanted(spa_t *spa, uint64_t guid);
extern void vdev_clear(spa_t *spa, vdev_t *vd);
extern boolean_t vdev_is_dead(vdev_t *vd);
extern boolean_t vdev_readable(vdev_t *vd);
extern boolean_t vdev_writeable(vdev_t *vd);
extern boolean_t vdev_allocatable(vdev_t *vd);
extern boolean_t vdev_accessible(vdev_t *vd, zio_t *zio);
extern boolean_t vdev_is_spacemap_addressable(vdev_t *vd);
-extern void vdev_cache_init(vdev_t *vd);
-extern void vdev_cache_fini(vdev_t *vd);
-extern boolean_t vdev_cache_read(zio_t *zio);
-extern void vdev_cache_write(zio_t *zio);
-extern void vdev_cache_purge(vdev_t *vd);
-
extern void vdev_queue_init(vdev_t *vd);
extern void vdev_queue_fini(vdev_t *vd);
extern zio_t *vdev_queue_io(zio_t *zio);
extern void vdev_queue_io_done(zio_t *zio);
extern void vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority);
extern int vdev_queue_length(vdev_t *vd);
extern uint64_t vdev_queue_last_offset(vdev_t *vd);
extern void vdev_config_dirty(vdev_t *vd);
extern void vdev_config_clean(vdev_t *vd);
extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg);
extern void vdev_state_dirty(vdev_t *vd);
extern void vdev_state_clean(vdev_t *vd);
extern void vdev_defer_resilver(vdev_t *vd);
extern boolean_t vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx);
typedef enum vdev_config_flag {
VDEV_CONFIG_SPARE = 1 << 0,
VDEV_CONFIG_L2CACHE = 1 << 1,
VDEV_CONFIG_MOS = 1 << 2,
VDEV_CONFIG_MISSING = 1 << 3
} vdev_config_flag_t;
extern void vdev_post_kobj_evt(vdev_t *vd);
extern void vdev_clear_kobj_evt(vdev_t *vd);
extern void vdev_top_config_generate(spa_t *spa, nvlist_t *config);
extern nvlist_t *vdev_config_generate(spa_t *spa, vdev_t *vd,
boolean_t getstats, vdev_config_flag_t flags);
/*
* Label routines
*/
struct uberblock;
extern uint64_t vdev_label_offset(uint64_t psize, int l, uint64_t offset);
extern int vdev_label_number(uint64_t psize, uint64_t offset);
extern nvlist_t *vdev_label_read_config(vdev_t *vd, uint64_t txg);
extern void vdev_uberblock_load(vdev_t *, struct uberblock *, nvlist_t **);
extern void vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv);
extern void vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t
offset, uint64_t size, zio_done_func_t *done, void *priv, int flags);
extern int vdev_label_read_bootenv(vdev_t *, nvlist_t *);
extern int vdev_label_write_bootenv(vdev_t *, nvlist_t *);
typedef enum {
VDEV_LABEL_CREATE, /* create/add a new device */
VDEV_LABEL_REPLACE, /* replace an existing device */
VDEV_LABEL_SPARE, /* add a new hot spare */
VDEV_LABEL_REMOVE, /* remove an existing device */
VDEV_LABEL_L2CACHE, /* add an L2ARC cache device */
VDEV_LABEL_SPLIT /* generating new label for split-off dev */
} vdev_labeltype_t;
extern int vdev_label_init(vdev_t *vd, uint64_t txg, vdev_labeltype_t reason);
extern int vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl);
extern int vdev_prop_get(vdev_t *vd, nvlist_t *nvprops, nvlist_t *outnvl);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_impl.h b/sys/contrib/openzfs/include/sys/vdev_impl.h
index ea3043c82a39..74b3737d8ee5 100644
--- a/sys/contrib/openzfs/include/sys/vdev_impl.h
+++ b/sys/contrib/openzfs/include/sys/vdev_impl.h
@@ -1,672 +1,652 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#ifndef _SYS_VDEV_IMPL_H
#define _SYS_VDEV_IMPL_H
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/metaslab.h>
#include <sys/nvpair.h>
#include <sys/space_map.h>
#include <sys/vdev.h>
#include <sys/dkio.h>
#include <sys/uberblock_impl.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_removal.h>
#include <sys/zfs_ratelimit.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Virtual device descriptors.
*
* All storage pool operations go through the virtual device framework,
* which provides data replication and I/O scheduling.
*/
/*
* Forward declarations that lots of things need.
*/
typedef struct vdev_queue vdev_queue_t;
-typedef struct vdev_cache vdev_cache_t;
-typedef struct vdev_cache_entry vdev_cache_entry_t;
struct abd;
extern uint_t zfs_vdev_queue_depth_pct;
extern uint_t zfs_vdev_def_queue_depth;
extern uint_t zfs_vdev_async_write_max_active;
/*
* Virtual device operations
*/
typedef int vdev_init_func_t(spa_t *spa, nvlist_t *nv, void **tsd);
typedef void vdev_kobj_post_evt_func_t(vdev_t *vd);
typedef void vdev_fini_func_t(vdev_t *vd);
typedef int vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *max_size,
uint64_t *ashift, uint64_t *pshift);
typedef void vdev_close_func_t(vdev_t *vd);
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize);
typedef uint64_t vdev_min_asize_func_t(vdev_t *vd);
typedef uint64_t vdev_min_alloc_func_t(vdev_t *vd);
typedef void vdev_io_start_func_t(zio_t *zio);
typedef void vdev_io_done_func_t(zio_t *zio);
typedef void vdev_state_change_func_t(vdev_t *vd, int, int);
typedef boolean_t vdev_need_resilver_func_t(vdev_t *vd, const dva_t *dva,
size_t psize, uint64_t phys_birth);
typedef void vdev_hold_func_t(vdev_t *vd);
typedef void vdev_rele_func_t(vdev_t *vd);
typedef void vdev_remap_cb_t(uint64_t inner_offset, vdev_t *vd,
uint64_t offset, uint64_t size, void *arg);
typedef void vdev_remap_func_t(vdev_t *vd, uint64_t offset, uint64_t size,
vdev_remap_cb_t callback, void *arg);
/*
 * Given a target vdev, translates the logical range "logical" into the
 * physical range "physical"; any untranslated remainder is returned in
 * "remain".
*/
typedef void vdev_xlation_func_t(vdev_t *cvd, const range_seg64_t *logical,
range_seg64_t *physical, range_seg64_t *remain);
typedef uint64_t vdev_rebuild_asize_func_t(vdev_t *vd, uint64_t start,
uint64_t size, uint64_t max_segment);
typedef void vdev_metaslab_init_func_t(vdev_t *vd, uint64_t *startp,
uint64_t *sizep);
typedef void vdev_config_generate_func_t(vdev_t *vd, nvlist_t *nv);
typedef uint64_t vdev_nparity_func_t(vdev_t *vd);
typedef uint64_t vdev_ndisks_func_t(vdev_t *vd);
typedef const struct vdev_ops {
vdev_init_func_t *vdev_op_init;
vdev_fini_func_t *vdev_op_fini;
vdev_open_func_t *vdev_op_open;
vdev_close_func_t *vdev_op_close;
vdev_asize_func_t *vdev_op_asize;
vdev_min_asize_func_t *vdev_op_min_asize;
vdev_min_alloc_func_t *vdev_op_min_alloc;
vdev_io_start_func_t *vdev_op_io_start;
vdev_io_done_func_t *vdev_op_io_done;
vdev_state_change_func_t *vdev_op_state_change;
vdev_need_resilver_func_t *vdev_op_need_resilver;
vdev_hold_func_t *vdev_op_hold;
vdev_rele_func_t *vdev_op_rele;
vdev_remap_func_t *vdev_op_remap;
vdev_xlation_func_t *vdev_op_xlate;
vdev_rebuild_asize_func_t *vdev_op_rebuild_asize;
vdev_metaslab_init_func_t *vdev_op_metaslab_init;
vdev_config_generate_func_t *vdev_op_config_generate;
vdev_nparity_func_t *vdev_op_nparity;
vdev_ndisks_func_t *vdev_op_ndisks;
vdev_kobj_post_evt_func_t *vdev_op_kobj_evt_post;
char vdev_op_type[16];
boolean_t vdev_op_leaf;
} vdev_ops_t;
/*
* Virtual device properties
*/
-struct vdev_cache_entry {
- struct abd *ve_abd;
- uint64_t ve_offset;
- clock_t ve_lastused;
- avl_node_t ve_offset_node;
- avl_node_t ve_lastused_node;
- uint32_t ve_hits;
- uint16_t ve_missed_update;
- zio_t *ve_fill_io;
-};
-
-struct vdev_cache {
- avl_tree_t vc_offset_tree;
- avl_tree_t vc_lastused_tree;
- kmutex_t vc_lock;
-};
-
typedef struct vdev_queue_class {
uint32_t vqc_active;
/*
	 * Sorted by offset or timestamp, depending on whether the queue is
	 * LBA-ordered or FIFO.
*/
avl_tree_t vqc_queued_tree;
} vdev_queue_class_t;
struct vdev_queue {
vdev_t *vq_vdev;
vdev_queue_class_t vq_class[ZIO_PRIORITY_NUM_QUEUEABLE];
avl_tree_t vq_active_tree;
avl_tree_t vq_read_offset_tree;
avl_tree_t vq_write_offset_tree;
avl_tree_t vq_trim_offset_tree;
uint64_t vq_last_offset;
zio_priority_t vq_last_prio; /* Last sent I/O priority. */
uint32_t vq_ia_active; /* Active interactive I/Os. */
uint32_t vq_nia_credit; /* Non-interactive I/Os credit. */
hrtime_t vq_io_complete_ts; /* time last i/o completed */
hrtime_t vq_io_delta_ts;
zio_t vq_io_search; /* used as local for stack reduction */
kmutex_t vq_lock;
};
typedef enum vdev_alloc_bias {
VDEV_BIAS_NONE,
VDEV_BIAS_LOG, /* dedicated to ZIL data (SLOG) */
VDEV_BIAS_SPECIAL, /* dedicated to ddt, metadata, and small blks */
VDEV_BIAS_DEDUP /* dedicated to dedup metadata */
} vdev_alloc_bias_t;
/*
* On-disk indirect vdev state.
*
* An indirect vdev is described exclusively in the MOS config of a pool.
* The config for an indirect vdev includes several fields, which are
* accessed in memory by a vdev_indirect_config_t.
*/
typedef struct vdev_indirect_config {
/*
* Object (in MOS) which contains the indirect mapping. This object
* contains an array of vdev_indirect_mapping_entry_phys_t ordered by
* vimep_src. The bonus buffer for this object is a
* vdev_indirect_mapping_phys_t. This object is allocated when a vdev
* removal is initiated.
*
* Note that this object can be empty if none of the data on the vdev
* has been copied yet.
*/
uint64_t vic_mapping_object;
/*
* Object (in MOS) which contains the birth times for the mapping
* entries. This object contains an array of
* vdev_indirect_birth_entry_phys_t sorted by vibe_offset. The bonus
* buffer for this object is a vdev_indirect_birth_phys_t. This object
* is allocated when a vdev removal is initiated.
*
	 * Note that this object can be empty if none of the vdev's data has
	 * yet been copied.
*/
uint64_t vic_births_object;
/*
* This is the vdev ID which was removed previous to this vdev, or
* UINT64_MAX if there are no previously removed vdevs.
*/
uint64_t vic_prev_indirect_vdev;
} vdev_indirect_config_t;
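/*
 * Illustrative sketch, not part of the original header: per the comment
 * above, the mapping object is allocated when a removal is initiated, so a
 * nonzero vic_mapping_object indicates that removal has started for this
 * vdev.  The function name is hypothetical.
 */
static inline boolean_t
vdev_removal_initiated_example(const vdev_indirect_config_t *vic)
{
	return (vic->vic_mapping_object != 0);
}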
/*
* Virtual device descriptor
*/
struct vdev {
/*
* Common to all vdev types.
*/
uint64_t vdev_id; /* child number in vdev parent */
uint64_t vdev_guid; /* unique ID for this vdev */
uint64_t vdev_guid_sum; /* self guid + all child guids */
uint64_t vdev_orig_guid; /* orig. guid prior to remove */
uint64_t vdev_asize; /* allocatable device capacity */
uint64_t vdev_min_asize; /* min acceptable asize */
uint64_t vdev_max_asize; /* max acceptable asize */
uint64_t vdev_ashift; /* block alignment shift */
/*
* Logical block alignment shift
*
* The smallest sized/aligned I/O supported by the device.
*/
uint64_t vdev_logical_ashift;
/*
* Physical block alignment shift
*
* The device supports logical I/Os with vdev_logical_ashift
* size/alignment, but optimum performance will be achieved by
* aligning/sizing requests to vdev_physical_ashift. Smaller
* requests may be inflated or incur device level read-modify-write
* operations.
*
* May be 0 to indicate no preference (i.e. use vdev_logical_ashift).
*/
uint64_t vdev_physical_ashift;
uint64_t vdev_state; /* see VDEV_STATE_* #defines */
uint64_t vdev_prevstate; /* used when reopening a vdev */
vdev_ops_t *vdev_ops; /* vdev operations */
spa_t *vdev_spa; /* spa for this vdev */
void *vdev_tsd; /* type-specific data */
vdev_t *vdev_top; /* top-level vdev */
vdev_t *vdev_parent; /* parent vdev */
vdev_t **vdev_child; /* array of children */
uint64_t vdev_children; /* number of children */
vdev_stat_t vdev_stat; /* virtual device statistics */
vdev_stat_ex_t vdev_stat_ex; /* extended statistics */
boolean_t vdev_expanding; /* expand the vdev? */
boolean_t vdev_reopening; /* reopen in progress? */
boolean_t vdev_nonrot; /* true if solid state */
int vdev_load_error; /* error on last load */
int vdev_open_error; /* error on last open */
int vdev_validate_error; /* error on last validate */
kthread_t *vdev_open_thread; /* thread opening children */
kthread_t *vdev_validate_thread; /* thread validating children */
uint64_t vdev_crtxg; /* txg when top-level was added */
uint64_t vdev_root_zap;
/*
* Top-level vdev state.
*/
uint64_t vdev_ms_array; /* metaslab array object */
uint64_t vdev_ms_shift; /* metaslab size shift */
uint64_t vdev_ms_count; /* number of metaslabs */
metaslab_group_t *vdev_mg; /* metaslab group */
metaslab_group_t *vdev_log_mg; /* embedded slog metaslab group */
metaslab_t **vdev_ms; /* metaslab array */
uint64_t vdev_pending_fastwrite; /* allocated fastwrites */
txg_list_t vdev_ms_list; /* per-txg dirty metaslab lists */
txg_list_t vdev_dtl_list; /* per-txg dirty DTL lists */
txg_node_t vdev_txg_node; /* per-txg dirty vdev linkage */
boolean_t vdev_remove_wanted; /* async remove wanted? */
boolean_t vdev_probe_wanted; /* async probe wanted? */
list_node_t vdev_config_dirty_node; /* config dirty list */
list_node_t vdev_state_dirty_node; /* state dirty list */
uint64_t vdev_deflate_ratio; /* deflation ratio (x512) */
uint64_t vdev_islog; /* is an intent log device */
uint64_t vdev_noalloc; /* device is passivated? */
uint64_t vdev_removing; /* device is being removed? */
uint64_t vdev_failfast; /* device failfast setting */
boolean_t vdev_ishole; /* is a hole in the namespace */
uint64_t vdev_top_zap;
vdev_alloc_bias_t vdev_alloc_bias; /* metaslab allocation bias */
/* pool checkpoint related */
space_map_t *vdev_checkpoint_sm; /* contains reserved blocks */
/* Initialize related */
boolean_t vdev_initialize_exit_wanted;
vdev_initializing_state_t vdev_initialize_state;
list_node_t vdev_initialize_node;
kthread_t *vdev_initialize_thread;
/* Protects vdev_initialize_thread and vdev_initialize_state. */
kmutex_t vdev_initialize_lock;
kcondvar_t vdev_initialize_cv;
uint64_t vdev_initialize_offset[TXG_SIZE];
uint64_t vdev_initialize_last_offset;
range_tree_t *vdev_initialize_tree; /* valid while initializing */
uint64_t vdev_initialize_bytes_est;
uint64_t vdev_initialize_bytes_done;
uint64_t vdev_initialize_action_time; /* start and end time */
/* TRIM related */
boolean_t vdev_trim_exit_wanted;
boolean_t vdev_autotrim_exit_wanted;
vdev_trim_state_t vdev_trim_state;
list_node_t vdev_trim_node;
kmutex_t vdev_autotrim_lock;
kcondvar_t vdev_autotrim_cv;
kcondvar_t vdev_autotrim_kick_cv;
kthread_t *vdev_autotrim_thread;
/* Protects vdev_trim_thread and vdev_trim_state. */
kmutex_t vdev_trim_lock;
kcondvar_t vdev_trim_cv;
kthread_t *vdev_trim_thread;
uint64_t vdev_trim_offset[TXG_SIZE];
uint64_t vdev_trim_last_offset;
uint64_t vdev_trim_bytes_est;
uint64_t vdev_trim_bytes_done;
uint64_t vdev_trim_rate; /* requested rate (bytes/sec) */
uint64_t vdev_trim_partial; /* requested partial TRIM */
uint64_t vdev_trim_secure; /* requested secure TRIM */
uint64_t vdev_trim_action_time; /* start and end time */
/* Rebuild related */
boolean_t vdev_rebuilding;
boolean_t vdev_rebuild_exit_wanted;
boolean_t vdev_rebuild_cancel_wanted;
boolean_t vdev_rebuild_reset_wanted;
kmutex_t vdev_rebuild_lock;
kcondvar_t vdev_rebuild_cv;
kthread_t *vdev_rebuild_thread;
vdev_rebuild_t vdev_rebuild_config;
/* For limiting outstanding I/Os (initialize, TRIM) */
kmutex_t vdev_initialize_io_lock;
kcondvar_t vdev_initialize_io_cv;
uint64_t vdev_initialize_inflight;
kmutex_t vdev_trim_io_lock;
kcondvar_t vdev_trim_io_cv;
uint64_t vdev_trim_inflight[3];
/*
* Values stored in the config for an indirect or removing vdev.
*/
vdev_indirect_config_t vdev_indirect_config;
/*
* The vdev_indirect_rwlock protects the vdev_indirect_mapping
* pointer from changing on indirect vdevs (when it is condensed).
* Note that removing (not yet indirect) vdevs have different
* access patterns (the mapping is not accessed from open context,
* e.g. from zio_read) and locking strategy (e.g. svr_lock).
*/
krwlock_t vdev_indirect_rwlock;
vdev_indirect_mapping_t *vdev_indirect_mapping;
vdev_indirect_births_t *vdev_indirect_births;
/*
* In memory data structures used to manage the obsolete sm, for
* indirect or removing vdevs.
*
* The vdev_obsolete_segments is the in-core record of the segments
* that are no longer referenced anywhere in the pool (due to
* being freed or remapped and not referenced by any snapshots).
* During a sync, segments are added to vdev_obsolete_segments
* via vdev_indirect_mark_obsolete(); at the end of each sync
* pass, this is appended to vdev_obsolete_sm via
* vdev_indirect_sync_obsolete(). The vdev_obsolete_lock
* protects against concurrent modifications of vdev_obsolete_segments
* from multiple zio threads.
*/
kmutex_t vdev_obsolete_lock;
range_tree_t *vdev_obsolete_segments;
space_map_t *vdev_obsolete_sm;
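/*
 * Rough life cycle of the fields above (illustrative sketch only; the
 * vdev_indirect_mark_obsolete() signature is assumed from the removal
 * code -- see vdev_removal.c and vdev_indirect.c for the real flow):
 *
 *	vdev_indirect_mark_obsolete(vd, offset, asize);   (open-context free)
 *	vdev_indirect_sync_obsolete(vd, tx);              (end of sync pass)
 */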
/*
* Protects the vdev_scan_io_queue field itself as well as the
* structure's contents (when present).
*/
kmutex_t vdev_scan_io_queue_lock;
struct dsl_scan_io_queue *vdev_scan_io_queue;
/*
* Leaf vdev state.
*/
range_tree_t *vdev_dtl[DTL_TYPES]; /* dirty time logs */
space_map_t *vdev_dtl_sm; /* dirty time log space map */
txg_node_t vdev_dtl_node; /* per-txg dirty DTL linkage */
uint64_t vdev_dtl_object; /* DTL object */
uint64_t vdev_psize; /* physical device capacity */
uint64_t vdev_wholedisk; /* true if this is a whole disk */
uint64_t vdev_offline; /* persistent offline state */
uint64_t vdev_faulted; /* persistent faulted state */
uint64_t vdev_degraded; /* persistent degraded state */
uint64_t vdev_removed; /* persistent removed state */
uint64_t vdev_resilver_txg; /* persistent resilvering state */
uint64_t vdev_rebuild_txg; /* persistent rebuilding state */
char *vdev_path; /* vdev path (if any) */
char *vdev_devid; /* vdev devid (if any) */
char *vdev_physpath; /* vdev device path (if any) */
char *vdev_enc_sysfs_path; /* enclosure sysfs path */
char *vdev_fru; /* physical FRU location */
uint64_t vdev_not_present; /* not present during import */
uint64_t vdev_unspare; /* unspare when resilvering done */
boolean_t vdev_nowritecache; /* true if flushwritecache failed */
boolean_t vdev_has_trim; /* TRIM is supported */
boolean_t vdev_has_securetrim; /* secure TRIM is supported */
boolean_t vdev_checkremove; /* temporary online test */
boolean_t vdev_forcefault; /* force online fault */
boolean_t vdev_splitting; /* split or repair in progress */
boolean_t vdev_delayed_close; /* delayed device close? */
boolean_t vdev_tmpoffline; /* device taken offline temporarily? */
boolean_t vdev_detached; /* device detached? */
boolean_t vdev_cant_read; /* vdev is failing all reads */
boolean_t vdev_cant_write; /* vdev is failing all writes */
boolean_t vdev_isspare; /* was a hot spare */
boolean_t vdev_isl2cache; /* was a l2cache device */
boolean_t vdev_copy_uberblocks; /* post expand copy uberblocks */
boolean_t vdev_resilver_deferred; /* resilver deferred */
boolean_t vdev_kobj_flag; /* kobj event record */
vdev_queue_t vdev_queue; /* I/O deadline schedule queue */
- vdev_cache_t vdev_cache; /* physical block cache */
spa_aux_vdev_t *vdev_aux; /* for l2cache and spares vdevs */
zio_t *vdev_probe_zio; /* root of current probe */
vdev_aux_t vdev_label_aux; /* on-disk aux state */
uint64_t vdev_leaf_zap;
hrtime_t vdev_mmp_pending; /* 0 if write finished */
uint64_t vdev_mmp_kstat_id; /* to find kstat entry */
uint64_t vdev_expansion_time; /* vdev's last expansion time */
list_node_t vdev_leaf_node; /* leaf vdev list */
/*
* For DTrace to work in userland (libzpool) context, these fields must
* remain at the end of the structure. DTrace will use the kernel's
* CTF definition for 'struct vdev', and since the size of a kmutex_t is
* larger in userland, the offsets for the rest of the fields would be
* incorrect.
*/
kmutex_t vdev_dtl_lock; /* vdev_dtl_{map,resilver} */
kmutex_t vdev_stat_lock; /* vdev_stat */
kmutex_t vdev_probe_lock; /* protects vdev_probe_zio */
/*
* We rate limit ZIO delay, deadman, and checksum events, since they
* can flood ZED with tons of events when a drive is acting up.
*/
zfs_ratelimit_t vdev_delay_rl;
zfs_ratelimit_t vdev_deadman_rl;
zfs_ratelimit_t vdev_checksum_rl;
/*
* Checksum and IO thresholds for tuning ZED
*/
uint64_t vdev_checksum_n;
uint64_t vdev_checksum_t;
uint64_t vdev_io_n;
uint64_t vdev_io_t;
};
#define VDEV_PAD_SIZE (8 << 10)
/* 2 padding areas (vl_pad1 and vl_be) to skip */
#define VDEV_SKIP_SIZE VDEV_PAD_SIZE * 2
#define VDEV_PHYS_SIZE (112 << 10)
#define VDEV_UBERBLOCK_RING (128 << 10)
/*
* MMP blocks occupy the last MMP_BLOCKS_PER_LABEL slots in the uberblock
* ring when MMP is enabled.
*/
#define MMP_BLOCKS_PER_LABEL 1
/* The largest uberblock we support is 8k. */
#define MAX_UBERBLOCK_SHIFT (13)
#define VDEV_UBERBLOCK_SHIFT(vd) \
MIN(MAX((vd)->vdev_top->vdev_ashift, UBERBLOCK_SHIFT), \
MAX_UBERBLOCK_SHIFT)
#define VDEV_UBERBLOCK_COUNT(vd) \
(VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
#define VDEV_UBERBLOCK_OFFSET(vd, n) \
offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd))
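/*
 * Worked example of the macros above, assuming the historical
 * UBERBLOCK_SHIFT of 10 (1K): for a top-level vdev with vdev_ashift = 12,
 *	VDEV_UBERBLOCK_SHIFT(vd) = MIN(MAX(12, 10), 13) = 12
 *	VDEV_UBERBLOCK_SIZE(vd)  = 1 << 12 = 4K
 *	VDEV_UBERBLOCK_COUNT(vd) = 128K >> 12 = 32 uberblocks per label
 * An ashift of 14 or more is still capped at 8K uberblocks (shift 13),
 * giving 16 slots per label.
 */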
typedef struct vdev_phys {
char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
zio_eck_t vp_zbt;
} vdev_phys_t;
typedef enum vbe_vers {
/*
* The bootenv file is stored as ASCII text in the envblock.
* It is used by the GRUB bootloader on Linux to store the
* contents of the grubenv file. The file is stored as raw ASCII,
* and is protected by an embedded checksum. By default, GRUB will
* check if the boot filesystem supports storing the environment data
* in a special location, and if so, will invoke filesystem specific
* logic to retrieve it. This can be overridden by a variable, should
* the user so desire.
*/
VB_RAW = 0,
/*
* The bootenv file is converted to an nvlist and then packed into the
* envblock.
*/
VB_NVLIST = 1
} vbe_vers_t;
typedef struct vdev_boot_envblock {
uint64_t vbe_version;
char vbe_bootenv[VDEV_PAD_SIZE - sizeof (uint64_t) -
sizeof (zio_eck_t)];
zio_eck_t vbe_zbt;
} vdev_boot_envblock_t;
_Static_assert(sizeof (vdev_boot_envblock_t) == VDEV_PAD_SIZE,
"vdev_boot_envblock_t wrong size");
typedef struct vdev_label {
char vl_pad1[VDEV_PAD_SIZE]; /* 8K */
vdev_boot_envblock_t vl_be; /* 8K */
vdev_phys_t vl_vdev_phys; /* 112K */
char vl_uberblock[VDEV_UBERBLOCK_RING]; /* 128K */
} vdev_label_t; /* 256K total */
/*
* vdev_dirty() flags
*/
#define VDD_METASLAB 0x01
#define VDD_DTL 0x02
/* Offset of embedded boot loader region on each label */
#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t))
/*
* Size of embedded boot loader region on each label.
* The total size of the first two labels plus the boot area is 4MB.
*/
#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */
/*
* Size of label regions at the start and end of each leaf device.
*/
#define VDEV_LABEL_START_SIZE (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
#define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t))
#define VDEV_LABELS 4
#define VDEV_BEST_LABEL VDEV_LABELS
#define VDEV_OFFSET_IS_LABEL(vd, off) \
(((off) < VDEV_LABEL_START_SIZE) || \
((off) >= ((vd)->vdev_psize - VDEV_LABEL_END_SIZE)))
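/*
 * Worked layout example: sizeof (vdev_label_t) is 256K, so
 *	VDEV_BOOT_OFFSET      = 2 * 256K     = 512K
 *	VDEV_BOOT_SIZE        = 7 << 19      = 3.5M
 *	VDEV_LABEL_START_SIZE = 512K + 3.5M  = 4M
 *	VDEV_LABEL_END_SIZE   = 2 * 256K     = 512K
 * Labels 0 and 1 plus the boot area occupy the first 4M of a leaf device;
 * labels 2 and 3 occupy the last 512K.
 */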
#define VDEV_ALLOC_LOAD 0
#define VDEV_ALLOC_ADD 1
#define VDEV_ALLOC_SPARE 2
#define VDEV_ALLOC_L2CACHE 3
#define VDEV_ALLOC_ROOTPOOL 4
#define VDEV_ALLOC_SPLIT 5
#define VDEV_ALLOC_ATTACH 6
/*
* Allocate or free a vdev
*/
extern vdev_t *vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid,
vdev_ops_t *ops);
extern int vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *config,
vdev_t *parent, uint_t id, int alloctype);
extern void vdev_free(vdev_t *vd);
/*
* Add or remove children and parents
*/
extern void vdev_add_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_remove_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_compact_children(vdev_t *pvd);
extern vdev_t *vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops);
extern void vdev_remove_parent(vdev_t *cvd);
/*
* vdev sync load and sync
*/
extern boolean_t vdev_log_state_valid(vdev_t *vd);
extern int vdev_load(vdev_t *vd);
extern int vdev_dtl_load(vdev_t *vd);
extern void vdev_sync(vdev_t *vd, uint64_t txg);
extern void vdev_sync_done(vdev_t *vd, uint64_t txg);
extern void vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg);
extern void vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg);
/*
* Available vdev types.
*/
extern vdev_ops_t vdev_root_ops;
extern vdev_ops_t vdev_mirror_ops;
extern vdev_ops_t vdev_replacing_ops;
extern vdev_ops_t vdev_raidz_ops;
extern vdev_ops_t vdev_draid_ops;
extern vdev_ops_t vdev_draid_spare_ops;
extern vdev_ops_t vdev_disk_ops;
extern vdev_ops_t vdev_file_ops;
extern vdev_ops_t vdev_missing_ops;
extern vdev_ops_t vdev_hole_ops;
extern vdev_ops_t vdev_spare_ops;
extern vdev_ops_t vdev_indirect_ops;
/*
* Common size functions
*/
extern void vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs);
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize);
extern uint64_t vdev_default_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_asize(vdev_t *vd);
extern void vdev_set_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_alloc(vdev_t *vd);
extern uint64_t vdev_get_nparity(vdev_t *vd);
extern uint64_t vdev_get_ndisks(vdev_t *vd);
/*
* Global variables
*/
extern int zfs_vdev_standard_sm_blksz;
/*
* Functions from vdev_indirect.c
*/
extern void vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx);
extern boolean_t vdev_indirect_should_condense(vdev_t *vd);
extern void spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx);
extern int vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj);
extern int vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise);
/*
* Other miscellaneous functions
*/
int vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj);
void vdev_metaslab_group_create(vdev_t *vd);
uint64_t vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b);
/*
* Vdev ashift optimization tunables
*/
extern uint_t zfs_vdev_min_auto_ashift;
extern uint_t zfs_vdev_max_auto_ashift;
int param_set_min_auto_ashift(ZFS_MODULE_PARAM_ARGS);
int param_set_max_auto_ashift(ZFS_MODULE_PARAM_ARGS);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/zfs_refcount.h b/sys/contrib/openzfs/include/sys/zfs_refcount.h
index 42f846b8920a..4efa266a53c5 100644
--- a/sys/contrib/openzfs/include/sys/zfs_refcount.h
+++ b/sys/contrib/openzfs/include/sys/zfs_refcount.h
@@ -1,134 +1,140 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
*/
#ifndef _SYS_ZFS_REFCOUNT_H
#define _SYS_ZFS_REFCOUNT_H
#include <sys/inttypes.h>
#include <sys/list.h>
#include <sys/zfs_context.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* If the reference is held only by the calling function and not any
* particular object, use FTAG (which is a string) for the holder_tag.
* Otherwise, use the object that holds the reference.
*/
#define FTAG ((char *)(uintptr_t)__func__)
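/*
 * Minimal usage sketch for a function-scoped hold (illustrative only):
 *
 *	zfs_refcount_t rc;
 *
 *	zfs_refcount_create(&rc);
 *	(void) zfs_refcount_add(&rc, FTAG);	-- hold for this function
 *	...
 *	(void) zfs_refcount_remove(&rc, FTAG);	-- drop before returning
 *	zfs_refcount_destroy(&rc);
 */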
#ifdef ZFS_DEBUG
typedef struct reference {
list_node_t ref_link;
const void *ref_holder;
uint64_t ref_number;
uint8_t *ref_removed;
} reference_t;
typedef struct refcount {
kmutex_t rc_mtx;
boolean_t rc_tracked;
list_t rc_list;
list_t rc_removed;
uint64_t rc_count;
uint64_t rc_removed_count;
} zfs_refcount_t;
/*
* Note: zfs_refcount_t must be initialized with
* zfs_refcount_create[_untracked]()
*/
void zfs_refcount_create(zfs_refcount_t *);
void zfs_refcount_create_untracked(zfs_refcount_t *);
void zfs_refcount_create_tracked(zfs_refcount_t *);
void zfs_refcount_destroy(zfs_refcount_t *);
void zfs_refcount_destroy_many(zfs_refcount_t *, uint64_t);
int zfs_refcount_is_zero(zfs_refcount_t *);
int64_t zfs_refcount_count(zfs_refcount_t *);
int64_t zfs_refcount_add(zfs_refcount_t *, const void *);
int64_t zfs_refcount_remove(zfs_refcount_t *, const void *);
/*
- * Note that (add|remove)_many add/remove one reference with "number" N,
- * _not_ make N references with "number" 1, which is what vanilla
- * zfs_refcount_(add|remove) would do if called N times.
+ * Note that (add|remove)_many adds/removes one reference with "number" N,
+ * _not_ N references with "number" 1, which is what (add|remove)_few does,
+ * or what vanilla zfs_refcount_(add|remove) called N times would do.
*
* Attempting to remove a reference with number N when none exists is a
* panic on debug kernels with reference_tracking enabled.
*/
+void zfs_refcount_add_few(zfs_refcount_t *, uint64_t, const void *);
+void zfs_refcount_remove_few(zfs_refcount_t *, uint64_t, const void *);
int64_t zfs_refcount_add_many(zfs_refcount_t *, uint64_t, const void *);
int64_t zfs_refcount_remove_many(zfs_refcount_t *, uint64_t, const void *);
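/*
 * Illustrative contrast of the two families (tracked entries assume a
 * debug build with reference tracking enabled):
 *
 *	zfs_refcount_add_many(&rc, 3, holder);	-- one reference, number 3
 *	zfs_refcount_add_few(&rc, 3, holder);	-- three references, number 1
 *
 * Both leave zfs_refcount_count(&rc) three higher; only the tracked
 * reference_t entries differ.  Removal must mirror how the references
 * were added.
 */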
void zfs_refcount_transfer(zfs_refcount_t *, zfs_refcount_t *);
void zfs_refcount_transfer_ownership(zfs_refcount_t *, const void *,
const void *);
void zfs_refcount_transfer_ownership_many(zfs_refcount_t *, uint64_t,
const void *, const void *);
boolean_t zfs_refcount_held(zfs_refcount_t *, const void *);
boolean_t zfs_refcount_not_held(zfs_refcount_t *, const void *);
void zfs_refcount_init(void);
void zfs_refcount_fini(void);
#else /* ZFS_DEBUG */
typedef struct refcount {
uint64_t rc_count;
} zfs_refcount_t;
#define zfs_refcount_create(rc) ((rc)->rc_count = 0)
#define zfs_refcount_create_untracked(rc) ((rc)->rc_count = 0)
#define zfs_refcount_create_tracked(rc) ((rc)->rc_count = 0)
#define zfs_refcount_destroy(rc) ((rc)->rc_count = 0)
#define zfs_refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
#define zfs_refcount_is_zero(rc) (zfs_refcount_count(rc) == 0)
#define zfs_refcount_count(rc) atomic_load_64(&(rc)->rc_count)
#define zfs_refcount_add(rc, holder) atomic_inc_64_nv(&(rc)->rc_count)
#define zfs_refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
+#define zfs_refcount_add_few(rc, number, holder) \
+ atomic_add_64(&(rc)->rc_count, number)
+#define zfs_refcount_remove_few(rc, number, holder) \
+ atomic_add_64(&(rc)->rc_count, -number)
#define zfs_refcount_add_many(rc, number, holder) \
atomic_add_64_nv(&(rc)->rc_count, number)
#define zfs_refcount_remove_many(rc, number, holder) \
atomic_add_64_nv(&(rc)->rc_count, -number)
#define zfs_refcount_transfer(dst, src) { \
uint64_t __tmp = zfs_refcount_count(src); \
atomic_add_64(&(src)->rc_count, -__tmp); \
atomic_add_64(&(dst)->rc_count, __tmp); \
}
#define zfs_refcount_transfer_ownership(rc, ch, nh) ((void)0)
#define zfs_refcount_transfer_ownership_many(rc, nr, ch, nh) ((void)0)
#define zfs_refcount_held(rc, holder) (zfs_refcount_count(rc) > 0)
#define zfs_refcount_not_held(rc, holder) (B_TRUE)
#define zfs_refcount_init()
#define zfs_refcount_fini()
#endif /* ZFS_DEBUG */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ZFS_REFCOUNT_H */
diff --git a/sys/contrib/openzfs/include/sys/zfs_znode.h b/sys/contrib/openzfs/include/sys/zfs_znode.h
index 012e7403e2a6..2f266f53247e 100644
--- a/sys/contrib/openzfs/include/sys/zfs_znode.h
+++ b/sys/contrib/openzfs/include/sys/zfs_znode.h
@@ -1,333 +1,333 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
#ifndef _SYS_FS_ZFS_ZNODE_H
#define _SYS_FS_ZFS_ZNODE_H
#include <sys/zfs_acl.h>
#include <sys/zil.h>
#include <sys/zfs_project.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Additional file-level attributes that are stored
* in the upper half of z_pflags
*/
#define ZFS_READONLY 0x0000000100000000ull
#define ZFS_HIDDEN 0x0000000200000000ull
#define ZFS_SYSTEM 0x0000000400000000ull
#define ZFS_ARCHIVE 0x0000000800000000ull
#define ZFS_IMMUTABLE 0x0000001000000000ull
#define ZFS_NOUNLINK 0x0000002000000000ull
#define ZFS_APPENDONLY 0x0000004000000000ull
#define ZFS_NODUMP 0x0000008000000000ull
#define ZFS_OPAQUE 0x0000010000000000ull
#define ZFS_AV_QUARANTINED 0x0000020000000000ull
#define ZFS_AV_MODIFIED 0x0000040000000000ull
#define ZFS_REPARSE 0x0000080000000000ull
#define ZFS_OFFLINE 0x0000100000000000ull
#define ZFS_SPARSE 0x0000200000000000ull
/*
* The PROJINHERIT attribute indicates that a child object created under a
* directory with this attribute set inherits the directory's project ID,
* which is used by project quota.
*/
#define ZFS_PROJINHERIT 0x0000400000000000ull
/*
* The PROJID attribute is used internally to indicate that the object has a project ID.
*/
#define ZFS_PROJID 0x0000800000000000ull
#define ZFS_ATTR_SET(zp, attr, value, pflags, tx) \
{ \
if (value) \
pflags |= attr; \
else \
pflags &= ~attr; \
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(ZTOZSB(zp)), \
&pflags, sizeof (pflags), tx)); \
}
/*
* Define special zfs pflags
*/
#define ZFS_XATTR 0x1 /* is an extended attribute */
#define ZFS_INHERIT_ACE 0x2 /* ace has inheritable ACEs */
#define ZFS_ACL_TRIVIAL 0x4 /* file's ACL is trivial */
#define ZFS_ACL_OBJ_ACE 0x8 /* ACL has CMPLX Object ACE */
#define ZFS_ACL_PROTECTED 0x10 /* ACL protected */
#define ZFS_ACL_DEFAULTED 0x20 /* ACL should be defaulted */
#define ZFS_ACL_AUTO_INHERIT 0x40 /* ACL should be inherited */
#define ZFS_BONUS_SCANSTAMP 0x80 /* Scanstamp in bonus area */
#define ZFS_NO_EXECS_DENIED 0x100 /* exec was given to everyone */
#define SA_ZPL_ATIME(z) z->z_attr_table[ZPL_ATIME]
#define SA_ZPL_MTIME(z) z->z_attr_table[ZPL_MTIME]
#define SA_ZPL_CTIME(z) z->z_attr_table[ZPL_CTIME]
#define SA_ZPL_CRTIME(z) z->z_attr_table[ZPL_CRTIME]
#define SA_ZPL_GEN(z) z->z_attr_table[ZPL_GEN]
#define SA_ZPL_DACL_ACES(z) z->z_attr_table[ZPL_DACL_ACES]
#define SA_ZPL_XATTR(z) z->z_attr_table[ZPL_XATTR]
#define SA_ZPL_SYMLINK(z) z->z_attr_table[ZPL_SYMLINK]
#define SA_ZPL_RDEV(z) z->z_attr_table[ZPL_RDEV]
#define SA_ZPL_SCANSTAMP(z) z->z_attr_table[ZPL_SCANSTAMP]
#define SA_ZPL_UID(z) z->z_attr_table[ZPL_UID]
#define SA_ZPL_GID(z) z->z_attr_table[ZPL_GID]
#define SA_ZPL_PARENT(z) z->z_attr_table[ZPL_PARENT]
#define SA_ZPL_LINKS(z) z->z_attr_table[ZPL_LINKS]
#define SA_ZPL_MODE(z) z->z_attr_table[ZPL_MODE]
#define SA_ZPL_DACL_COUNT(z) z->z_attr_table[ZPL_DACL_COUNT]
#define SA_ZPL_FLAGS(z) z->z_attr_table[ZPL_FLAGS]
#define SA_ZPL_SIZE(z) z->z_attr_table[ZPL_SIZE]
#define SA_ZPL_ZNODE_ACL(z) z->z_attr_table[ZPL_ZNODE_ACL]
#define SA_ZPL_DXATTR(z) z->z_attr_table[ZPL_DXATTR]
#define SA_ZPL_PAD(z) z->z_attr_table[ZPL_PAD]
#define SA_ZPL_PROJID(z) z->z_attr_table[ZPL_PROJID]
/*
* Is ID ephemeral?
*/
#define IS_EPHEMERAL(x) (x > MAXUID)
/*
* Should we use FUIDs?
*/
#define USE_FUIDS(version, os) (version >= ZPL_VERSION_FUID && \
spa_version(dmu_objset_spa(os)) >= SPA_VERSION_FUID)
#define USE_SA(version, os) (version >= ZPL_VERSION_SA && \
spa_version(dmu_objset_spa(os)) >= SPA_VERSION_SA)
#define MASTER_NODE_OBJ 1
/*
* Special attributes for master node.
* "userquota@", "groupquota@" and "projectquota@" are also valid (from
* zfs_userquota_prop_prefixes[]).
*/
#define ZFS_FSID "FSID"
#define ZFS_UNLINKED_SET "DELETE_QUEUE"
#define ZFS_ROOT_OBJ "ROOT"
#define ZPL_VERSION_STR "VERSION"
#define ZFS_FUID_TABLES "FUID"
#define ZFS_SHARES_DIR "SHARES"
#define ZFS_SA_ATTRS "SA_ATTRS"
/*
* Convert mode bits (zp_mode) to BSD-style DT_* values for storing in
* the directory entries. On Linux systems this value is already
* defined correctly as part of the /usr/include/dirent.h header file.
*/
#ifndef IFTODT
#define IFTODT(mode) (((mode) & S_IFMT) >> 12)
#endif
/*
* The directory entry has the type (currently unused on Solaris) in the
* top 4 bits, and the object number in the low 48 bits. The "middle"
* 12 bits are unused.
*/
#define ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
#define ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
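/*
 * Illustrative packing of a directory entry value ("obj" stands in for an
 * arbitrary object number):
 *
 *	uint64_t de = 0;
 *	BF64_SET(de, 0, 48, obj);		-- object number, low 48 bits
 *	BF64_SET(de, 60, 4, IFTODT(S_IFREG));	-- DT_* type, top 4 bits
 *	ASSERT3U(ZFS_DIRENT_OBJ(de), ==, obj);
 */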
extern int zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len);
+extern int zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value);
#ifdef _KERNEL
#include <sys/zfs_znode_impl.h>
/*
* Directory entry locks control access to directory entries.
* They are used to protect creates, deletes, and renames.
* Each directory znode has a mutex and a list of locked names.
*/
typedef struct zfs_dirlock {
char *dl_name; /* directory entry being locked */
uint32_t dl_sharecnt; /* 0 if exclusive, > 0 if shared */
uint8_t dl_namelock; /* 1 if z_name_lock is NOT held */
uint16_t dl_namesize; /* set if dl_name was allocated */
kcondvar_t dl_cv; /* wait for entry to be unlocked */
struct znode *dl_dzp; /* directory znode */
struct zfs_dirlock *dl_next; /* next in z_dirlocks list */
} zfs_dirlock_t;
typedef struct znode {
uint64_t z_id; /* object ID for this znode */
kmutex_t z_lock; /* znode modification lock */
krwlock_t z_parent_lock; /* parent lock for directories */
krwlock_t z_name_lock; /* "master" lock for dirent locks */
zfs_dirlock_t *z_dirlocks; /* directory entry lock list */
zfs_rangelock_t z_rangelock; /* file range locks */
boolean_t z_unlinked; /* file has been unlinked */
boolean_t z_atime_dirty; /* atime needs to be synced */
boolean_t z_zn_prefetch; /* Prefetch znodes? */
boolean_t z_is_sa; /* are we native sa? */
boolean_t z_is_ctldir; /* are we .zfs entry */
boolean_t z_suspended; /* extra ref from a suspend? */
uint_t z_blksz; /* block size in bytes */
uint_t z_seq; /* modification sequence number */
uint64_t z_mapcnt; /* number of pages mapped to file */
uint64_t z_dnodesize; /* dnode size */
uint64_t z_size; /* file size (cached) */
uint64_t z_pflags; /* pflags (cached) */
uint32_t z_sync_cnt; /* synchronous open count */
uint32_t z_sync_writes_cnt; /* synchronous write count */
uint32_t z_async_writes_cnt; /* asynchronous write count */
mode_t z_mode; /* mode (cached) */
kmutex_t z_acl_lock; /* acl data lock */
zfs_acl_t *z_acl_cached; /* cached acl */
krwlock_t z_xattr_lock; /* xattr data lock */
nvlist_t *z_xattr_cached; /* cached xattrs */
uint64_t z_xattr_parent; /* parent obj for this xattr */
uint64_t z_projid; /* project ID */
list_node_t z_link_node; /* all znodes in fs link */
sa_handle_t *z_sa_hdl; /* handle to sa data */
/*
* Platform specific field, defined by each platform and only
* accessible from platform specific code.
*/
ZNODE_OS_FIELDS;
} znode_t;
/* Verifies the znode is valid. */
static inline int
zfs_verify_zp(znode_t *zp)
{
if (unlikely(zp->z_sa_hdl == NULL))
return (SET_ERROR(EIO));
return (0);
}
/* zfs_enter and zfs_verify_zp together */
static inline int
zfs_enter_verify_zp(zfsvfs_t *zfsvfs, znode_t *zp, const char *tag)
{
int error;
if ((error = zfs_enter(zfsvfs, tag)) != 0)
return (error);
if ((error = zfs_verify_zp(zp)) != 0) {
zfs_exit(zfsvfs, tag);
return (error);
}
return (0);
}
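/*
 * Typical calling pattern in a ZPL operation (sketch):
 *
 *	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
 *		return (error);
 *	... operate on zp ...
 *	zfs_exit(zfsvfs, FTAG);
 *	return (0);
 */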
typedef struct znode_hold {
uint64_t zh_obj; /* object id */
avl_node_t zh_node; /* avl tree linkage */
kmutex_t zh_lock; /* lock serializing object access */
int zh_refcount; /* active consumer reference count */
} znode_hold_t;
static inline uint64_t
zfs_inherit_projid(znode_t *dzp)
{
return ((dzp->z_pflags & ZFS_PROJINHERIT) ? dzp->z_projid :
ZFS_DEFAULT_PROJID);
}
/*
* Timestamp defines
*/
#define ACCESSED (ATTR_ATIME)
#define STATE_CHANGED (ATTR_CTIME)
#define CONTENT_MODIFIED (ATTR_MTIME | ATTR_CTIME)
extern int zfs_init_fs(zfsvfs_t *, znode_t **);
extern void zfs_set_dataprop(objset_t *);
extern void zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *,
dmu_tx_t *tx);
extern void zfs_tstamp_update_setup(znode_t *, uint_t, uint64_t [2],
uint64_t [2]);
extern void zfs_grow_blocksize(znode_t *, uint64_t, dmu_tx_t *);
extern int zfs_freesp(znode_t *, uint64_t, uint64_t, int, boolean_t);
extern void zfs_znode_init(void);
extern void zfs_znode_fini(void);
extern int zfs_znode_hold_compare(const void *, const void *);
extern znode_hold_t *zfs_znode_hold_enter(zfsvfs_t *, uint64_t);
extern void zfs_znode_hold_exit(zfsvfs_t *, znode_hold_t *);
extern int zfs_zget(zfsvfs_t *, uint64_t, znode_t **);
extern int zfs_rezget(znode_t *);
extern void zfs_zinactive(znode_t *);
extern void zfs_znode_delete(znode_t *, dmu_tx_t *);
extern void zfs_remove_op_tables(void);
extern int zfs_create_op_tables(void);
extern dev_t zfs_cmpldev(uint64_t);
-extern int zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value);
extern int zfs_get_stats(objset_t *os, nvlist_t *nv);
extern boolean_t zfs_get_vfs_flag_unmounted(objset_t *os);
extern void zfs_znode_dmu_fini(znode_t *);
extern void zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, znode_t *zp, const char *name, vsecattr_t *,
zfs_fuid_info_t *, vattr_t *vap);
extern int zfs_log_create_txtype(zil_create_t, vsecattr_t *vsecp,
vattr_t *vap);
extern void zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, const char *name, uint64_t foid, boolean_t unlinked);
#define ZFS_NO_OBJECT 0 /* no object id */
extern void zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, znode_t *zp, const char *name);
extern void zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *dzp, znode_t *zp, const char *name, const char *link);
extern void zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
znode_t *sdzp, const char *sname, znode_t *tdzp, const char *dname,
znode_t *szp);
extern void zfs_log_rename_exchange(zilog_t *zilog, dmu_tx_t *tx,
uint64_t txtype, znode_t *sdzp, const char *sname, znode_t *tdzp,
const char *dname, znode_t *szp);
extern void zfs_log_rename_whiteout(zilog_t *zilog, dmu_tx_t *tx,
uint64_t txtype, znode_t *sdzp, const char *sname, znode_t *tdzp,
const char *dname, znode_t *szp, znode_t *wzp);
extern void zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, offset_t off, ssize_t len, int ioflag,
zil_callback_t callback, void *callback_data);
extern void zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, uint64_t off, uint64_t len);
extern void zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp);
extern void zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
vsecattr_t *vsecp, zfs_fuid_info_t *fuidp);
extern void zfs_log_clone_range(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, uint64_t offset, uint64_t length, uint64_t blksz,
const blkptr_t *bps, size_t nbps);
extern void zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx);
extern void zfs_upgrade(zfsvfs_t *zfsvfs, dmu_tx_t *tx);
extern void zfs_log_setsaxattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
znode_t *zp, const char *name, const void *value, size_t size);
extern void zfs_znode_update_vfs(struct znode *);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FS_ZFS_ZNODE_H */
diff --git a/sys/contrib/openzfs/include/sys/zil.h b/sys/contrib/openzfs/include/sys/zil.h
index cff8ebcad819..4747ecc067a9 100644
--- a/sys/contrib/openzfs/include/sys/zil.h
+++ b/sys/contrib/openzfs/include/sys/zil.h
@@ -1,603 +1,611 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#ifndef _SYS_ZIL_H
#define _SYS_ZIL_H
#include <sys/types.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu.h>
#include <sys/zio_crypt.h>
#include <sys/wmsum.h>
#ifdef __cplusplus
extern "C" {
#endif
struct dsl_pool;
struct dsl_dataset;
struct lwb;
/*
* Intent log format:
*
* Each objset has its own intent log. The log header (zil_header_t)
* for objset N's intent log is kept in the Nth object of the SPA's
* intent_log objset. The log header points to a chain of log blocks,
* each of which contains log records (i.e., transactions) followed by
* a log block trailer (zil_trailer_t). The format of a log record
* depends on the record (or transaction) type, but all records begin
* with a common structure that defines the type, length, and txg.
*/
/*
* Intent log header - this on disk structure holds fields to manage
* the log. All fields are 64 bit to easily handle cross architectures.
*/
typedef struct zil_header {
uint64_t zh_claim_txg; /* txg in which log blocks were claimed */
uint64_t zh_replay_seq; /* highest replayed sequence number */
blkptr_t zh_log; /* log chain */
uint64_t zh_claim_blk_seq; /* highest claimed block sequence number */
uint64_t zh_flags; /* header flags */
uint64_t zh_claim_lr_seq; /* highest claimed lr sequence number */
uint64_t zh_pad[3];
} zil_header_t;
/*
* zh_flags bit settings
*/
#define ZIL_REPLAY_NEEDED 0x1 /* replay needed - internal only */
#define ZIL_CLAIM_LR_SEQ_VALID 0x2 /* zh_claim_lr_seq field is valid */
/*
* Log block chaining.
*
* Log blocks are chained together. Originally they were chained at the
* end of the block. For performance reasons the chain was moved to the
* beginning of the block, so only the data actually used needs to be written.
* The older position is supported for backwards compatibility.
*
* The zio_eck_t contains a zec_cksum which for the intent log is
* the sequence number of this log block. A seq of 0 is invalid.
* The zec_cksum is checked by the SPA against the sequence
* number passed in the blk_cksum field of the blkptr_t
*/
typedef struct zil_chain {
uint64_t zc_pad;
blkptr_t zc_next_blk; /* next block in chain */
uint64_t zc_nused; /* bytes in log block used */
zio_eck_t zc_eck; /* block trailer */
} zil_chain_t;
#define ZIL_MIN_BLKSZ 4096ULL
/*
* ziltest is by and large an ugly hack, but very useful in
* checking replay without tedious work.
* When running ziltest we want to keep all itx's and so maintain
* a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG
* We subtract TXG_CONCURRENT_STATES to allow for common code.
*/
#define ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)
/*
* The words of a log block checksum.
*/
#define ZIL_ZC_GUID_0 0
#define ZIL_ZC_GUID_1 1
#define ZIL_ZC_OBJSET 2
#define ZIL_ZC_SEQ 3
typedef enum zil_create {
Z_FILE,
Z_DIR,
Z_XATTRDIR,
} zil_create_t;
/*
* Size of the xvattr log section.
* It is composed of an lr_attr_t + the xvattr bitmap + two 64-bit timestamps
* for create time, a single 64-bit integer for all of the attributes,
* and four 64-bit integers (32 bytes) for the scanstamp.
*
*/
#define ZIL_XVAT_SIZE(mapsize) \
sizeof (lr_attr_t) + (sizeof (uint32_t) * (mapsize - 1)) + \
(sizeof (uint64_t) * 7)
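/*
 * Worked example (assuming no structure padding): with a mask size of 3,
 *	ZIL_XVAT_SIZE(3) = sizeof (lr_attr_t) + 4 * 2 + 8 * 7
 *	                 = 8 + 8 + 56 = 72 bytes
 */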
/*
* Size of ACL in log. The ACE data is padded out to properly align
* on 8 byte boundary.
*/
#define ZIL_ACE_LENGTH(x) (roundup(x, sizeof (uint64_t)))
/*
* Intent log transaction types and record structures
*/
#define TX_COMMIT 0 /* Commit marker (no on-disk state) */
#define TX_CREATE 1 /* Create file */
#define TX_MKDIR 2 /* Make directory */
#define TX_MKXATTR 3 /* Make XATTR directory */
#define TX_SYMLINK 4 /* Create symbolic link to a file */
#define TX_REMOVE 5 /* Remove file */
#define TX_RMDIR 6 /* Remove directory */
#define TX_LINK 7 /* Create hard link to a file */
#define TX_RENAME 8 /* Rename a file */
#define TX_WRITE 9 /* File write */
#define TX_TRUNCATE 10 /* Truncate a file */
#define TX_SETATTR 11 /* Set file attributes */
#define TX_ACL_V0 12 /* Set old formatted ACL */
#define TX_ACL 13 /* Set ACL */
#define TX_CREATE_ACL 14 /* create with ACL */
#define TX_CREATE_ATTR 15 /* create + attrs */
#define TX_CREATE_ACL_ATTR 16 /* create with ACL + attrs */
#define TX_MKDIR_ACL 17 /* mkdir with ACL */
#define TX_MKDIR_ATTR 18 /* mkdir with attr */
#define TX_MKDIR_ACL_ATTR 19 /* mkdir with ACL + attrs */
#define TX_WRITE2 20 /* dmu_sync EALREADY write */
#define TX_SETSAXATTR 21 /* Set sa xattrs on file */
#define TX_RENAME_EXCHANGE 22 /* Atomic swap via renameat2 */
#define TX_RENAME_WHITEOUT 23 /* Atomic whiteout via renameat2 */
#define TX_CLONE_RANGE 24 /* Clone a file range */
#define TX_MAX_TYPE 25 /* Max transaction type */
/*
* The transactions for mkdir, symlink, remove, rmdir, link, and rename
* may have the following bit set, indicating the original request
* specified case-insensitive handling of names.
*/
#define TX_CI ((uint64_t)0x1 << 63) /* case-insensitive behavior requested */
/*
* Transactions for operations below can be logged out of order.
* For convenience in the code, all such records must have lr_foid
* at the same offset.
*/
#define TX_OOO(txtype) \
((txtype) == TX_WRITE || \
(txtype) == TX_TRUNCATE || \
(txtype) == TX_SETATTR || \
(txtype) == TX_ACL_V0 || \
(txtype) == TX_ACL || \
(txtype) == TX_WRITE2 || \
(txtype) == TX_SETSAXATTR || \
(txtype) == TX_CLONE_RANGE)
/*
* The number of dnode slots consumed by the object is stored in the 8
* unused upper bits of the object ID. We subtract 1 from the value
* stored on disk for compatibility with implementations that don't
* support large dnodes. The slot count for a single-slot dnode will
* contain 0 for those bits to preserve the log record format for
* "small" dnodes.
*/
#define LR_FOID_GET_SLOTS(oid) (BF64_GET((oid), 56, 8) + 1)
#define LR_FOID_SET_SLOTS(oid, x) BF64_SET((oid), 56, 8, (x) - 1)
#define LR_FOID_GET_OBJ(oid) BF64_GET((oid), 0, DN_MAX_OBJECT_SHIFT)
#define LR_FOID_SET_OBJ(oid, x) BF64_SET((oid), 0, DN_MAX_OBJECT_SHIFT, (x))
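/*
 * Illustrative encoding (1234 is an arbitrary object number):
 *
 *	uint64_t foid = 0;
 *	LR_FOID_SET_OBJ(foid, 1234);
 *	LR_FOID_SET_SLOTS(foid, 2);		-- stored on disk as 1
 *	ASSERT3U(LR_FOID_GET_OBJ(foid), ==, 1234);
 *	ASSERT3U(LR_FOID_GET_SLOTS(foid), ==, 2);
 */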
/*
* Format of log records.
* The fields are carefully defined to allow them to be aligned
* and sized the same on sparc & intel architectures.
* Each log record has a common structure at the beginning.
*
* The log record on disk (lrc_seq) holds the sequence number of all log
* records which is used to ensure we don't replay the same record.
*/
typedef struct { /* common log record header */
uint64_t lrc_txtype; /* intent log transaction type */
uint64_t lrc_reclen; /* transaction record length */
uint64_t lrc_txg; /* dmu transaction group number */
uint64_t lrc_seq; /* see comment above */
} lr_t;
/*
* Common start of all out-of-order record types (TX_OOO() above).
*/
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* object id */
} lr_ooo_t;
/*
* Additional lr_attr_t fields.
*/
typedef struct {
uint64_t lr_attr_attrs; /* all of the attributes */
uint64_t lr_attr_crtime[2]; /* create time */
uint8_t lr_attr_scanstamp[32];
} lr_attr_end_t;
/*
* Handle optional extended vattr attributes.
*
* Whenever new attributes are added, the version number
* will need to be updated, as will the code in
* zfs_log.c and zfs_replay.c
*/
typedef struct {
uint32_t lr_attr_masksize; /* number of elements in array */
uint32_t lr_attr_bitmap; /* First entry of array */
/* remainder of array and additional lr_attr_end_t fields */
} lr_attr_t;
/*
* log record for creates without optional ACL.
* This log record does support optional xvattr_t attributes.
*/
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_doid; /* object id of directory */
uint64_t lr_foid; /* object id of created file object */
uint64_t lr_mode; /* mode of object */
uint64_t lr_uid; /* uid of object */
uint64_t lr_gid; /* gid of object */
uint64_t lr_gen; /* generation (txg of creation) */
uint64_t lr_crtime[2]; /* creation time */
uint64_t lr_rdev; /* rdev of object to create */
/* name of object to create follows this */
/* for symlinks, link content follows name */
/* for creates with xvattr data, the name follows the xvattr info */
} lr_create_t;
/*
* FUID ACL record will be an array of ACEs from the original ACL.
* If this array includes ephemeral IDs, the record will also include
* an array of log-specific FUIDs to replace the ephemeral IDs.
* Only one copy of each unique domain will be present, so the log-specific
* FUIDs will use an index into a compressed domain table. On replay this
* information will be used to construct real FUIDs (and bypass idmap,
* since it may not be available).
*/
/*
* Log record for creates with optional ACL
* This log record is also used for recording any FUID
* information needed for replaying the create. If the
* file doesn't have any actual ACEs then the lr_aclcnt
* would be zero.
*
* After lr_acl_flags, lr_acl_bytes of variable-sized ACEs follow.
* If create is also setting xvattr's, then acl data follows xvattr.
* If ACE FUIDs are needed then they will follow the xvattr_t. Following
* the FUIDs will be the domain table information. The FUIDs for the owner
* and group will be in lr_create. Name follows ACL data.
*/
typedef struct {
lr_create_t lr_create; /* common create portion */
uint64_t lr_aclcnt; /* number of ACEs in ACL */
uint64_t lr_domcnt; /* number of unique domains */
uint64_t lr_fuidcnt; /* number of real fuids */
uint64_t lr_acl_bytes; /* number of bytes in ACL */
uint64_t lr_acl_flags; /* ACL flags */
} lr_acl_create_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_doid; /* obj id of directory */
/* name of object to remove follows this */
} lr_remove_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_doid; /* obj id of directory */
uint64_t lr_link_obj; /* obj id of link */
/* name of object to link follows this */
} lr_link_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_sdoid; /* obj id of source directory */
uint64_t lr_tdoid; /* obj id of target directory */
/* 2 strings: names of source and destination follow this */
} lr_rename_t;
typedef struct {
lr_rename_t lr_rename; /* common rename portion */
/* members related to the whiteout file (based on lr_create_t) */
uint64_t lr_wfoid; /* obj id of the new whiteout file */
uint64_t lr_wmode; /* mode of object */
uint64_t lr_wuid; /* uid of whiteout */
uint64_t lr_wgid; /* gid of whiteout */
uint64_t lr_wgen; /* generation (txg of creation) */
uint64_t lr_wcrtime[2]; /* creation time */
uint64_t lr_wrdev; /* always makedev(0, 0) */
/* 2 strings: names of source and destination follow this */
} lr_rename_whiteout_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* file object to write */
uint64_t lr_offset; /* offset to write to */
uint64_t lr_length; /* user data length to write */
uint64_t lr_blkoff; /* no longer used */
blkptr_t lr_blkptr; /* spa block pointer for replay */
/* write data will follow for small writes */
} lr_write_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* object id of file to truncate */
uint64_t lr_offset; /* offset to truncate from */
uint64_t lr_length; /* length to truncate */
} lr_truncate_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* file object to change attributes */
uint64_t lr_mask; /* mask of attributes to set */
uint64_t lr_mode; /* mode to set */
uint64_t lr_uid; /* uid to set */
uint64_t lr_gid; /* gid to set */
uint64_t lr_size; /* size to set */
uint64_t lr_atime[2]; /* access time */
uint64_t lr_mtime[2]; /* modification time */
/* optional attribute lr_attr_t may be here */
} lr_setattr_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* file object to change attributes */
uint64_t lr_size;
/* xattr name and value follows */
} lr_setsaxattr_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* obj id of file */
uint64_t lr_aclcnt; /* number of acl entries */
/* lr_aclcnt number of ace_t entries follow this */
} lr_acl_v0_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* obj id of file */
uint64_t lr_aclcnt; /* number of ACEs in ACL */
uint64_t lr_domcnt; /* number of unique domains */
uint64_t lr_fuidcnt; /* number of real fuids */
uint64_t lr_acl_bytes; /* number of bytes in ACL */
uint64_t lr_acl_flags; /* ACL flags */
/* lr_acl_bytes of variable-sized ACEs follow this */
} lr_acl_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* file object to clone into */
uint64_t lr_offset; /* offset to clone to */
uint64_t lr_length; /* length of the blocks to clone */
uint64_t lr_blksz; /* file's block size */
uint64_t lr_nbps; /* number of block pointers */
blkptr_t lr_bps[];
/* block pointers of the blocks to clone follows */
} lr_clone_range_t;
/*
* ZIL structure definitions, interface function prototype and globals.
*/
/*
* Writes are handled in three different ways:
*
* WR_INDIRECT:
* In this mode, if we need to commit the write later, then the block
* is immediately written into the file system (using dmu_sync),
* and a pointer to the block is put into the log record.
* When the txg commits the block is linked in.
* This avoids writing the data into the log record in addition to the pool.
* There are a few requirements for this to occur:
* - write is greater than zfs/zvol_immediate_write_sz
* - not using slogs (as slogs are assumed to always be faster
* than writing into the main pool)
* - the write occupies only one block
* WR_COPIED:
* If we know we'll immediately be committing the
* transaction (O_SYNC or O_DSYNC), then we allocate a larger
* log record here for the data and copy the data in.
* WR_NEED_COPY:
* Otherwise we don't allocate a buffer, and *if* we need to
* flush the write later then a buffer is allocated and
* we retrieve the data using the dmu.
*/
typedef enum {
WR_INDIRECT, /* indirect - a large write (dmu_sync() data */
/* and put blkptr in log, rather than actual data) */
WR_COPIED, /* immediate - data is copied into lr_write_t */
WR_NEED_COPY, /* immediate - data needs to be copied if pushed */
WR_NUM_STATES /* number of states */
} itx_wr_state_t;
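/*
 * Simplified sketch of how zfs_log_write() picks a state, paraphrasing
 * the description above (see that function for the authoritative rules;
 * the tunable and helper names here are assumptions):
 *
 *	if (logbias == throughput ||
 *	    (!spa_has_slogs(spa) && resid >= zfs_immediate_write_sz))
 *		state = WR_INDIRECT;
 *	else if (ioflag & (O_SYNC | O_DSYNC))
 *		state = WR_COPIED;
 *	else
 *		state = WR_NEED_COPY;
 */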
typedef void (*zil_callback_t)(void *data);
typedef struct itx {
list_node_t itx_node; /* linkage on zl_itx_list */
void *itx_private; /* type-specific opaque data */
itx_wr_state_t itx_wr_state; /* write state */
uint8_t itx_sync; /* synchronous transaction */
zil_callback_t itx_callback; /* Called when the itx is persistent */
void *itx_callback_data; /* User data for the callback */
size_t itx_size; /* allocated itx structure size */
uint64_t itx_oid; /* object id */
uint64_t itx_gen; /* gen number for zfs_get_data */
lr_t itx_lr; /* common part of log record */
/* followed by type-specific part of lr_xx_t and its immediate data */
} itx_t;
/*
* Used for zil kstat.
*/
typedef struct zil_stats {
/*
* Number of times a ZIL commit (e.g. fsync) has been requested.
*/
kstat_named_t zil_commit_count;
/*
* Number of times the ZIL has been flushed to stable storage.
* This is less than zil_commit_count when commits are "merged"
* (see the documentation above zil_commit()).
*/
kstat_named_t zil_commit_writer_count;
/*
* Number of transactions (reads, writes, renames, etc.)
* that have been committed.
*/
kstat_named_t zil_itx_count;
/*
* See the documentation for itx_wr_state_t above.
* Note that "bytes" accumulates the length of the transactions
* (i.e. data), not the actual log record sizes.
*/
kstat_named_t zil_itx_indirect_count;
kstat_named_t zil_itx_indirect_bytes;
kstat_named_t zil_itx_copied_count;
kstat_named_t zil_itx_copied_bytes;
kstat_named_t zil_itx_needcopy_count;
kstat_named_t zil_itx_needcopy_bytes;
/*
* Transactions which have been allocated to the "normal"
* (i.e. not slog) storage pool. Note that "bytes" accumulates
* the actual log record sizes - which do not include the actual
- * data in case of indirect writes.
+ * data in case of indirect writes. bytes <= write <= alloc.
*/
kstat_named_t zil_itx_metaslab_normal_count;
kstat_named_t zil_itx_metaslab_normal_bytes;
+ kstat_named_t zil_itx_metaslab_normal_write;
+ kstat_named_t zil_itx_metaslab_normal_alloc;
/*
* Transactions which have been allocated to the "slog" storage pool.
* If there are no separate log devices, this is the same as the
- * "normal" pool.
+ * "normal" pool. bytes <= write <= alloc.
*/
kstat_named_t zil_itx_metaslab_slog_count;
kstat_named_t zil_itx_metaslab_slog_bytes;
+ kstat_named_t zil_itx_metaslab_slog_write;
+ kstat_named_t zil_itx_metaslab_slog_alloc;
} zil_kstat_values_t;
typedef struct zil_sums {
wmsum_t zil_commit_count;
wmsum_t zil_commit_writer_count;
wmsum_t zil_itx_count;
wmsum_t zil_itx_indirect_count;
wmsum_t zil_itx_indirect_bytes;
wmsum_t zil_itx_copied_count;
wmsum_t zil_itx_copied_bytes;
wmsum_t zil_itx_needcopy_count;
wmsum_t zil_itx_needcopy_bytes;
wmsum_t zil_itx_metaslab_normal_count;
wmsum_t zil_itx_metaslab_normal_bytes;
+ wmsum_t zil_itx_metaslab_normal_write;
+ wmsum_t zil_itx_metaslab_normal_alloc;
wmsum_t zil_itx_metaslab_slog_count;
wmsum_t zil_itx_metaslab_slog_bytes;
+ wmsum_t zil_itx_metaslab_slog_write;
+ wmsum_t zil_itx_metaslab_slog_alloc;
} zil_sums_t;
#define ZIL_STAT_INCR(zil, stat, val) \
do { \
int64_t tmpval = (val); \
wmsum_add(&(zil_sums_global.stat), tmpval); \
if ((zil)->zl_sums) \
wmsum_add(&((zil)->zl_sums->stat), tmpval); \
} while (0)
#define ZIL_STAT_BUMP(zil, stat) \
ZIL_STAT_INCR(zil, stat, 1);
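/*
 * Usage sketch (stat names are fields of zil_sums_t above; "lrw" stands in
 * for a hypothetical lr_write_t pointer):
 *
 *	ZIL_STAT_BUMP(zilog, zil_commit_count);
 *	ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes, lrw->lr_length);
 */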
typedef int zil_parse_blk_func_t(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t txg);
typedef int zil_parse_lr_func_t(zilog_t *zilog, const lr_t *lr, void *arg,
uint64_t txg);
typedef int zil_replay_func_t(void *arg1, void *arg2, boolean_t byteswap);
typedef int zil_get_data_t(void *arg, uint64_t arg2, lr_write_t *lr, char *dbuf,
struct lwb *lwb, zio_t *zio);
extern int zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
boolean_t decrypt);
extern void zil_init(void);
extern void zil_fini(void);
extern zilog_t *zil_alloc(objset_t *os, zil_header_t *zh_phys);
extern void zil_free(zilog_t *zilog);
extern zilog_t *zil_open(objset_t *os, zil_get_data_t *get_data,
zil_sums_t *zil_sums);
extern void zil_close(zilog_t *zilog);
extern boolean_t zil_replay(objset_t *os, void *arg,
zil_replay_func_t *const replay_func[TX_MAX_TYPE]);
extern boolean_t zil_replaying(zilog_t *zilog, dmu_tx_t *tx);
extern boolean_t zil_destroy(zilog_t *zilog, boolean_t keep_first);
extern void zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx);
extern itx_t *zil_itx_create(uint64_t txtype, size_t lrsize);
extern void zil_itx_destroy(itx_t *itx);
extern void zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx);
extern void zil_async_to_sync(zilog_t *zilog, uint64_t oid);
extern void zil_commit(zilog_t *zilog, uint64_t oid);
extern void zil_commit_impl(zilog_t *zilog, uint64_t oid);
extern void zil_remove_async(zilog_t *zilog, uint64_t oid);
extern int zil_reset(const char *osname, void *txarg);
extern int zil_claim(struct dsl_pool *dp,
struct dsl_dataset *ds, void *txarg);
extern int zil_check_log_chain(struct dsl_pool *dp,
struct dsl_dataset *ds, void *tx);
extern void zil_sync(zilog_t *zilog, dmu_tx_t *tx);
extern void zil_clean(zilog_t *zilog, uint64_t synced_txg);
extern int zil_suspend(const char *osname, void **cookiep);
extern void zil_resume(void *cookie);
extern void zil_lwb_add_block(struct lwb *lwb, const blkptr_t *bp);
extern void zil_lwb_add_txg(struct lwb *lwb, uint64_t txg);
extern int zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp);
extern void zil_set_sync(zilog_t *zilog, uint64_t syncval);
extern void zil_set_logbias(zilog_t *zilog, uint64_t slogval);
extern uint64_t zil_max_copied_data(zilog_t *zilog);
extern uint64_t zil_max_log_data(zilog_t *zilog, size_t hdrsize);
extern void zil_sums_init(zil_sums_t *zs);
extern void zil_sums_fini(zil_sums_t *zs);
extern void zil_kstat_values_update(zil_kstat_values_t *zs,
zil_sums_t *zil_sums);
extern int zil_replay_disable;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ZIL_H */
diff --git a/sys/contrib/openzfs/include/sys/zil_impl.h b/sys/contrib/openzfs/include/sys/zil_impl.h
index bb85bf6d1eb1..03a409c5257c 100644
--- a/sys/contrib/openzfs/include/sys/zil_impl.h
+++ b/sys/contrib/openzfs/include/sys/zil_impl.h
@@ -1,239 +1,242 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#ifndef _SYS_ZIL_IMPL_H
#define _SYS_ZIL_IMPL_H
#include <sys/zil.h>
#include <sys/dmu_objset.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Possible states for a given lwb structure.
*
* An lwb will start out in the "closed" state, and then transition to
* the "opened" state via a call to zil_lwb_write_open(). When
* transitioning from "closed" to "opened" the zilog's "zl_issuer_lock"
* must be held.
*
* After the lwb is "opened", it can transition into the "issued" state
- * via zil_lwb_write_issue(). Again, the zilog's "zl_issuer_lock" must
+ * via zil_lwb_write_close(). Again, the zilog's "zl_issuer_lock" must
* be held when making this transition.
*
* After the lwb's write zio completes, it transitions into the "write
* done" state via zil_lwb_write_done(); and then into the "flush done"
* state via zil_lwb_flush_vdevs_done(). When transitioning from
* "issued" to "write done", and then from "write done" to "flush done",
* the zilog's "zl_lock" must be held, *not* the "zl_issuer_lock".
*
* The zilog's "zl_issuer_lock" can become heavily contended in certain
* workloads, so we specifically avoid acquiring that lock when
* transitioning an lwb from "issued" to "done". This allows us to avoid
* having to acquire the "zl_issuer_lock" for each lwb ZIO completion,
* which would have added more lock contention on an already heavily
* contended lock.
*
* Additionally, correctness when reading an lwb's state is often
* achieved by exploiting the fact that these state transitions occur in
* this specific order; i.e. "closed" to "opened" to "issued" to "done".
*
* Thus, if an lwb is in the "closed" or "opened" state, holding the
* "zl_issuer_lock" will prevent a concurrent thread from transitioning
* that lwb to the "issued" state. Likewise, if an lwb is already in the
* "issued" state, holding the "zl_lock" will prevent a concurrent
* thread from transitioning that lwb to the "write done" state.
*/
typedef enum {
LWB_STATE_CLOSED,
LWB_STATE_OPENED,
LWB_STATE_ISSUED,
LWB_STATE_WRITE_DONE,
LWB_STATE_FLUSH_DONE,
LWB_NUM_STATES
} lwb_state_t;
/*
* Log write block (lwb)
*
* Prior to an lwb being issued to disk via zil_lwb_write_issue(), it
* will be protected by the zilog's "zl_issuer_lock". Basically, prior
* to it being issued, it will only be accessed by the thread that's
* holding the "zl_issuer_lock". After the lwb is issued, the zilog's
* "zl_lock" is used to protect the lwb against concurrent access.
*/
typedef struct lwb {
zilog_t *lwb_zilog; /* back pointer to log struct */
blkptr_t lwb_blk; /* on disk address of this log blk */
boolean_t lwb_fastwrite; /* is blk marked for fastwrite? */
boolean_t lwb_slog; /* lwb_blk is on SLOG device */
+ boolean_t lwb_indirect; /* do not postpone zil_lwb_commit() */
int lwb_nused; /* # used bytes in buffer */
+ int lwb_nfilled; /* # filled bytes in buffer */
int lwb_sz; /* size of block and buffer */
lwb_state_t lwb_state; /* the state of this lwb */
char *lwb_buf; /* log write buffer */
zio_t *lwb_write_zio; /* zio for the lwb buffer */
zio_t *lwb_root_zio; /* root zio for lwb write and flushes */
+ hrtime_t lwb_issued_timestamp; /* when was the lwb issued? */
uint64_t lwb_issued_txg; /* the txg when the write is issued */
uint64_t lwb_max_txg; /* highest txg in this lwb */
list_node_t lwb_node; /* zilog->zl_lwb_list linkage */
+ list_node_t lwb_issue_node; /* linkage of lwbs ready for issue */
list_t lwb_itxs; /* list of itx's */
list_t lwb_waiters; /* list of zil_commit_waiter's */
avl_tree_t lwb_vdev_tree; /* vdevs to flush after lwb write */
kmutex_t lwb_vdev_lock; /* protects lwb_vdev_tree */
- hrtime_t lwb_issued_timestamp; /* when was the lwb issued? */
} lwb_t;
/*
* ZIL commit waiter.
*
* This structure is allocated each time zil_commit() is called, and is
* used by zil_commit() to communicate with other parts of the ZIL, such
* that zil_commit() can know when it is safe for it to return. For more
* details, see the comment above zil_commit().
*
* The "zcw_lock" field is used to protect the commit waiter against
* concurrent access. This lock is often acquired while already holding
* the zilog's "zl_issuer_lock" or "zl_lock"; see the functions
* zil_process_commit_list() and zil_lwb_flush_vdevs_done() as examples
* of this. Thus, one must be careful not to acquire the
* "zl_issuer_lock" or "zl_lock" when already holding the "zcw_lock";
* e.g. see the zil_commit_waiter_timeout() function.
*/
typedef struct zil_commit_waiter {
kcondvar_t zcw_cv; /* signalled when "done" */
kmutex_t zcw_lock; /* protects fields of this struct */
list_node_t zcw_node; /* linkage in lwb_t:lwb_waiter list */
lwb_t *zcw_lwb; /* back pointer to lwb when linked */
boolean_t zcw_done; /* B_TRUE when "done", else B_FALSE */
int zcw_zio_error; /* contains the zio io_error value */
} zil_commit_waiter_t;
/*
* Intent log transaction lists
*/
typedef struct itxs {
list_t i_sync_list; /* list of synchronous itxs */
avl_tree_t i_async_tree; /* tree of foids for async itxs */
} itxs_t;
typedef struct itxg {
kmutex_t itxg_lock; /* lock for this structure */
uint64_t itxg_txg; /* txg for this chain */
itxs_t *itxg_itxs; /* sync and async itxs */
} itxg_t;
/* for async nodes we build up an AVL tree of lists of async itxs per file */
typedef struct itx_async_node {
uint64_t ia_foid; /* file object id */
list_t ia_list; /* list of async itxs for this foid */
avl_node_t ia_node; /* AVL tree linkage */
} itx_async_node_t;
/*
* Vdev flushing: during a zil_commit(), we build up an AVL tree of the vdevs
* we've touched so we know which ones need a write cache flush at the end.
*/
typedef struct zil_vdev_node {
uint64_t zv_vdev; /* vdev to be flushed */
avl_node_t zv_node; /* AVL tree linkage */
} zil_vdev_node_t;
#define ZIL_PREV_BLKS 16
/*
* Stable storage intent log management structure. One per dataset.
*/
struct zilog {
kmutex_t zl_lock; /* protects most zilog_t fields */
struct dsl_pool *zl_dmu_pool; /* DSL pool */
spa_t *zl_spa; /* handle for read/write log */
const zil_header_t *zl_header; /* log header buffer */
objset_t *zl_os; /* object set we're logging */
zil_get_data_t *zl_get_data; /* callback to get object content */
lwb_t *zl_last_lwb_opened; /* most recent lwb opened */
hrtime_t zl_last_lwb_latency; /* zio latency of last lwb done */
uint64_t zl_lr_seq; /* on-disk log record sequence number */
uint64_t zl_commit_lr_seq; /* last committed on-disk lr seq */
uint64_t zl_destroy_txg; /* txg of last zil_destroy() */
uint64_t zl_replayed_seq[TXG_SIZE]; /* last replayed rec seq */
uint64_t zl_replaying_seq; /* current replay seq number */
uint32_t zl_suspend; /* log suspend count */
kcondvar_t zl_cv_suspend; /* log suspend completion */
uint8_t zl_suspending; /* log is currently suspending */
uint8_t zl_keep_first; /* keep first log block in destroy */
uint8_t zl_replay; /* replaying records while set */
uint8_t zl_stop_sync; /* for debugging */
kmutex_t zl_issuer_lock; /* single writer, per ZIL, at a time */
uint8_t zl_logbias; /* latency or throughput */
uint8_t zl_sync; /* synchronous or asynchronous */
int zl_parse_error; /* last zil_parse() error */
uint64_t zl_parse_blk_seq; /* highest blk seq on last parse */
uint64_t zl_parse_lr_seq; /* highest lr seq on last parse */
uint64_t zl_parse_blk_count; /* number of blocks parsed */
uint64_t zl_parse_lr_count; /* number of log records parsed */
itxg_t zl_itxg[TXG_SIZE]; /* intent log txg chains */
list_t zl_itx_commit_list; /* itx list to be committed */
uint64_t zl_cur_used; /* current commit log size used */
list_t zl_lwb_list; /* in-flight log write list */
avl_tree_t zl_bp_tree; /* track bps during log parse */
clock_t zl_replay_time; /* lbolt of when replay started */
uint64_t zl_replay_blks; /* number of log blocks replayed */
zil_header_t zl_old_header; /* debugging aid */
uint_t zl_prev_blks[ZIL_PREV_BLKS]; /* size - sector rounded */
uint_t zl_prev_rotor; /* rotor for zl_prev[] */
txg_node_t zl_dirty_link; /* protected by dp_dirty_zilogs list */
uint64_t zl_dirty_max_txg; /* highest txg used to dirty zilog */
kmutex_t zl_lwb_io_lock; /* protect following members */
uint64_t zl_lwb_inflight[TXG_SIZE]; /* io issued, but not done */
kcondvar_t zl_lwb_io_cv; /* signal when the flush is done */
uint64_t zl_lwb_max_issued_txg; /* max txg when lwb io issued */
/*
* Max block size for this ZIL. Note that this can not be changed
* while the ZIL is in use because consumers (ZPL/zvol) need to take
* this into account when deciding between WR_COPIED and WR_NEED_COPY
* (see zil_max_copied_data()).
*/
uint64_t zl_max_block_size;
/* Pointer for per dataset zil sums */
zil_sums_t *zl_sums;
};
typedef struct zil_bp_node {
dva_t zn_dva;
avl_node_t zn_node;
} zil_bp_node_t;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ZIL_IMPL_H */
diff --git a/sys/contrib/openzfs/include/sys/zio.h b/sys/contrib/openzfs/include/sys/zio.h
index 695bc09e6cb7..6b1352a72b9a 100644
--- a/sys/contrib/openzfs/include/sys/zio.h
+++ b/sys/contrib/openzfs/include/sys/zio.h
@@ -1,721 +1,720 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019-2020, Michael Niewöhner
*/
#ifndef _ZIO_H
#define _ZIO_H
#include <sys/zio_priority.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/fs/zfs.h>
#include <sys/zio_impl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Embedded checksum
*/
#define ZEC_MAGIC 0x210da7ab10c7a11ULL
typedef struct zio_eck {
uint64_t zec_magic; /* for validation, endianness */
zio_cksum_t zec_cksum; /* 256-bit checksum */
} zio_eck_t;
/*
* Gang block headers are self-checksumming and contain an array
* of block pointers.
*/
#define SPA_GANGBLOCKSIZE SPA_MINBLOCKSIZE
#define SPA_GBH_NBLKPTRS ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t)) / sizeof (blkptr_t))
#define SPA_GBH_FILLER ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t) - \
(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
sizeof (uint64_t))
typedef struct zio_gbh {
blkptr_t zg_blkptr[SPA_GBH_NBLKPTRS];
uint64_t zg_filler[SPA_GBH_FILLER];
zio_eck_t zg_tail;
} zio_gbh_phys_t;
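/*
 * Editor's note (worked example, assuming the usual sizes): with
 * SPA_MINBLOCKSIZE = 512, a 40-byte zio_eck_t and a 128-byte blkptr_t, the
 * arithmetic above gives SPA_GBH_NBLKPTRS = (512 - 40) / 128 = 3 block
 * pointers per gang header and SPA_GBH_FILLER = (512 - 40 - 3*128) / 8 = 11
 * filler words, so zio_gbh_phys_t exactly fills one minimum-sized block.
 */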
enum zio_checksum {
ZIO_CHECKSUM_INHERIT = 0,
ZIO_CHECKSUM_ON,
ZIO_CHECKSUM_OFF,
ZIO_CHECKSUM_LABEL,
ZIO_CHECKSUM_GANG_HEADER,
ZIO_CHECKSUM_ZILOG,
ZIO_CHECKSUM_FLETCHER_2,
ZIO_CHECKSUM_FLETCHER_4,
ZIO_CHECKSUM_SHA256,
ZIO_CHECKSUM_ZILOG2,
ZIO_CHECKSUM_NOPARITY,
ZIO_CHECKSUM_SHA512,
ZIO_CHECKSUM_SKEIN,
ZIO_CHECKSUM_EDONR,
ZIO_CHECKSUM_BLAKE3,
ZIO_CHECKSUM_FUNCTIONS
};
/*
* The number of "legacy" checksum functions which can be set on individual
* objects.
*/
#define ZIO_CHECKSUM_LEGACY_FUNCTIONS ZIO_CHECKSUM_ZILOG2
#define ZIO_CHECKSUM_ON_VALUE ZIO_CHECKSUM_FLETCHER_4
#define ZIO_CHECKSUM_DEFAULT ZIO_CHECKSUM_ON
#define ZIO_CHECKSUM_MASK 0xffULL
#define ZIO_CHECKSUM_VERIFY (1U << 8)
#define ZIO_DEDUPCHECKSUM ZIO_CHECKSUM_SHA256
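/*
 * Editor's note, an assumption about how these masks compose rather than a
 * statement from this change: a checksum property value keeps the algorithm
 * in the low byte (ZIO_CHECKSUM_MASK) and may OR in ZIO_CHECKSUM_VERIFY, e.g.
 * (ZIO_CHECKSUM_SHA256 | ZIO_CHECKSUM_VERIFY) for "sha256,verify";
 * (value & ZIO_CHECKSUM_MASK) recovers the algorithm alone.
 */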
/* macros defining encryption lengths */
#define ZIO_OBJSET_MAC_LEN 32
#define ZIO_DATA_IV_LEN 12
#define ZIO_DATA_SALT_LEN 8
#define ZIO_DATA_MAC_LEN 16
/*
* The number of "legacy" compression functions which can be set on individual
* objects.
*/
#define ZIO_COMPRESS_LEGACY_FUNCTIONS ZIO_COMPRESS_LZ4
/*
* The meaning of "compress = on" selected by the compression features enabled
* on a given pool.
*/
#define ZIO_COMPRESS_LEGACY_ON_VALUE ZIO_COMPRESS_LZJB
#define ZIO_COMPRESS_LZ4_ON_VALUE ZIO_COMPRESS_LZ4
#define ZIO_COMPRESS_DEFAULT ZIO_COMPRESS_ON
#define BOOTFS_COMPRESS_VALID(compress) \
((compress) == ZIO_COMPRESS_LZJB || \
(compress) == ZIO_COMPRESS_LZ4 || \
(compress) == ZIO_COMPRESS_GZIP_1 || \
(compress) == ZIO_COMPRESS_GZIP_2 || \
(compress) == ZIO_COMPRESS_GZIP_3 || \
(compress) == ZIO_COMPRESS_GZIP_4 || \
(compress) == ZIO_COMPRESS_GZIP_5 || \
(compress) == ZIO_COMPRESS_GZIP_6 || \
(compress) == ZIO_COMPRESS_GZIP_7 || \
(compress) == ZIO_COMPRESS_GZIP_8 || \
(compress) == ZIO_COMPRESS_GZIP_9 || \
(compress) == ZIO_COMPRESS_ZLE || \
(compress) == ZIO_COMPRESS_ZSTD || \
(compress) == ZIO_COMPRESS_ON || \
(compress) == ZIO_COMPRESS_OFF)
#define ZIO_COMPRESS_ALGO(x) (x & SPA_COMPRESSMASK)
#define ZIO_COMPRESS_LEVEL(x) ((x & ~SPA_COMPRESSMASK) >> SPA_COMPRESSBITS)
#define ZIO_COMPRESS_RAW(type, level) (type | ((level) << SPA_COMPRESSBITS))
#define ZIO_COMPLEVEL_ZSTD(level) \
ZIO_COMPRESS_RAW(ZIO_COMPRESS_ZSTD, level)
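/*
 * Editor's sketch, not part of this change: a hypothetical round-trip showing
 * how the macros above pack a zstd level next to the algorithm (the low
 * SPA_COMPRESSBITS bits hold the algorithm, the bits above hold the level).
 */
static inline void
zio_complevel_roundtrip_example(void)
{
	uint64_t prop = ZIO_COMPLEVEL_ZSTD(3);	/* algorithm | (3 << SPA_COMPRESSBITS) */

	ASSERT3U(ZIO_COMPRESS_ALGO(prop), ==, ZIO_COMPRESS_ZSTD);
	ASSERT3U(ZIO_COMPRESS_LEVEL(prop), ==, 3);
}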
#define ZIO_FAILURE_MODE_WAIT 0
#define ZIO_FAILURE_MODE_CONTINUE 1
#define ZIO_FAILURE_MODE_PANIC 2
typedef enum zio_suspend_reason {
ZIO_SUSPEND_NONE = 0,
ZIO_SUSPEND_IOERR,
ZIO_SUSPEND_MMP,
} zio_suspend_reason_t;
/*
* This was originally an enum type. However, those are 32-bit and there is no
* way to make a 64-bit enum type. Since we ran out of bits for flags, we were
* forced to upgrade it to a uint64_t.
*/
typedef uint64_t zio_flag_t;
/*
* Flags inherited by gang, ddt, and vdev children,
* and that must be equal for two zios to aggregate
*/
#define ZIO_FLAG_DONT_AGGREGATE (1ULL << 0)
#define ZIO_FLAG_IO_REPAIR (1ULL << 1)
#define ZIO_FLAG_SELF_HEAL (1ULL << 2)
#define ZIO_FLAG_RESILVER (1ULL << 3)
#define ZIO_FLAG_SCRUB (1ULL << 4)
#define ZIO_FLAG_SCAN_THREAD (1ULL << 5)
#define ZIO_FLAG_PHYSICAL (1ULL << 6)
#define ZIO_FLAG_AGG_INHERIT (ZIO_FLAG_CANFAIL - 1)
/*
* Flags inherited by ddt, gang, and vdev children.
*/
#define ZIO_FLAG_CANFAIL (1ULL << 7) /* must be first for INHERIT */
#define ZIO_FLAG_SPECULATIVE (1ULL << 8)
#define ZIO_FLAG_CONFIG_WRITER (1ULL << 9)
#define ZIO_FLAG_DONT_RETRY (1ULL << 10)
-#define ZIO_FLAG_DONT_CACHE (1ULL << 11)
#define ZIO_FLAG_NODATA (1ULL << 12)
#define ZIO_FLAG_INDUCE_DAMAGE (1ULL << 13)
#define ZIO_FLAG_IO_ALLOCATING (1ULL << 14)
#define ZIO_FLAG_DDT_INHERIT (ZIO_FLAG_IO_RETRY - 1)
#define ZIO_FLAG_GANG_INHERIT (ZIO_FLAG_IO_RETRY - 1)
/*
* Flags inherited by vdev children.
*/
#define ZIO_FLAG_IO_RETRY (1ULL << 15) /* must be first for INHERIT */
#define ZIO_FLAG_PROBE (1ULL << 16)
#define ZIO_FLAG_TRYHARD (1ULL << 17)
#define ZIO_FLAG_OPTIONAL (1ULL << 18)
#define ZIO_FLAG_VDEV_INHERIT (ZIO_FLAG_DONT_QUEUE - 1)
/*
* Flags not inherited by any children.
*/
#define ZIO_FLAG_DONT_QUEUE (1ULL << 19) /* must be first for INHERIT */
#define ZIO_FLAG_DONT_PROPAGATE (1ULL << 20)
#define ZIO_FLAG_IO_BYPASS (1ULL << 21)
#define ZIO_FLAG_IO_REWRITE (1ULL << 22)
#define ZIO_FLAG_RAW_COMPRESS (1ULL << 23)
#define ZIO_FLAG_RAW_ENCRYPT (1ULL << 24)
#define ZIO_FLAG_GANG_CHILD (1ULL << 25)
#define ZIO_FLAG_DDT_CHILD (1ULL << 26)
#define ZIO_FLAG_GODFATHER (1ULL << 27)
#define ZIO_FLAG_NOPWRITE (1ULL << 28)
#define ZIO_FLAG_REEXECUTED (1ULL << 29)
#define ZIO_FLAG_DELEGATED (1ULL << 30)
#define ZIO_FLAG_FASTWRITE (1ULL << 31)
#define ZIO_FLAG_MUSTSUCCEED 0
#define ZIO_FLAG_RAW (ZIO_FLAG_RAW_COMPRESS | ZIO_FLAG_RAW_ENCRYPT)
#define ZIO_DDT_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_DDT_INHERIT) | \
ZIO_FLAG_DDT_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_GANG_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_GANG_INHERIT) | \
ZIO_FLAG_GANG_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_VDEV_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_VDEV_INHERIT) | \
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_CANFAIL)
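/*
 * Editor's note (worked example of the inherit masks): each flag marked
 * "must be first for INHERIT" is a single power of two, so subtracting one
 * yields a mask covering every lower flag bit:
 *   ZIO_FLAG_AGG_INHERIT  = ZIO_FLAG_CANFAIL - 1    -> bits 0..6
 *   ZIO_FLAG_DDT_INHERIT  = ZIO_FLAG_IO_RETRY - 1   -> bits 0..14 (GANG likewise)
 *   ZIO_FLAG_VDEV_INHERIT = ZIO_FLAG_DONT_QUEUE - 1 -> bits 0..18
 */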
#define ZIO_CHILD_BIT(x) (1U << (x))
#define ZIO_CHILD_BIT_IS_SET(val, x) ((val) & (1U << (x)))
enum zio_child {
ZIO_CHILD_VDEV = 0,
ZIO_CHILD_GANG,
ZIO_CHILD_DDT,
ZIO_CHILD_LOGICAL,
ZIO_CHILD_TYPES
};
#define ZIO_CHILD_VDEV_BIT ZIO_CHILD_BIT(ZIO_CHILD_VDEV)
#define ZIO_CHILD_GANG_BIT ZIO_CHILD_BIT(ZIO_CHILD_GANG)
#define ZIO_CHILD_DDT_BIT ZIO_CHILD_BIT(ZIO_CHILD_DDT)
#define ZIO_CHILD_LOGICAL_BIT ZIO_CHILD_BIT(ZIO_CHILD_LOGICAL)
#define ZIO_CHILD_ALL_BITS \
(ZIO_CHILD_VDEV_BIT | ZIO_CHILD_GANG_BIT | \
ZIO_CHILD_DDT_BIT | ZIO_CHILD_LOGICAL_BIT)
enum zio_wait_type {
ZIO_WAIT_READY = 0,
ZIO_WAIT_DONE,
ZIO_WAIT_TYPES
};
typedef void zio_done_func_t(zio_t *zio);
extern int zio_exclude_metadata;
extern int zio_dva_throttle_enabled;
extern const char *const zio_type_name[ZIO_TYPES];
/*
* A bookmark is a four-tuple <objset, object, level, blkid> that uniquely
* identifies any block in the pool. By convention, the meta-objset (MOS)
* is objset 0, and the meta-dnode is object 0. This covers all blocks
* except root blocks and ZIL blocks, which are defined as follows:
*
* Root blocks (objset_phys_t) are object 0, level -1: <objset, 0, -1, 0>.
* ZIL blocks are bookmarked <objset, 0, -2, blkid == ZIL sequence number>.
* dmu_sync()ed ZIL data blocks are bookmarked <objset, object, -2, blkid>.
* dnode visit bookmarks are <objset, object id of dnode, -3, 0>.
*
* Note: this structure is called a bookmark because its original purpose
* was to remember where to resume a pool-wide traverse.
*
* Note: this structure is passed between userland and the kernel, and is
* stored on disk (by virtue of being incorporated into other on-disk
* structures, e.g. dsl_scan_phys_t).
*
* If the head_errlog feature is enabled a different on-disk format for error
* logs is used. This introduces the use of an error bookmark, a four-tuple
* <object, level, blkid, birth> that uniquely identifies any error block
* in the pool. The birth transaction group is used to track whether the block
* has been overwritten by newer data or added to a snapshot since its marking
* as an error.
*/
struct zbookmark_phys {
uint64_t zb_objset;
uint64_t zb_object;
int64_t zb_level;
uint64_t zb_blkid;
};
struct zbookmark_err_phys {
uint64_t zb_object;
int64_t zb_level;
uint64_t zb_blkid;
uint64_t zb_birth;
};
#define SET_BOOKMARK(zb, objset, object, level, blkid) \
{ \
(zb)->zb_objset = objset; \
(zb)->zb_object = object; \
(zb)->zb_level = level; \
(zb)->zb_blkid = blkid; \
}
#define ZB_DESTROYED_OBJSET (-1ULL)
#define ZB_ROOT_OBJECT (0ULL)
#define ZB_ROOT_LEVEL (-1LL)
#define ZB_ROOT_BLKID (0ULL)
#define ZB_ZIL_OBJECT (0ULL)
#define ZB_ZIL_LEVEL (-2LL)
#define ZB_DNODE_LEVEL (-3LL)
#define ZB_DNODE_BLKID (0ULL)
#define ZB_IS_ZERO(zb) \
((zb)->zb_objset == 0 && (zb)->zb_object == 0 && \
(zb)->zb_level == 0 && (zb)->zb_blkid == 0)
#define ZB_IS_ROOT(zb) \
((zb)->zb_object == ZB_ROOT_OBJECT && \
(zb)->zb_level == ZB_ROOT_LEVEL && \
(zb)->zb_blkid == ZB_ROOT_BLKID)
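/*
 * Editor's sketch, not part of this change: filling in a bookmark for a ZIL
 * block of objset "os_id" with ZIL sequence number "seq", following the
 * <objset, 0, -2, seq> convention documented above.  The function name and
 * parameters are hypothetical.
 */
static inline void
zb_zil_bookmark_example(zbookmark_phys_t *zb, uint64_t os_id, uint64_t seq)
{
	SET_BOOKMARK(zb, os_id, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, seq);
}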
typedef struct zio_prop {
enum zio_checksum zp_checksum;
enum zio_compress zp_compress;
uint8_t zp_complevel;
dmu_object_type_t zp_type;
uint8_t zp_level;
uint8_t zp_copies;
boolean_t zp_dedup;
boolean_t zp_dedup_verify;
boolean_t zp_nopwrite;
boolean_t zp_brtwrite;
boolean_t zp_encrypt;
boolean_t zp_byteorder;
uint8_t zp_salt[ZIO_DATA_SALT_LEN];
uint8_t zp_iv[ZIO_DATA_IV_LEN];
uint8_t zp_mac[ZIO_DATA_MAC_LEN];
uint32_t zp_zpl_smallblk;
} zio_prop_t;
typedef struct zio_cksum_report zio_cksum_report_t;
typedef void zio_cksum_finish_f(zio_cksum_report_t *rep,
const abd_t *good_data);
typedef void zio_cksum_free_f(void *cbdata, size_t size);
struct zio_bad_cksum; /* defined in zio_checksum.h */
struct dnode_phys;
struct abd;
struct zio_cksum_report {
struct zio_cksum_report *zcr_next;
nvlist_t *zcr_ereport;
nvlist_t *zcr_detector;
void *zcr_cbdata;
size_t zcr_cbinfo; /* passed to zcr_free() */
uint64_t zcr_sector;
uint64_t zcr_align;
uint64_t zcr_length;
zio_cksum_finish_f *zcr_finish;
zio_cksum_free_f *zcr_free;
/* internal use only */
struct zio_bad_cksum *zcr_ckinfo; /* information from failure */
};
typedef struct zio_vsd_ops {
zio_done_func_t *vsd_free;
} zio_vsd_ops_t;
typedef struct zio_gang_node {
zio_gbh_phys_t *gn_gbh;
struct zio_gang_node *gn_child[SPA_GBH_NBLKPTRS];
} zio_gang_node_t;
typedef zio_t *zio_gang_issue_func_t(zio_t *zio, blkptr_t *bp,
zio_gang_node_t *gn, struct abd *data, uint64_t offset);
typedef void zio_transform_func_t(zio_t *zio, struct abd *data, uint64_t size);
typedef struct zio_transform {
struct abd *zt_orig_abd;
uint64_t zt_orig_size;
uint64_t zt_bufsize;
zio_transform_func_t *zt_transform;
struct zio_transform *zt_next;
} zio_transform_t;
typedef zio_t *zio_pipe_stage_t(zio_t *zio);
/*
* The io_reexecute flags are distinct from io_flags because the child must
* be able to propagate them to the parent. The normal io_flags are local
* to the zio, not protected by any lock, and not modifiable by children;
* the reexecute flags are protected by io_lock, modifiable by children,
* and always propagated -- even when ZIO_FLAG_DONT_PROPAGATE is set.
*/
#define ZIO_REEXECUTE_NOW 0x01
#define ZIO_REEXECUTE_SUSPEND 0x02
/*
* The io_trim flags are used to specify the type of TRIM to perform. They
* only apply to ZIO_TYPE_TRIM zios and are distinct from io_flags.
*/
enum trim_flag {
ZIO_TRIM_SECURE = 1U << 0,
};
typedef struct zio_alloc_list {
list_t zal_list;
uint64_t zal_size;
} zio_alloc_list_t;
typedef struct zio_link {
zio_t *zl_parent;
zio_t *zl_child;
list_node_t zl_parent_node;
list_node_t zl_child_node;
} zio_link_t;
struct zio {
/* Core information about this I/O */
zbookmark_phys_t io_bookmark;
zio_prop_t io_prop;
zio_type_t io_type;
enum zio_child io_child_type;
enum trim_flag io_trim_flags;
int io_cmd;
zio_priority_t io_priority;
uint8_t io_reexecute;
uint8_t io_state[ZIO_WAIT_TYPES];
uint64_t io_txg;
spa_t *io_spa;
blkptr_t *io_bp;
blkptr_t *io_bp_override;
blkptr_t io_bp_copy;
list_t io_parent_list;
list_t io_child_list;
zio_t *io_logical;
zio_transform_t *io_transform_stack;
/* Callback info */
zio_done_func_t *io_ready;
zio_done_func_t *io_children_ready;
zio_done_func_t *io_physdone;
zio_done_func_t *io_done;
void *io_private;
int64_t io_prev_space_delta; /* DMU private */
blkptr_t io_bp_orig;
/* io_lsize != io_orig_size iff this is a raw write */
uint64_t io_lsize;
/* Data represented by this I/O */
struct abd *io_abd;
struct abd *io_orig_abd;
uint64_t io_size;
uint64_t io_orig_size;
/* Stuff for the vdev stack */
vdev_t *io_vd;
void *io_vsd;
const zio_vsd_ops_t *io_vsd_ops;
metaslab_class_t *io_metaslab_class; /* dva throttle class */
uint64_t io_offset;
hrtime_t io_timestamp; /* submitted at */
hrtime_t io_queued_timestamp;
hrtime_t io_target_timestamp;
hrtime_t io_delta; /* vdev queue service delta */
hrtime_t io_delay; /* Device access time (disk or */
/* file). */
avl_node_t io_queue_node;
avl_node_t io_offset_node;
avl_node_t io_alloc_node;
zio_alloc_list_t io_alloc_list;
/* Internal pipeline state */
zio_flag_t io_flags;
enum zio_stage io_stage;
enum zio_stage io_pipeline;
zio_flag_t io_orig_flags;
enum zio_stage io_orig_stage;
enum zio_stage io_orig_pipeline;
enum zio_stage io_pipeline_trace;
int io_error;
int io_child_error[ZIO_CHILD_TYPES];
uint64_t io_children[ZIO_CHILD_TYPES][ZIO_WAIT_TYPES];
uint64_t io_child_count;
uint64_t io_phys_children;
uint64_t io_parent_count;
uint64_t *io_stall;
zio_t *io_gang_leader;
zio_gang_node_t *io_gang_tree;
void *io_executor;
void *io_waiter;
void *io_bio;
kmutex_t io_lock;
kcondvar_t io_cv;
int io_allocator;
/* FMA state */
zio_cksum_report_t *io_cksum_report;
uint64_t io_ena;
/* Taskq dispatching state */
taskq_ent_t io_tqent;
};
enum blk_verify_flag {
BLK_VERIFY_ONLY,
BLK_VERIFY_LOG,
BLK_VERIFY_HALT
};
enum blk_config_flag {
BLK_CONFIG_HELD, // SCL_VDEV held for writer
BLK_CONFIG_NEEDED, // SCL_VDEV should be obtained for reader
BLK_CONFIG_SKIP, // skip checks which require SCL_VDEV
};
extern int zio_bookmark_compare(const void *, const void *);
extern zio_t *zio_null(zio_t *pio, spa_t *spa, vdev_t *vd,
zio_done_func_t *done, void *priv, zio_flag_t flags);
extern zio_t *zio_root(spa_t *spa,
zio_done_func_t *done, void *priv, zio_flag_t flags);
extern void zio_destroy(zio_t *zio);
extern zio_t *zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
struct abd *data, uint64_t lsize, zio_done_func_t *done, void *priv,
zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb);
extern zio_t *zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *physdone, zio_done_func_t *done,
void *priv, zio_priority_t priority, zio_flag_t flags,
const zbookmark_phys_t *zb);
extern zio_t *zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, zio_done_func_t *done, void *priv,
zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb);
extern void zio_write_override(zio_t *zio, blkptr_t *bp, int copies,
boolean_t nopwrite, boolean_t brtwrite);
extern void zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp);
extern zio_t *zio_claim(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp,
zio_done_func_t *done, void *priv, zio_flag_t flags);
extern zio_t *zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *priv, zio_flag_t flags);
extern zio_t *zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *priv, zio_priority_t priority,
zio_flag_t flags, enum trim_flag trim_flags);
extern zio_t *zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *priv, zio_priority_t priority,
zio_flag_t flags, boolean_t labels);
extern zio_t *zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *priv, zio_priority_t priority,
zio_flag_t flags, boolean_t labels);
extern zio_t *zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp, zio_flag_t flags);
extern int zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg,
blkptr_t *new_bp, uint64_t size, boolean_t *slog);
extern void zio_flush(zio_t *zio, vdev_t *vd);
extern void zio_shrink(zio_t *zio, uint64_t size);
extern int zio_wait(zio_t *zio);
extern void zio_nowait(zio_t *zio);
extern void zio_execute(void *zio);
extern void zio_interrupt(void *zio);
extern void zio_delay_init(zio_t *zio);
extern void zio_delay_interrupt(zio_t *zio);
extern void zio_deadman(zio_t *zio, const char *tag);
extern zio_t *zio_walk_parents(zio_t *cio, zio_link_t **);
extern zio_t *zio_walk_children(zio_t *pio, zio_link_t **);
extern zio_t *zio_unique_parent(zio_t *cio);
extern void zio_add_child(zio_t *pio, zio_t *cio);
extern void *zio_buf_alloc(size_t size);
extern void zio_buf_free(void *buf, size_t size);
extern void *zio_data_buf_alloc(size_t size);
extern void zio_data_buf_free(void *buf, size_t size);
extern void zio_push_transform(zio_t *zio, struct abd *abd, uint64_t size,
uint64_t bufsize, zio_transform_func_t *transform);
extern void zio_pop_transforms(zio_t *zio);
extern void zio_resubmit_stage_async(void *);
extern zio_t *zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd,
uint64_t offset, struct abd *data, uint64_t size, int type,
zio_priority_t priority, zio_flag_t flags,
zio_done_func_t *done, void *priv);
extern zio_t *zio_vdev_delegated_io(vdev_t *vd, uint64_t offset,
struct abd *data, uint64_t size, zio_type_t type, zio_priority_t priority,
zio_flag_t flags, zio_done_func_t *done, void *priv);
extern void zio_vdev_io_bypass(zio_t *zio);
extern void zio_vdev_io_reissue(zio_t *zio);
extern void zio_vdev_io_redone(zio_t *zio);
extern void zio_change_priority(zio_t *pio, zio_priority_t priority);
extern void zio_checksum_verified(zio_t *zio);
extern int zio_worst_error(int e1, int e2);
extern enum zio_checksum zio_checksum_select(enum zio_checksum child,
enum zio_checksum parent);
extern enum zio_checksum zio_checksum_dedup_select(spa_t *spa,
enum zio_checksum child, enum zio_checksum parent);
extern enum zio_compress zio_compress_select(spa_t *spa,
enum zio_compress child, enum zio_compress parent);
extern uint8_t zio_complevel_select(spa_t *spa, enum zio_compress compress,
uint8_t child, uint8_t parent);
extern void zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t);
extern int zio_resume(spa_t *spa);
extern void zio_resume_wait(spa_t *spa);
extern boolean_t zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
enum blk_config_flag blk_config, enum blk_verify_flag blk_verify);
/*
* Initial setup and teardown.
*/
extern void zio_init(void);
extern void zio_fini(void);
/*
* Fault injection
*/
struct zinject_record;
extern uint32_t zio_injection_enabled;
extern int zio_inject_fault(char *name, int flags, int *id,
struct zinject_record *record);
extern int zio_inject_list_next(int *id, char *name, size_t buflen,
struct zinject_record *record);
extern int zio_clear_fault(int id);
extern void zio_handle_panic_injection(spa_t *spa, const char *tag,
uint64_t type);
extern int zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
uint64_t type, int error);
extern int zio_handle_fault_injection(zio_t *zio, int error);
extern int zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error);
extern int zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1,
int err2);
extern int zio_handle_label_injection(zio_t *zio, int error);
extern void zio_handle_ignored_writes(zio_t *zio);
extern hrtime_t zio_handle_io_delay(zio_t *zio);
/*
* Checksum ereport functions
*/
extern int zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, struct zio_bad_cksum *info);
extern void zfs_ereport_finish_checksum(zio_cksum_report_t *report,
const abd_t *good_data, const abd_t *bad_data, boolean_t drop_if_identical);
extern void zfs_ereport_free_checksum(zio_cksum_report_t *report);
/* If we have the good data in hand, this function can be used */
extern int zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, const abd_t *good_data, const abd_t *bad_data,
struct zio_bad_cksum *info);
void zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr);
extern void zfs_ereport_snapshot_post(const char *subclass, spa_t *spa,
const char *name);
/* Called from spa_sync(), but primarily an injection handler */
extern void spa_handle_ignored_writes(spa_t *spa);
/* zbookmark_phys functions */
boolean_t zbookmark_subtree_completed(const struct dnode_phys *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block);
boolean_t zbookmark_subtree_tbd(const struct dnode_phys *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block);
int zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2,
uint8_t ibs2, const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2);
#ifdef __cplusplus
}
#endif
#endif /* _ZIO_H */
diff --git a/sys/contrib/openzfs/lib/libspl/include/umem.h b/sys/contrib/openzfs/lib/libspl/include/umem.h
index 77c216721253..9039212baf14 100644
--- a/sys/contrib/openzfs/lib/libspl/include/umem.h
+++ b/sys/contrib/openzfs/lib/libspl/include/umem.h
@@ -1,229 +1,230 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _LIBSPL_UMEM_H
#define _LIBSPL_UMEM_H
/*
* XXX: We should use the real portable umem library if it is detected
* at configure time. However, if the library is not available, we can
* use a trivial malloc based implementation. This obviously impacts
* performance, but unless you are using a full userspace build of zpool for
* something other than ztest, you are likely not going to notice or care.
*
* https://labs.omniti.com/trac/portableumem
*/
#include <sys/debug.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef void vmem_t;
/*
* Flags for umem_alloc/umem_free
*/
#define UMEM_DEFAULT 0x0000 /* normal -- may fail */
#define UMEM_NOFAIL 0x0100 /* Never fails */
/*
* Flags for umem_cache_create()
*/
#define UMC_NODEBUG 0x00020000
#define UMEM_CACHE_NAMELEN 31
typedef int umem_nofail_callback_t(void);
typedef int umem_constructor_t(void *, void *, int);
typedef void umem_destructor_t(void *, void *);
typedef void umem_reclaim_t(void *);
typedef struct umem_cache {
char cache_name[UMEM_CACHE_NAMELEN + 1];
size_t cache_bufsize;
size_t cache_align;
umem_constructor_t *cache_constructor;
umem_destructor_t *cache_destructor;
umem_reclaim_t *cache_reclaim;
void *cache_private;
void *cache_arena;
int cache_cflags;
} umem_cache_t;
/* Prototypes for functions to provide defaults for umem envvars */
const char *_umem_debug_init(void);
const char *_umem_options_init(void);
const char *_umem_logging_init(void);
-__attribute__((alloc_size(1)))
+__attribute__((malloc, alloc_size(1)))
static inline void *
umem_alloc(size_t size, int flags)
{
void *ptr = NULL;
do {
ptr = malloc(size);
} while (ptr == NULL && (flags & UMEM_NOFAIL));
return (ptr);
}
-__attribute__((alloc_size(1)))
+__attribute__((malloc, alloc_size(1)))
static inline void *
umem_alloc_aligned(size_t size, size_t align, int flags)
{
void *ptr = NULL;
int rc = EINVAL;
do {
rc = posix_memalign(&ptr, align, size);
} while (rc == ENOMEM && (flags & UMEM_NOFAIL));
if (rc == EINVAL) {
fprintf(stderr, "%s: invalid memory alignment (%zd)\n",
__func__, align);
if (flags & UMEM_NOFAIL)
abort();
return (NULL);
}
return (ptr);
}
-__attribute__((alloc_size(1)))
+__attribute__((malloc, alloc_size(1)))
static inline void *
umem_zalloc(size_t size, int flags)
{
void *ptr = NULL;
ptr = umem_alloc(size, flags);
if (ptr)
memset(ptr, 0, size);
return (ptr);
}
static inline void
umem_free(const void *ptr, size_t size __maybe_unused)
{
free((void *)ptr);
}
/*
* umem_free_aligned was added to support portability
* with non-POSIX platforms that require a different free
* to be used with aligned allocations.
*/
static inline void
umem_free_aligned(void *ptr, size_t size __maybe_unused)
{
#ifndef _WIN32
free((void *)ptr);
#else
_aligned_free(ptr);
#endif
}
static inline void
umem_nofail_callback(umem_nofail_callback_t *cb __maybe_unused)
{}
static inline umem_cache_t *
umem_cache_create(
const char *name, size_t bufsize, size_t align,
umem_constructor_t *constructor,
umem_destructor_t *destructor,
umem_reclaim_t *reclaim,
void *priv, void *vmp, int cflags)
{
umem_cache_t *cp;
cp = (umem_cache_t *)umem_alloc(sizeof (umem_cache_t), UMEM_DEFAULT);
if (cp) {
strlcpy(cp->cache_name, name, UMEM_CACHE_NAMELEN);
cp->cache_bufsize = bufsize;
cp->cache_align = align;
cp->cache_constructor = constructor;
cp->cache_destructor = destructor;
cp->cache_reclaim = reclaim;
cp->cache_private = priv;
cp->cache_arena = vmp;
cp->cache_cflags = cflags;
}
return (cp);
}
static inline void
umem_cache_destroy(umem_cache_t *cp)
{
umem_free(cp, sizeof (umem_cache_t));
}
+__attribute__((malloc))
static inline void *
umem_cache_alloc(umem_cache_t *cp, int flags)
{
void *ptr = NULL;
if (cp->cache_align != 0)
ptr = umem_alloc_aligned(
cp->cache_bufsize, cp->cache_align, flags);
else
ptr = umem_alloc(cp->cache_bufsize, flags);
if (ptr && cp->cache_constructor)
cp->cache_constructor(ptr, cp->cache_private, UMEM_DEFAULT);
return (ptr);
}
static inline void
umem_cache_free(umem_cache_t *cp, void *ptr)
{
if (cp->cache_destructor)
cp->cache_destructor(ptr, cp->cache_private);
if (cp->cache_align != 0)
umem_free_aligned(ptr, cp->cache_bufsize);
else
umem_free(ptr, cp->cache_bufsize);
}
static inline void
umem_cache_reap_now(umem_cache_t *cp __maybe_unused)
{
}
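/*
 * Editor's sketch, not part of this change: typical usage of this
 * malloc-backed umem shim.  The cache name and buffer size are placeholders.
 */
static inline void
umem_cache_usage_example(void)
{
	umem_cache_t *cache;
	void *obj;

	cache = umem_cache_create("example_cache", 64, 0,
	    NULL, NULL, NULL, NULL, NULL, 0);
	if (cache == NULL)
		return;
	obj = umem_cache_alloc(cache, UMEM_NOFAIL);	/* retries until it succeeds */
	umem_cache_free(cache, obj);
	umem_cache_destroy(cache);
}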
#ifdef __cplusplus
}
#endif
#endif
diff --git a/sys/contrib/openzfs/lib/libzpool/Makefile.am b/sys/contrib/openzfs/lib/libzpool/Makefile.am
index ceac2963e647..58d7f07527aa 100644
--- a/sys/contrib/openzfs/lib/libzpool/Makefile.am
+++ b/sys/contrib/openzfs/lib/libzpool/Makefile.am
@@ -1,214 +1,213 @@
libzpool_la_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS) $(LIBRARY_CFLAGS)
libzpool_la_CFLAGS += $(ZLIB_CFLAGS)
libzpool_la_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
libzpool_la_CPPFLAGS += -I$(srcdir)/include/os/@ac_system_l@/zfs
libzpool_la_CPPFLAGS += -DLIB_ZPOOL_BUILD
lib_LTLIBRARIES += libzpool.la
CPPCHECKTARGETS += libzpool.la
dist_libzpool_la_SOURCES = \
%D%/kernel.c \
%D%/taskq.c \
%D%/util.c
nodist_libzpool_la_SOURCES = \
module/lua/lapi.c \
module/lua/lauxlib.c \
module/lua/lbaselib.c \
module/lua/lcode.c \
module/lua/lcompat.c \
module/lua/lcorolib.c \
module/lua/lctype.c \
module/lua/ldebug.c \
module/lua/ldo.c \
module/lua/lfunc.c \
module/lua/lgc.c \
module/lua/llex.c \
module/lua/lmem.c \
module/lua/lobject.c \
module/lua/lopcodes.c \
module/lua/lparser.c \
module/lua/lstate.c \
module/lua/lstring.c \
module/lua/lstrlib.c \
module/lua/ltable.c \
module/lua/ltablib.c \
module/lua/ltm.c \
module/lua/lvm.c \
module/lua/lzio.c \
\
module/os/linux/zfs/abd_os.c \
module/os/linux/zfs/arc_os.c \
module/os/linux/zfs/trace.c \
module/os/linux/zfs/vdev_file.c \
module/os/linux/zfs/zfs_debug.c \
module/os/linux/zfs/zfs_racct.c \
module/os/linux/zfs/zfs_znode.c \
module/os/linux/zfs/zio_crypt.c \
\
module/zcommon/cityhash.c \
module/zcommon/zfeature_common.c \
module/zcommon/zfs_comutil.c \
module/zcommon/zfs_deleg.c \
module/zcommon/zfs_fletcher.c \
module/zcommon/zfs_fletcher_aarch64_neon.c \
module/zcommon/zfs_fletcher_avx512.c \
module/zcommon/zfs_fletcher_intel.c \
module/zcommon/zfs_fletcher_sse.c \
module/zcommon/zfs_fletcher_superscalar.c \
module/zcommon/zfs_fletcher_superscalar4.c \
module/zcommon/zfs_namecheck.c \
module/zcommon/zfs_prop.c \
module/zcommon/zpool_prop.c \
module/zcommon/zprop_common.c \
\
module/zfs/abd.c \
module/zfs/aggsum.c \
module/zfs/arc.c \
module/zfs/blake3_zfs.c \
module/zfs/blkptr.c \
module/zfs/bplist.c \
module/zfs/bpobj.c \
module/zfs/bptree.c \
module/zfs/bqueue.c \
module/zfs/btree.c \
module/zfs/brt.c \
module/zfs/dbuf.c \
module/zfs/dbuf_stats.c \
module/zfs/ddt.c \
module/zfs/ddt_zap.c \
module/zfs/dmu.c \
module/zfs/dmu_diff.c \
module/zfs/dmu_object.c \
module/zfs/dmu_objset.c \
module/zfs/dmu_recv.c \
module/zfs/dmu_redact.c \
module/zfs/dmu_send.c \
module/zfs/dmu_traverse.c \
module/zfs/dmu_tx.c \
module/zfs/dmu_zfetch.c \
module/zfs/dnode.c \
module/zfs/dnode_sync.c \
module/zfs/dsl_bookmark.c \
module/zfs/dsl_crypt.c \
module/zfs/dsl_dataset.c \
module/zfs/dsl_deadlist.c \
module/zfs/dsl_deleg.c \
module/zfs/dsl_destroy.c \
module/zfs/dsl_dir.c \
module/zfs/dsl_pool.c \
module/zfs/dsl_prop.c \
module/zfs/dsl_scan.c \
module/zfs/dsl_synctask.c \
module/zfs/dsl_userhold.c \
module/zfs/edonr_zfs.c \
module/zfs/fm.c \
module/zfs/gzip.c \
module/zfs/hkdf.c \
module/zfs/lz4.c \
module/zfs/lz4_zfs.c \
module/zfs/lzjb.c \
module/zfs/metaslab.c \
module/zfs/mmp.c \
module/zfs/multilist.c \
module/zfs/objlist.c \
module/zfs/pathname.c \
module/zfs/range_tree.c \
module/zfs/refcount.c \
module/zfs/rrwlock.c \
module/zfs/sa.c \
module/zfs/sha2_zfs.c \
module/zfs/skein_zfs.c \
module/zfs/spa.c \
module/zfs/spa_checkpoint.c \
module/zfs/spa_config.c \
module/zfs/spa_errlog.c \
module/zfs/spa_history.c \
module/zfs/spa_log_spacemap.c \
module/zfs/spa_misc.c \
module/zfs/spa_stats.c \
module/zfs/space_map.c \
module/zfs/space_reftree.c \
module/zfs/txg.c \
module/zfs/uberblock.c \
module/zfs/unique.c \
module/zfs/vdev.c \
- module/zfs/vdev_cache.c \
module/zfs/vdev_draid.c \
module/zfs/vdev_draid_rand.c \
module/zfs/vdev_indirect.c \
module/zfs/vdev_indirect_births.c \
module/zfs/vdev_indirect_mapping.c \
module/zfs/vdev_initialize.c \
module/zfs/vdev_label.c \
module/zfs/vdev_mirror.c \
module/zfs/vdev_missing.c \
module/zfs/vdev_queue.c \
module/zfs/vdev_raidz.c \
module/zfs/vdev_raidz_math.c \
module/zfs/vdev_raidz_math_aarch64_neon.c \
module/zfs/vdev_raidz_math_aarch64_neonx2.c \
module/zfs/vdev_raidz_math_avx2.c \
module/zfs/vdev_raidz_math_avx512bw.c \
module/zfs/vdev_raidz_math_avx512f.c \
module/zfs/vdev_raidz_math_powerpc_altivec.c \
module/zfs/vdev_raidz_math_scalar.c \
module/zfs/vdev_raidz_math_sse2.c \
module/zfs/vdev_raidz_math_ssse3.c \
module/zfs/vdev_rebuild.c \
module/zfs/vdev_removal.c \
module/zfs/vdev_root.c \
module/zfs/vdev_trim.c \
module/zfs/zap.c \
module/zfs/zap_leaf.c \
module/zfs/zap_micro.c \
module/zfs/zcp.c \
module/zfs/zcp_get.c \
module/zfs/zcp_global.c \
module/zfs/zcp_iter.c \
module/zfs/zcp_set.c \
module/zfs/zcp_synctask.c \
module/zfs/zfeature.c \
module/zfs/zfs_byteswap.c \
module/zfs/zfs_chksum.c \
module/zfs/zfs_fm.c \
module/zfs/zfs_fuid.c \
module/zfs/zfs_ratelimit.c \
module/zfs/zfs_rlock.c \
module/zfs/zfs_sa.c \
module/zfs/zil.c \
module/zfs/zio.c \
module/zfs/zio_checksum.c \
module/zfs/zio_compress.c \
module/zfs/zio_inject.c \
module/zfs/zle.c \
module/zfs/zrlock.c \
module/zfs/zthr.c
libzpool_la_LIBADD = \
libicp.la \
libunicode.la \
libnvpair.la \
libzstd.la \
libzutil.la
libzpool_la_LIBADD += $(LIBCLOCK_GETTIME) $(ZLIB_LIBS) -ldl -lm
libzpool_la_LDFLAGS = -pthread
if !ASAN_ENABLED
libzpool_la_LDFLAGS += -Wl,-z,defs
endif
if BUILD_FREEBSD
libzpool_la_LIBADD += -lgeom
endif
libzpool_la_LDFLAGS += -version-info 5:0:0
if TARGET_CPU_POWERPC
module/zfs/libzpool_la-vdev_raidz_math_powerpc_altivec.$(OBJEXT) : CFLAGS += -maltivec
module/zfs/libzpool_la-vdev_raidz_math_powerpc_altivec.l$(OBJEXT): CFLAGS += -maltivec
endif
diff --git a/sys/contrib/openzfs/man/man4/zfs.4 b/sys/contrib/openzfs/man/man4/zfs.4
index 9ec940a94488..5fbd9d7db93f 100644
--- a/sys/contrib/openzfs/man/man4/zfs.4
+++ b/sys/contrib/openzfs/man/man4/zfs.4
@@ -1,2596 +1,2581 @@
.\"
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Copyright (c) 2019, 2021 by Delphix. All rights reserved.
.\" Copyright (c) 2019 Datto Inc.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or https://opensource.org/licenses/CDDL-1.0.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.Dd January 10, 2023
.Dt ZFS 4
.Os
.
.Sh NAME
.Nm zfs
.Nd tuning of the ZFS kernel module
.
.Sh DESCRIPTION
The ZFS module supports these parameters:
.Bl -tag -width Ds
.It Sy dbuf_cache_max_bytes Ns = Ns Sy UINT64_MAX Ns B Pq u64
Maximum size in bytes of the dbuf cache.
The target size is determined by the MIN versus
.No 1/2^ Ns Sy dbuf_cache_shift Pq 1/32nd
of the target ARC size.
The behavior of the dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_metadata_cache_max_bytes Ns = Ns Sy UINT64_MAX Ns B Pq u64
Maximum size in bytes of the metadata dbuf cache.
The target size is determined by the MIN versus
.No 1/2^ Ns Sy dbuf_metadata_cache_shift Pq 1/64th
of the target ARC size.
The behavior of the metadata dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_cache_hiwater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage over
.Sy dbuf_cache_max_bytes
when dbufs must be evicted directly.
.
.It Sy dbuf_cache_lowater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage below
.Sy dbuf_cache_max_bytes
when the evict thread stops evicting dbufs.
.
.It Sy dbuf_cache_shift Ns = Ns Sy 5 Pq uint
Set the size of the dbuf cache
.Pq Sy dbuf_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dbuf_metadata_cache_shift Ns = Ns Sy 6 Pq uint
Set the size of the dbuf metadata cache
.Pq Sy dbuf_metadata_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dbuf_mutex_cache_shift Ns = Ns Sy 0 Pq uint
Set the size of the mutex array for the dbuf cache.
When set to
.Sy 0
the array is dynamically sized based on total system memory.
.
.It Sy dmu_object_alloc_chunk_shift Ns = Ns Sy 7 Po 128 Pc Pq uint
dnode slots allocated in a single operation as a power of 2.
The default value minimizes lock contention for the bulk operation performed.
.
.It Sy dmu_prefetch_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
Limit the amount we can prefetch with one call to this amount in bytes.
This helps to limit the amount of memory that can be used by prefetching.
.
.It Sy ignore_hole_birth Pq int
Alias for
.Sy send_holes_without_birth_time .
.
.It Sy l2arc_feed_again Ns = Ns Sy 1 Ns | Ns 0 Pq int
Turbo L2ARC warm-up.
When the L2ARC is cold the fill interval will be set as fast as possible.
.
.It Sy l2arc_feed_min_ms Ns = Ns Sy 200 Pq u64
Min feed interval in milliseconds.
Requires
.Sy l2arc_feed_again Ns = Ns Ar 1
and only applicable in related situations.
.
.It Sy l2arc_feed_secs Ns = Ns Sy 1 Pq u64
Seconds between L2ARC writing.
.
.It Sy l2arc_headroom Ns = Ns Sy 2 Pq u64
How far through the ARC lists to search for L2ARC cacheable content,
expressed as a multiplier of
.Sy l2arc_write_max .
ARC persistence across reboots can be achieved with persistent L2ARC
by setting this parameter to
.Sy 0 ,
allowing the full length of ARC lists to be searched for cacheable content.
.
.It Sy l2arc_headroom_boost Ns = Ns Sy 200 Ns % Pq u64
Scales
.Sy l2arc_headroom
by this percentage when L2ARC contents are being successfully compressed
before writing.
A value of
.Sy 100
disables this feature.
.
.It Sy l2arc_exclude_special Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether buffers present on special vdevs are eligible for caching
into L2ARC.
If set to 1, exclude dbufs on special vdevs from being cached to L2ARC.
.
.It Sy l2arc_mfuonly Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether only MFU metadata and data are cached from ARC into L2ARC.
This may be desired to avoid wasting space on L2ARC when reading/writing large
amounts of data that are not expected to be accessed more than once.
.Pp
The default is off,
meaning both MRU and MFU data and metadata are cached.
When turning off this feature, some MRU buffers will still be present
in ARC and eventually cached on L2ARC.
.No If Sy l2arc_noprefetch Ns = Ns Sy 0 ,
some prefetched buffers will be cached to L2ARC, and those might later
transition to MRU, in which case the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
Regardless of
.Sy l2arc_noprefetch ,
some MFU buffers might be evicted from ARC,
accessed later on as prefetches and transition to MRU as prefetches.
If accessed again they are counted as MRU and the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
The ARC status of L2ARC buffers when they were first cached in
L2ARC can be seen in the
.Sy l2arc_mru_asize , Sy l2arc_mfu_asize , No and Sy l2arc_prefetch_asize
arcstats when importing the pool or onlining a cache
device if persistent L2ARC is enabled.
.Pp
The
.Sy evict_l2_eligible_mru
arcstat does not take into account if this option is enabled as the information
provided by the
.Sy evict_l2_eligible_m[rf]u
arcstats can be used to decide if toggling this option is appropriate
for the current workload.
.
.It Sy l2arc_meta_percent Ns = Ns Sy 33 Ns % Pq uint
Percent of ARC size allowed for L2ARC-only headers.
Since L2ARC buffers are not evicted on memory pressure,
too many headers on a system with an irrationally large L2ARC
can render it slow or unusable.
This parameter limits L2ARC writes and rebuilds to achieve the target.
.
.It Sy l2arc_trim_ahead Ns = Ns Sy 0 Ns % Pq u64
Trims ahead of the current write size
.Pq Sy l2arc_write_max
on L2ARC devices by this percentage of write size if we have filled the device.
If set to
.Sy 100
we TRIM twice the space required to accommodate upcoming writes.
A minimum of
.Sy 64 MiB
will be trimmed.
It also enables TRIM of the whole L2ARC device upon creation
or addition to an existing pool or if the header of the device is
invalid upon importing a pool or onlining a cache device.
A value of
.Sy 0
disables TRIM on L2ARC altogether and is the default as it can put significant
stress on the underlying storage devices.
This will vary depending on how well the specific device handles these commands.
.
.It Sy l2arc_noprefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
Do not write buffers to L2ARC if they were prefetched but not used by
applications.
In case there are prefetched buffers in L2ARC and this option
is later set, we do not read the prefetched buffers from L2ARC.
Unsetting this option is useful for caching sequential reads from the
disks to L2ARC and serving those reads from L2ARC later on.
This may be beneficial in case the L2ARC device is significantly faster
in sequential reads than the disks of the pool.
.Pp
Use
.Sy 1
to disable and
.Sy 0
to enable caching/reading prefetches to/from L2ARC.
.
.It Sy l2arc_norw Ns = Ns Sy 0 Ns | Ns 1 Pq int
No reads during writes.
.
.It Sy l2arc_write_boost Ns = Ns Sy 8388608 Ns B Po 8 MiB Pc Pq u64
Cold L2ARC devices will have
.Sy l2arc_write_max
increased by this amount while they remain cold.
.
.It Sy l2arc_write_max Ns = Ns Sy 8388608 Ns B Po 8 MiB Pc Pq u64
Max write bytes per interval.
.
.It Sy l2arc_rebuild_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Rebuild the L2ARC when importing a pool (persistent L2ARC).
This can be disabled if there are problems importing a pool
or attaching an L2ARC device (e.g. the L2ARC device is slow
in reading stored log metadata, or the metadata
has become somehow fragmented/unusable).
.
.It Sy l2arc_rebuild_blocks_min_l2size Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Minimum size of an L2ARC device required in order to write log blocks in it.
The log blocks are used upon importing the pool to rebuild the persistent L2ARC.
.Pp
For L2ARC devices less than 1 GiB, the amount of data
.Fn l2arc_evict
evicts is significant compared to the amount of restored L2ARC data.
In this case, do not write log blocks in L2ARC in order not to waste space.
.
.It Sy metaslab_aliquot Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Metaslab granularity, in bytes.
This is roughly similar to what would be referred to as the "stripe size"
in traditional RAID arrays.
In normal operation, ZFS will try to write this amount of data to each disk
before moving on to the next top-level vdev.
.
.It Sy metaslab_bias_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group biasing based on their vdevs' over- or under-utilization
relative to the pool.
.
.It Sy metaslab_force_ganging Ns = Ns Sy 16777217 Ns B Po 16 MiB + 1 B Pc Pq u64
Make some blocks above a certain size be gang blocks.
This option is used by the test suite to facilitate testing.
.
.It Sy zfs_default_bs Ns = Ns Sy 9 Po 512 B Pc Pq int
Default dnode block size as a power of 2.
.
.It Sy zfs_default_ibs Ns = Ns Sy 17 Po 128 KiB Pc Pq int
Default dnode indirect block size as a power of 2.
.
.It Sy zfs_history_output_max Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
When attempting to log an output nvlist of an ioctl in the on-disk history,
the output will not be stored if it is larger than this size (in bytes).
This must be less than
.Sy DMU_MAX_ACCESS Pq 64 MiB .
This applies primarily to
.Fn zfs_ioc_channel_program Pq cf. Xr zfs-program 8 .
.
.It Sy zfs_keep_log_spacemaps_at_export Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent log spacemaps from being destroyed during pool exports and destroys.
.
.It Sy zfs_metaslab_segment_weight_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable segment-based metaslab selection.
.
.It Sy zfs_metaslab_switch_threshold Ns = Ns Sy 2 Pq int
When using segment-based metaslab selection, continue allocating
from the active metaslab until this option's
worth of buckets have been exhausted.
.
.It Sy metaslab_debug_load Ns = Ns Sy 0 Ns | Ns 1 Pq int
Load all metaslabs during pool import.
.
.It Sy metaslab_debug_unload Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent metaslabs from being unloaded.
.
.It Sy metaslab_fragmentation_factor_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable use of the fragmentation metric in computing metaslab weights.
.
.It Sy metaslab_df_max_search Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
Maximum distance to search forward from the last offset.
Without this limit, fragmented pools can see
.Em >100`000
iterations and
.Fn metaslab_block_picker
becomes the performance limiting factor on high-performance storage.
.Pp
With the default setting of
.Sy 16 MiB ,
we typically see less than
.Em 500
iterations, even with very fragmented
.Sy ashift Ns = Ns Sy 9
pools.
The maximum number of iterations possible is
.Sy metaslab_df_max_search / 2^(ashift+1) .
With the default setting of
.Sy 16 MiB
this is
.Em 16*1024 Pq with Sy ashift Ns = Ns Sy 9
or
.Em 2*1024 Pq with Sy ashift Ns = Ns Sy 12 .
.
.It Sy metaslab_df_use_largest_segment Ns = Ns Sy 0 Ns | Ns 1 Pq int
If not searching forward (due to
.Sy metaslab_df_max_search , metaslab_df_free_pct ,
.No or Sy metaslab_df_alloc_threshold ) ,
this tunable controls which segment is used.
If set, we will use the largest free segment.
If unset, we will use a segment of at least the requested size.
.
.It Sy zfs_metaslab_max_size_cache_sec Ns = Ns Sy 3600 Ns s Po 1 hour Pc Pq u64
When we unload a metaslab, we cache the size of the largest free chunk.
We use that cached size to determine whether or not to load a metaslab
for a given allocation.
As more frees accumulate in that metaslab while it's unloaded,
the cached max size becomes less and less accurate.
After a number of seconds controlled by this tunable,
we stop considering the cached max size and start
considering only the histogram instead.
.
.It Sy zfs_metaslab_mem_limit Ns = Ns Sy 25 Ns % Pq uint
When we are loading a new metaslab, we check the amount of memory being used
to store metaslab range trees.
If it is over a threshold, we attempt to unload the least recently used metaslab
to prevent the system from clogging all of its memory with range trees.
This tunable sets the percentage of total system memory that is the threshold.
.
.It Sy zfs_metaslab_try_hard_before_gang Ns = Ns Sy 0 Ns | Ns 1 Pq int
.Bl -item -compact
.It
If unset, we will first try normal allocation.
.It
If that fails then we will do a gang allocation.
.It
If that fails then we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.Pp
.Bl -item -compact
.It
If set, we will first try normal allocation.
.It
If that fails then we will do a "try hard" allocation.
.It
If that fails we will do a gang allocation.
.It
If that fails we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.
.It Sy zfs_metaslab_find_max_tries Ns = Ns Sy 100 Pq uint
When not trying hard, we only consider this number of the best metaslabs.
This improves performance, especially when there are many metaslabs per vdev
and the allocation can't actually be satisfied
(so we would otherwise iterate all metaslabs).
.
.It Sy zfs_vdev_default_ms_count Ns = Ns Sy 200 Pq uint
When a vdev is added, target this number of metaslabs per top-level vdev.
.
.It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512 MiB Pc Pq uint
Default lower limit for metaslab size.
.
.It Sy zfs_vdev_max_ms_shift Ns = Ns Sy 34 Po 16 GiB Pc Pq uint
Default upper limit for metaslab size.
.
.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy 14 Pq uint
Maximum ashift used when optimizing for logical \[->] physical sector size on
new
top-level vdevs.
May be increased up to
.Sy ASHIFT_MAX Po 16 Pc ,
but this may negatively impact pool space efficiency.
.
.It Sy zfs_vdev_min_auto_ashift Ns = Ns Sy ASHIFT_MIN Po 9 Pc Pq uint
Minimum ashift used when creating new top-level vdevs.
.
.It Sy zfs_vdev_min_ms_count Ns = Ns Sy 16 Pq uint
Minimum number of metaslabs to create in a top-level vdev.
.
.It Sy vdev_validate_skip Ns = Ns Sy 0 Ns | Ns 1 Pq int
Skip label validation steps during pool import.
Changing is not recommended unless you know what you're doing
and are recovering a damaged label.
.
.It Sy zfs_vdev_ms_count_limit Ns = Ns Sy 131072 Po 128k Pc Pq uint
Practical upper limit of total metaslabs per top-level vdev.
.
.It Sy metaslab_preload_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group preloading.
.
.It Sy metaslab_lba_weighting_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Give more weight to metaslabs with lower LBAs,
assuming they have greater bandwidth,
as is typically the case on a modern constant angular velocity disk drive.
.
.It Sy metaslab_unload_delay Ns = Ns Sy 32 Pq uint
After a metaslab is used, we keep it loaded for this many TXGs, to attempt to
reduce unnecessary reloading.
Note that both this many TXGs and
.Sy metaslab_unload_delay_ms
milliseconds must pass before unloading will occur.
.
.It Sy metaslab_unload_delay_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq uint
After a metaslab is used, we keep it loaded for this many milliseconds,
to attempt to reduce unnecessary reloading.
Note that both this many milliseconds and
.Sy metaslab_unload_delay
TXGs must pass before unloading will occur.
.
.It Sy reference_history Ns = Ns Sy 3 Pq uint
Maximum number of reference holders tracked when reference_tracking_enable is
active.
.
.It Sy reference_tracking_enable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Track reference holders to
.Sy refcount_t
objects (debug builds only).
.
.It Sy send_holes_without_birth_time Ns = Ns Sy 1 Ns | Ns 0 Pq int
When set, the
.Sy hole_birth
optimization will not be used, and all holes will always be sent during a
.Nm zfs Cm send .
This is useful if you suspect your datasets are affected by a bug in
.Sy hole_birth .
.
.It Sy spa_config_path Ns = Ns Pa /etc/zfs/zpool.cache Pq charp
SPA config file.
.
.It Sy spa_asize_inflation Ns = Ns Sy 24 Pq uint
Multiplication factor used to estimate actual disk consumption from the
size of data being written.
The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its configuration.
Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor,
particularly if they operate close to quota or capacity limits.
.
.It Sy spa_load_print_vdev_tree Ns = Ns Sy 0 Ns | Ns 1 Pq int
Whether to print the vdev tree in the debugging message buffer during pool
import.
.
.It Sy spa_load_verify_data Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse data blocks during an "extreme rewind"
.Pq Fl X
import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal skips non-metadata blocks.
It can be toggled once the
import has started to stop or start the traversal of non-metadata blocks.
.
.It Sy spa_load_verify_metadata Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse blocks during an "extreme rewind"
.Pq Fl X
pool import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal is not performed.
It can be toggled once the import has started to stop or start the traversal.
.
.It Sy spa_load_verify_shift Ns = Ns Sy 4 Po 1/16th Pc Pq uint
Sets the maximum number of bytes to consume during pool import to the log2
fraction of the target ARC size.
.
.It Sy spa_slop_shift Ns = Ns Sy 5 Po 1/32nd Pc Pq int
Normally, we don't allow the last
.Sy 3.2% Pq Sy 1/2^spa_slop_shift
of space in the pool to be consumed.
This ensures that we don't run the pool completely out of space,
due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space.
If we have less than this amount of free space,
most ZPL operations (e.g. write, create) will return
.Sy ENOSPC .
.
.It Sy spa_upgrade_errlog_limit Ns = Ns Sy 0 Pq uint
Limits the number of on-disk error log entries that will be converted to the
new format when enabling the
.Sy head_errlog
feature.
The default is to convert all log entries.
.
.It Sy vdev_removal_max_span Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
During top-level vdev removal, chunks of data are copied from the vdev
which may include free space in order to trade bandwidth for IOPS.
This parameter determines the maximum span of free space, in bytes,
which will be included as "unnecessary" data in a chunk of copied data.
.Pp
The default value here was chosen to align with
.Sy zfs_vdev_read_gap_limit ,
which is a similar concept when doing
regular reads (but there's no reason it has to be the same).
.
.It Sy vdev_file_logical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq u64
Logical ashift for file-based devices.
.
.It Sy vdev_file_physical_ashift Ns = Ns Sy 9 Po 512 B Pc Pq u64
Physical ashift for file-based devices.
.
.It Sy zap_iterate_prefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
If set, when we start iterating over a ZAP object,
prefetch the entire object (all leaf blocks).
However, this is limited by
.Sy dmu_prefetch_max .
.
.It Sy zap_micro_max_size Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq int
Maximum micro ZAP size.
A micro ZAP is upgraded to a fat ZAP once it grows beyond the specified size.
.
.It Sy zfetch_array_rd_sz Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
If prefetching is enabled, disable prefetching for reads larger than this size.
.
.It Sy zfetch_min_distance Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
Min bytes to prefetch per stream.
Prefetch distance starts from the demand access size and quickly grows to
this value, doubling on each hit.
After that it may grow further by 1/8 per hit, but only if some prefetches
issued since the last time have not completed in time to satisfy the demand
request, i.e. the prefetch depth did not cover the read latency or the pool
got saturated.
.
.It Sy zfetch_max_distance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch per stream.
.
.It Sy zfetch_max_idistance Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq uint
Max bytes to prefetch indirects for per stream.
.
.It Sy zfetch_max_streams Ns = Ns Sy 8 Pq uint
Max number of streams per zfetch (prefetch streams per file).
.
.It Sy zfetch_min_sec_reap Ns = Ns Sy 1 Pq uint
Minimum time in seconds before an inactive prefetch stream can be reclaimed.
.
.It Sy zfetch_max_sec_reap Ns = Ns Sy 2 Pq uint
Maximum time in seconds before an inactive prefetch stream can be deleted.
.
.It Sy zfs_abd_scatter_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Controls whether the ARC uses scatter/gather lists;
when unset, all allocations are forced to be linear in kernel memory.
Disabling can improve performance in some code paths
at the expense of fragmented kernel memory.
.
.It Sy zfs_abd_scatter_max_order Ns = Ns Sy MAX_ORDER\-1 Pq uint
Maximum number of consecutive memory pages allocated in a single block for
scatter/gather lists.
.Pp
The value of
.Sy MAX_ORDER
depends on kernel configuration.
.
.It Sy zfs_abd_scatter_min_size Ns = Ns Sy 1536 Ns B Po 1.5 KiB Pc Pq uint
This is the minimum allocation size that will use scatter (page-based) ABDs.
Smaller allocations will use linear ABDs.
.
.It Sy zfs_arc_dnode_limit Ns = Ns Sy 0 Ns B Pq u64
When the number of bytes consumed by dnodes in the ARC exceeds this number of
bytes, try to unpin some of it in response to demand for non-metadata.
This value acts as a ceiling to the amount of dnode metadata, and defaults to
.Sy 0 ,
which indicates that a percentage of the ARC meta buffers, determined by
.Sy zfs_arc_dnode_limit_percent ,
may be used for dnodes.
.
.It Sy zfs_arc_dnode_limit_percent Ns = Ns Sy 10 Ns % Pq u64
Percentage of ARC meta buffers that may be consumed by dnodes.
.Pp
See also
.Sy zfs_arc_dnode_limit ,
which serves a similar purpose but has a higher priority if nonzero.
.
.It Sy zfs_arc_dnode_reduce_percent Ns = Ns Sy 10 Ns % Pq u64
Percentage of ARC dnodes to try to scan in response to demand for non-metadata
when the number of bytes consumed by dnodes exceeds
.Sy zfs_arc_dnode_limit .
.
.It Sy zfs_arc_average_blocksize Ns = Ns Sy 8192 Ns B Po 8 KiB Pc Pq uint
The ARC's buffer hash table is sized based on the assumption of an average
block size of this value.
This works out to roughly 1 MiB of hash table per 1 GiB of physical memory
with 8-byte pointers.
For configurations with a known larger average block size,
this value can be increased to reduce the memory footprint.
.
.It Sy zfs_arc_eviction_pct Ns = Ns Sy 200 Ns % Pq uint
When
.Fn arc_is_overflowing ,
.Fn arc_get_data_impl
waits for this percent of the requested amount of data to be evicted.
For example, by default, for every
.Em 2 KiB
that's evicted,
.Em 1 KiB
of it may be "reused" by a new allocation.
Since this is above
.Sy 100 Ns % ,
it ensures that progress is made towards getting
.Sy arc_size No under Sy arc_c .
Since this is finite, it ensures that allocations can still happen,
even during the potentially long time that
.Sy arc_size No is more than Sy arc_c .
.
.It Sy zfs_arc_evict_batch_limit Ns = Ns Sy 10 Pq uint
Number of ARC headers to evict per sub-list before proceeding to another sub-list.
This batch-style operation prevents entire sub-lists from being evicted at once
but comes at a cost of additional unlocking and locking.
.
.It Sy zfs_arc_grow_retry Ns = Ns Sy 0 Ns s Pq uint
If set to a non-zero value, it will replace the
.Sy arc_grow_retry
value with this value.
The
.Sy arc_grow_retry
.No value Pq default Sy 5 Ns s
is the number of seconds the ARC will wait before
trying to resume growth after a memory pressure event.
.
.It Sy zfs_arc_lotsfree_percent Ns = Ns Sy 10 Ns % Pq int
Throttle I/O when free system memory drops below this percentage of total
system memory.
Setting this value to
.Sy 0
will disable the throttle.
.
.It Sy zfs_arc_max Ns = Ns Sy 0 Ns B Pq u64
Max size of ARC in bytes.
If
.Sy 0 ,
then the max size of ARC is determined by the amount of system memory installed.
Under Linux, half of system memory will be used as the limit.
Under
.Fx ,
the larger of
.Sy all_system_memory No \- Sy 1 GiB
and
.Sy 5/8 No \(mu Sy all_system_memory
will be used as the limit.
This value must be at least
.Sy 67108864 Ns B Pq 64 MiB .
.Pp
This value can be changed dynamically, with some caveats.
It cannot be set back to
.Sy 0
while running, and reducing it below the current ARC size will not cause
the ARC to shrink without memory pressure to induce shrinking.
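.Pp
On Linux, for example, the limit can typically be adjusted at runtime by
writing the desired size in bytes to the module parameter; the
.Em 4 GiB
value below is purely illustrative:
.Pp
.Dl # echo 4294967296 > /sys/module/zfs/parameters/zfs_arc_max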
.
.It Sy zfs_arc_meta_balance Ns = Ns Sy 500 Pq uint
Balance between metadata and data on ghost hits.
Values above 100 increase metadata caching by proportionally reducing effect
of ghost data hits on target data/metadata rate.
.
.It Sy zfs_arc_min Ns = Ns Sy 0 Ns B Pq u64
Min size of ARC in bytes.
.No If set to Sy 0 , arc_c_min
will default to consuming the larger of
.Sy 32 MiB
and
.Sy all_system_memory No / Sy 32 .
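On a system with
.Em 16 GiB
of RAM, for example, this works out to
.Em 512 MiB .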
.
.It Sy zfs_arc_min_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 1s Pc Pq uint
Minimum time prefetched blocks are locked in the ARC.
.
.It Sy zfs_arc_min_prescient_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 6s Pc Pq uint
Minimum time "prescient prefetched" blocks are locked in the ARC.
These blocks are meant to be prefetched fairly aggressively ahead of
the code that may use them.
.
.It Sy zfs_arc_prune_task_threads Ns = Ns Sy 1 Pq int
Number of arc_prune threads.
.Fx
does not need more than one.
Linux may theoretically use one per mount point, up to the number of CPUs,
but that was not proven to be useful.
.
.It Sy zfs_max_missing_tvds Ns = Ns Sy 0 Pq int
Number of missing top-level vdevs which will be allowed during
pool import (only in read-only mode).
.
.It Sy zfs_max_nvlist_src_size Ns = Ns Sy 0 Pq u64
Maximum size in bytes allowed to be passed as
.Sy zc_nvlist_src_size
for ioctls on
.Pa /dev/zfs .
This prevents a user from causing the kernel to allocate
an excessive amount of memory.
When the limit is exceeded, the ioctl fails with
.Sy EINVAL
and a description of the error is sent to the
.Pa zfs-dbgmsg
log.
This parameter should not need to be touched under normal circumstances.
If
.Sy 0 ,
equivalent to a quarter of the user-wired memory limit under
.Fx
and to
.Sy 134217728 Ns B Pq 128 MiB
under Linux.
.
.It Sy zfs_multilist_num_sublists Ns = Ns Sy 0 Pq uint
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and metadata objects.
Locking is performed at the level of these "sub-lists".
This parameter controls the number of sub-lists per ARC state,
and also applies to other uses of the multilist data structure.
.Pp
If
.Sy 0 ,
equivalent to the greater of the number of online CPUs and
.Sy 4 .
.
.It Sy zfs_arc_overflow_shift Ns = Ns Sy 8 Pq int
The ARC size is considered to be overflowing if it exceeds the current
ARC target size
.Pq Sy arc_c
by thresholds determined by this parameter.
Exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No / Sy 2
starts ARC reclamation process.
If that appears insufficient, exceeding by
.Sy ( arc_c No >> Sy zfs_arc_overflow_shift ) No \(mu Sy 1.5
blocks new buffer allocation until the reclaim thread catches up.
Once started, the reclamation process continues until the ARC size returns
below the target size.
.Pp
The default value of
.Sy 8
causes the ARC to start reclamation if it exceeds the target size by
.Em 0.2%
of the target size, and block allocations by
.Em 0.6% .
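.Pp
For reference, with the default shift of 8 these figures follow directly from
the formulas above:
.Bd -literal -compact
(arc_c >> 8) / 2    =  arc_c / 512      ~ 0.2% of arc_c
(arc_c >> 8) * 1.5  =  arc_c * 3 / 512  ~ 0.6% of arc_c
.Ed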
.
.It Sy zfs_arc_shrink_shift Ns = Ns Sy 0 Pq uint
If nonzero, this will update
.Sy arc_shrink_shift Pq default Sy 7
with the new value.
.
.It Sy zfs_arc_pc_percent Ns = Ns Sy 0 Ns % Po off Pc Pq uint
Percent of pagecache to reclaim ARC to.
.Pp
This tunable allows the ZFS ARC to play more nicely
with the kernel's LRU pagecache.
It can guarantee that the ARC size won't collapse under scanning
pressure on the pagecache, yet still allows the ARC to be reclaimed down to
.Sy zfs_arc_min
if necessary.
This value is specified as percent of pagecache size (as measured by
.Sy NR_FILE_PAGES ) ,
where that percent may exceed
.Sy 100 .
This
only operates during memory pressure/reclaim.
.
.It Sy zfs_arc_shrinker_limit Ns = Ns Sy 10000 Pq int
This is a limit on how many pages the ARC shrinker makes available for
eviction in response to one page allocation attempt.
Note that in practice, the kernel's shrinker can ask us to evict
up to about four times this for one allocation attempt.
.Pp
The default limit of
.Sy 10000 Pq in practice, Em 160 MiB No per allocation attempt with 4 KiB pages
limits the amount of time spent attempting to reclaim ARC memory to
less than 100 ms per allocation attempt,
even with a small average compressed block size of ~8 KiB.
.Pp
The parameter can be set to 0 (zero) to disable the limit,
and only applies on Linux.
.
.It Sy zfs_arc_sys_free Ns = Ns Sy 0 Ns B Pq u64
The target number of bytes the ARC should leave as free memory on the system.
If zero, equivalent to the bigger of
.Sy 512 KiB No and Sy all_system_memory/64 .
.
.It Sy zfs_autoimport_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Disable pool import at module load by ignoring the cache file
.Pq Sy spa_config_path .
.
.It Sy zfs_checksum_events_per_second Ns = Ns Sy 20 Ns /s Pq uint
Rate limit checksum events to this many per second.
Note that this should not be set below the ZED thresholds
(currently 10 checksums over 10 seconds)
or else the daemon may not trigger any action.
.
.It Sy zfs_commit_timeout_pct Ns = Ns Sy 5 Ns % Pq uint
This controls the amount of time that a ZIL block (lwb) will remain "open"
when it isn't "full", and it has a thread waiting for it to be committed to
stable storage.
The timeout is scaled based on a percentage of the last lwb
latency to avoid significantly impacting the latency of each individual
transaction record (itx).
.
.It Sy zfs_condense_indirect_commit_entry_delay_ms Ns = Ns Sy 0 Ns ms Pq int
Vdev indirection layer (used for device removal) sleeps for this many
milliseconds during mapping generation.
Intended for use with the test suite to throttle vdev removal speed.
.
.It Sy zfs_condense_indirect_obsolete_pct Ns = Ns Sy 25 Ns % Pq uint
Minimum percent of obsolete bytes in vdev mapping required to attempt to
condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
Intended for use with the test suite
to facilitate triggering condensing as needed.
.
.It Sy zfs_condense_indirect_vdevs_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable condensing indirect vdev mappings.
When set, attempt to condense indirect vdev mappings
if the mapping uses more than
.Sy zfs_condense_min_mapping_bytes
bytes of memory and if the obsolete space map object uses more than
.Sy zfs_condense_max_obsolete_bytes
bytes on-disk.
The condensing process is an attempt to save memory by removing obsolete
mappings.
.
.It Sy zfs_condense_max_obsolete_bytes Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Only attempt to condense indirect vdev mappings if the on-disk size
of the obsolete space map object is greater than this number of bytes
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_condense_min_mapping_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq u64
Minimum size vdev mapping to attempt to condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_dbgmsg_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Internally ZFS keeps a small log to facilitate debugging.
The log is enabled by default, and can be disabled by unsetting this option.
The contents of the log can be accessed by reading
.Pa /proc/spl/kstat/zfs/dbgmsg .
Writing
.Sy 0
to the file clears the log.
.Pp
This setting does not influence debug prints due to
.Sy zfs_flags .
.
.It Sy zfs_dbgmsg_maxsize Ns = Ns Sy 4194304 Ns B Po 4 MiB Pc Pq uint
Maximum size of the internal ZFS debug log.
.
.It Sy zfs_dbuf_state_index Ns = Ns Sy 0 Pq int
Historically used for controlling what reporting was available under
.Pa /proc/spl/kstat/zfs .
No effect.
.
.It Sy zfs_deadman_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
When a pool sync operation takes longer than
.Sy zfs_deadman_synctime_ms ,
or when an individual I/O operation takes longer than
.Sy zfs_deadman_ziotime_ms ,
then the operation is considered to be "hung".
If
.Sy zfs_deadman_enabled
is set, then the deadman behavior is invoked as described by
.Sy zfs_deadman_failmode .
By default, the deadman is enabled and set to
.Sy wait
which results in "hung" I/O operations only being logged.
The deadman is automatically disabled when a pool gets suspended.
.
.It Sy zfs_deadman_failmode Ns = Ns Sy wait Pq charp
Controls the failure behavior when the deadman detects a "hung" I/O operation.
Valid values are:
.Bl -tag -compact -offset 4n -width "continue"
.It Sy wait
Wait for a "hung" operation to complete.
For each "hung" operation a "deadman" event will be posted
describing that operation.
.It Sy continue
Attempt to recover from a "hung" operation by re-dispatching it
to the I/O pipeline if possible.
.It Sy panic
Panic the system.
This can be used to facilitate automatic fail-over
to a properly configured fail-over partner.
.El
.
.It Sy zfs_deadman_checktime_ms Ns = Ns Sy 60000 Ns ms Po 1 min Pc Pq u64
Check time in milliseconds.
This defines the frequency at which we check for hung I/O requests
and potentially invoke the
.Sy zfs_deadman_failmode
behavior.
.
.It Sy zfs_deadman_synctime_ms Ns = Ns Sy 600000 Ns ms Po 10 min Pc Pq u64
Interval in milliseconds after which the deadman is triggered and also
the interval after which a pool sync operation is considered to be "hung".
Once this limit is exceeded the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the pool sync completes.
.
.It Sy zfs_deadman_ziotime_ms Ns = Ns Sy 300000 Ns ms Po 5 min Pc Pq u64
Interval in milliseconds after which the deadman is triggered and an
individual I/O operation is considered to be "hung".
As long as the operation remains "hung",
the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the operation completes.
.
.It Sy zfs_dedup_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enable prefetching dedup-ed blocks which are going to be freed.
.
.It Sy zfs_delay_min_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of
.Sy zfs_dirty_data_max .
This value should be at least
.Sy zfs_vdev_async_write_active_max_dirty_percent .
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_delay_scale Ns = Ns Sy 500000 Pq int
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.Pp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second.
This will smoothly handle between ten times and a tenth of this number.
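For example, a pool expected to sustain at most 20000 write operations per
second would suggest a value of roughly 10^9/20000 = 50000.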
.No See Sx ZFS TRANSACTION DELAY .
.Pp
.Sy zfs_delay_scale No \(mu Sy zfs_dirty_data_max Em must No be smaller than Sy 2^64 .
.
.It Sy zfs_disable_ivset_guid_check Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disables requirement for IVset GUIDs to be present and match when doing a raw
receive of encrypted datasets.
Intended for users whose pools were created with
OpenZFS pre-release versions and now have compatibility issues.
.
.It Sy zfs_key_max_salt_uses Ns = Ns Sy 400000000 Po 4*10^8 Pc Pq ulong
Maximum number of uses of a single salt value before generating a new one for
encrypted datasets.
The default value is also the maximum.
.
.It Sy zfs_object_mutex_size Ns = Ns Sy 64 Pq uint
Size of the znode hashtable used for holds.
.Pp
Due to the need to hold locks on objects that may not exist yet, kernel mutexes
are not created per-object and instead a hashtable is used where collisions
will result in objects waiting when there is not actually contention on the
same object.
.
.It Sy zfs_slow_io_events_per_second Ns = Ns Sy 20 Ns /s Pq int
Rate limit delay and deadman zevents (which report slow I/O operations) to this
many per
second.
.
.It Sy zfs_unflushed_max_mem_amt Ns = Ns Sy 1073741824 Ns B Po 1 GiB Pc Pq u64
Upper-bound limit for unflushed metadata changes to be held by the
log spacemap in memory, in bytes.
.
.It Sy zfs_unflushed_max_mem_ppm Ns = Ns Sy 1000 Ns ppm Po 0.1% Pc Pq u64
Part of overall system memory that ZFS allows to be used
for unflushed metadata changes by the log spacemap, in millionths.
.
.It Sy zfs_unflushed_log_block_max Ns = Ns Sy 131072 Po 128k Pc Pq u64
Describes the maximum number of log spacemap blocks allowed for each pool.
The default value means that the space in all the log spacemaps
can add up to no more than
.Sy 131072
blocks (which means
.Em 16 GiB
of logical space before compression and ditto blocks,
assuming that blocksize is
.Em 128 KiB ) .
.Pp
This tunable is important because it involves a trade-off between import
time after an unclean export and the frequency of flushing metaslabs.
The higher this number is, the more log blocks we allow when the pool is
active which means that we flush metaslabs less often and thus decrease
the number of I/O operations for spacemap updates per TXG.
At the same time though, that means that in the event of an unclean export,
there will be more log spacemap blocks for us to read, inducing overhead
in the import time of the pool.
The lower the number, the more flushing we do, destroying log blocks quicker
as they become obsolete faster, which leaves fewer blocks to be read during
import time after a crash.
.Pp
Each log spacemap block existing during pool import leads to approximately
one extra logical I/O issued.
This is the reason why this tunable is exposed in terms of blocks rather
than space used.
.
.It Sy zfs_unflushed_log_block_min Ns = Ns Sy 1000 Pq u64
If the number of metaslabs is small and our incoming rate is high,
we could get into a situation that we are flushing all our metaslabs every TXG.
Thus we always allow at least this many log blocks.
.
.It Sy zfs_unflushed_log_block_pct Ns = Ns Sy 400 Ns % Pq u64
Tunable used to determine the number of blocks that can be used for
the spacemap log, expressed as a percentage of the total number of
unflushed metaslabs in the pool.
.
.It Sy zfs_unflushed_log_txg_max Ns = Ns Sy 1000 Pq u64
Tunable limiting maximum time in TXGs any metaslab may remain unflushed.
It effectively limits the maximum number of unflushed per-TXG spacemap logs
that need to be read after an unclean pool export.
.
.It Sy zfs_unlink_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When enabled, files will not be asynchronously removed from the list of pending
unlinks and the space they consume will be leaked.
Once this option has been disabled and the dataset is remounted,
the pending unlinks will be processed and the freed space returned to the pool.
This option is used by the test suite.
.
.It Sy zfs_delete_blocks Ns = Ns Sy 20480 Pq ulong
This is used to define a large file for the purposes of deletion.
Files containing more than
.Sy zfs_delete_blocks
blocks will be deleted asynchronously,
while smaller files are deleted synchronously.
Decreasing this value will reduce the time spent in an
.Xr unlink 2
system call, at the expense of a longer delay before the freed space is
available.
This only applies on Linux.
.
.It Sy zfs_dirty_data_max Ns = Pq int
Determines the dirty space limit in bytes.
Once this limit is exceeded, new writes are halted until space frees up.
This parameter takes precedence over
.Sy zfs_dirty_data_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy physical_ram/10 ,
capped at
.Sy zfs_dirty_data_max_max .
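On a system with
.Em 64 GiB
of RAM, for example, this works out to
.Em 6.4 GiB ,
which the default
.Sy zfs_dirty_data_max_max
then caps to
.Em 4 GiB .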
.
.It Sy zfs_dirty_data_max_max Ns = Pq int
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
This parameter takes precedence over
.Sy zfs_dirty_data_max_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy min(physical_ram/4, 4GiB) ,
or
.Sy min(physical_ram/4, 1GiB)
for 32-bit systems.
.
.It Sy zfs_dirty_data_max_max_percent Ns = Ns Sy 25 Ns % Pq uint
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed as a percentage of physical RAM.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
The parameter
.Sy zfs_dirty_data_max_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_dirty_data_max_percent Ns = Ns Sy 10 Ns % Pq uint
Determines the dirty space limit, expressed as a percentage of all memory.
Once this limit is exceeded, new writes are halted until space frees up.
The parameter
.Sy zfs_dirty_data_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Subject to
.Sy zfs_dirty_data_max_max .
.
.It Sy zfs_dirty_data_sync_percent Ns = Ns Sy 20 Ns % Pq uint
Start syncing out a transaction group if there's at least this much dirty data
.Pq as a percentage of Sy zfs_dirty_data_max .
This should be less than
.Sy zfs_vdev_async_write_active_min_dirty_percent .
.
.It Sy zfs_wrlog_data_max Ns = Pq int
The upper limit of write-transaction ZIL log data size in bytes.
Write operations are throttled when approaching the limit until log data is
cleared out after transaction group sync.
Because of some overhead, it should be set to at least twice the size of
.Sy zfs_dirty_data_max
.No to prevent harming normal write throughput .
It should also be smaller than the size of the slog device, if a slog is
present.
.Pp
Defaults to
.Sy zfs_dirty_data_max*2
.
.It Sy zfs_fallocate_reserve_percent Ns = Ns Sy 110 Ns % Pq uint
Since ZFS is a copy-on-write filesystem with snapshots, blocks cannot be
preallocated for a file in order to guarantee that later writes will not
run out of space.
Instead,
.Xr fallocate 2
space preallocation only checks that sufficient space is currently available
in the pool or the user's project quota allocation,
and then creates a sparse file of the requested size.
The requested space is multiplied by
.Sy zfs_fallocate_reserve_percent
to allow additional space for indirect blocks and other internal metadata.
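With the default of
.Sy 110 Ns % ,
for example, an
.Xr fallocate 2
request for
.Em 10 GiB
succeeds only if roughly
.Em 11 GiB
of space is currently available.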
Setting this to
.Sy 0
disables support for
.Xr fallocate 2
and causes it to return
.Sy EOPNOTSUPP .
.
.It Sy zfs_fletcher_4_impl Ns = Ns Sy fastest Pq string
Select a fletcher 4 implementation.
.Pp
Supported selectors are:
.Sy fastest , scalar , sse2 , ssse3 , avx2 , avx512f , avx512bw ,
.No and Sy aarch64_neon .
All except
.Sy fastest No and Sy scalar
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of fletcher 4 are available, the
.Sy fastest
will be chosen using a micro benchmark.
Selecting
.Sy scalar
results in the original CPU-based calculation being used.
Selecting any option other than
.Sy fastest No or Sy scalar
results in vector instructions
from the respective CPU instruction set being used.
.
.It Sy zfs_blake3_impl Ns = Ns Sy fastest Pq string
Select a BLAKE3 implementation.
.Pp
Supported selectors are:
.Sy cycle , fastest , generic , sse2 , sse41 , avx2 , avx512 .
All except
.Sy cycle , fastest No and Sy generic
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of BLAKE3 are available, the
.Sy fastest
will be chosen using a micro benchmark.
You can see the benchmark results by reading this kstat file:
.Pa /proc/spl/kstat/zfs/chksum_bench .
.
.It Sy zfs_free_bpobj_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable the processing of the free_bpobj object.
.
.It Sy zfs_async_block_max_blocks Ns = Ns Sy UINT64_MAX Po unlimited Pc Pq u64
Maximum number of blocks freed in a single TXG.
.
.It Sy zfs_max_async_dedup_frees Ns = Ns Sy 100000 Po 10^5 Pc Pq u64
Maximum number of dedup blocks freed in a single TXG.
.
.It Sy zfs_vdev_async_read_max_active Ns = Ns Sy 3 Pq uint
Maximum asynchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_read_min_active Ns = Ns Sy 1 Pq uint
Minimum asynchronous read I/O operation active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_max_dirty_percent Ns = Ns Sy 60 Ns % Pq uint
When the pool has more than this much dirty data, use
.Sy zfs_vdev_async_write_max_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_min_dirty_percent Ns = Ns Sy 30 Ns % Pq uint
When the pool has less than this much dirty data, use
.Sy zfs_vdev_async_write_min_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly
interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_max_active Ns = Ns Sy 10 Pq uint
Maximum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_min_active Ns = Ns Sy 2 Pq uint
Minimum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.Pp
Lower values are associated with better latency on rotational media but poorer
resilver performance.
The default value of
.Sy 2
was chosen as a compromise.
A value of
.Sy 3
has been shown to improve resilver performance further at a cost of
further increasing latency.
.
.It Sy zfs_vdev_initializing_max_active Ns = Ns Sy 1 Pq uint
Maximum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_initializing_min_active Ns = Ns Sy 1 Pq uint
Minimum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_max_active Ns = Ns Sy 1000 Pq uint
The maximum number of I/O operations active to each device.
Ideally, this will be at least the sum of each queue's
.Sy max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_open_timeout_ms Ns = Ns Sy 1000 Pq uint
Timeout value to wait before determining a device is missing
during import.
This is helpful for transient missing paths due
to links being briefly removed and recreated in response to
udev events.
.
.It Sy zfs_vdev_rebuild_max_active Ns = Ns Sy 3 Pq uint
Maximum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_rebuild_min_active Ns = Ns Sy 1 Pq uint
Minimum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_max_active Ns = Ns Sy 2 Pq uint
Maximum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_min_active Ns = Ns Sy 1 Pq uint
Minimum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_max_active Ns = Ns Sy 2 Pq uint
Maximum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_min_active Ns = Ns Sy 1 Pq uint
Minimum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_max_active Ns = Ns Sy 10 Pq uint
Maximum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_min_active Ns = Ns Sy 10 Pq uint
Minimum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_max_active Ns = Ns Sy 10 Pq uint
Maximum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_min_active Ns = Ns Sy 10 Pq uint
Minimum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_max_active Ns = Ns Sy 2 Pq uint
Maximum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_min_active Ns = Ns Sy 1 Pq uint
Minimum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_delay Ns = Ns Sy 5 Pq uint
For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
the number of concurrently-active I/O operations is limited to
.Sy zfs_*_min_active ,
unless the vdev is "idle".
When there are no interactive I/O operations active (synchronous or otherwise),
and
.Sy zfs_vdev_nia_delay
operations have completed since the last interactive operation,
then the vdev is considered to be "idle",
and the number of concurrently-active non-interactive operations is increased to
.Sy zfs_*_max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_credit Ns = Ns Sy 5 Pq uint
Some HDDs tend to prioritize sequential I/O so strongly, that concurrent
random I/O latency reaches several seconds.
On some HDDs this happens even if sequential I/O operations
are submitted one at a time, and so setting
.Sy zfs_*_max_active Ns = Sy 1
does not help.
To prevent non-interactive I/O, like scrub,
from monopolizing the device, no more than
.Sy zfs_vdev_nia_credit
operations can be sent
while there are outstanding incomplete interactive operations.
This enforced wait ensures the HDD services the interactive I/O
within a reasonable amount of time.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_queue_depth_pct Ns = Ns Sy 1000 Ns % Pq uint
Maximum number of queued allocations per top-level vdev expressed as
a percentage of
.Sy zfs_vdev_async_write_max_active ,
which allows the system to detect devices that are more capable
of handling allocations and to allocate more blocks to those devices.
This allows for dynamic allocation distribution when devices are imbalanced,
as fuller devices will tend to be slower than empty devices.
.Pp
Also see
.Sy zio_dva_throttle_enabled .
.
.It Sy zfs_vdev_def_queue_depth Ns = Ns Sy 32 Pq uint
Default queue depth for each vdev IO allocator.
Higher values allow for better coalescing of sequential writes before sending
them to the disk, but can increase transaction commit times.
.
.It Sy zfs_vdev_failfast_mask Ns = Ns Sy 1 Pq uint
Defines if the driver should retire on a given error type.
The following options may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Name Description
_
1 Device No driver retries on device errors
2 Transport No driver retries on transport errors.
4 Driver No driver retries on driver errors.
.TE
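For example, a value of
.Sy 3
.Pq Sy 1 No | Sy 2
selects both the Device and Transport entries above,
while driver errors retain their default handling.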
.
.It Sy zfs_expire_snapshot Ns = Ns Sy 300 Ns s Pq int
Time before expiring
.Pa .zfs/snapshot .
.
.It Sy zfs_admin_snapshot Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow the creation, removal, or renaming of entries in the
.Sy .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled, this functionality works both locally and over NFS exports
which have the
.Em no_root_squash
option set.
.
.It Sy zfs_flags Ns = Ns Sy 0 Pq int
Set additional debugging flags.
The following flags may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Name Description
_
1 ZFS_DEBUG_DPRINTF Enable dprintf entries in the debug log.
* 2 ZFS_DEBUG_DBUF_VERIFY Enable extra dbuf verifications.
* 4 ZFS_DEBUG_DNODE_VERIFY Enable extra dnode verifications.
8 ZFS_DEBUG_SNAPNAMES Enable snapshot name verification.
* 16 ZFS_DEBUG_MODIFY Check for illegally modified ARC buffers.
64 ZFS_DEBUG_ZIO_FREE Enable verification of block frees.
128 ZFS_DEBUG_HISTOGRAM_VERIFY Enable extra spacemap histogram verifications.
256 ZFS_DEBUG_METASLAB_VERIFY Verify space accounting on disk matches in-memory \fBrange_trees\fP.
512 ZFS_DEBUG_SET_ERROR Enable \fBSET_ERROR\fP and dprintf entries in the debug log.
1024 ZFS_DEBUG_INDIRECT_REMAP Verify split blocks created by device removal.
2048 ZFS_DEBUG_TRIM Verify TRIM ranges are always within the allocatable range tree.
4096 ZFS_DEBUG_LOG_SPACEMAP Verify that the log summary is consistent with the spacemap log
and enable \fBzfs_dbgmsgs\fP for metaslab loading and flushing.
.TE
.Sy \& * No Requires debug build .
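.Pp
For example, enabling both
.Sy ZFS_DEBUG_DPRINTF Pq 1
and
.Sy ZFS_DEBUG_SET_ERROR Pq 512
means setting the parameter to their bitwise OR, 513; on Linux this could be
applied at runtime with something like:
.Pp
.Dl # echo 513 > /sys/module/zfs/parameters/zfs_flags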
.
.It Sy zfs_btree_verify_intensity Ns = Ns Sy 0 Pq uint
Enables btree verification.
The following settings are cumulative:
.TS
box;
lbz r l l .
Value Description
1 Verify height.
2 Verify pointers from children to parent.
3 Verify element counts.
4 Verify element order. (expensive)
* 5 Verify unused memory is poisoned. (expensive)
.TE
.Sy \& * No Requires debug build .
.
.It Sy zfs_free_leak_on_eio Ns = Ns Sy 0 Ns | Ns 1 Pq int
If destroy encounters an
.Sy EIO
while reading metadata (e.g. indirect blocks),
space referenced by the missing metadata can not be freed.
Normally this causes the background destroy to become "stalled",
as it is unable to make forward progress.
While in this stalled state, all remaining space to free
from the error-encountering filesystem is "temporarily leaked".
Set this flag to cause it to ignore the
.Sy EIO ,
permanently leak the space from indirect blocks that can not be read,
and continue to free everything else that it can.
.Pp
The default "stalling" behavior is useful if the storage partially
fails (i.e. some but not all I/O operations fail), and then later recovers.
In this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks.
Note, however, that this case is actually fairly rare.
.Pp
Typically pools either
.Bl -enum -compact -offset 4n -width "1."
.It
fail completely (but perhaps temporarily,
e.g. due to a top-level vdev going offline), or
.It
have localized, permanent errors (e.g. disk returns the wrong data
due to bit flip or firmware bug).
.El
In the former case, this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless.
In the latter, because the error is permanent, the best we can do
is leak the minimum amount of space,
which is what setting this flag will do.
It is therefore reasonable for this flag to normally be set,
but we chose the more conservative approach of not setting it,
so that there is no possibility of
leaking space in the "partial temporary" failure case.
.
.It Sy zfs_free_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq uint
During a
.Nm zfs Cm destroy
operation using the
.Sy async_destroy
feature,
a minimum of this much time will be spent working on freeing blocks per TXG.
.
.It Sy zfs_obsolete_min_time_ms Ns = Ns Sy 500 Ns ms Pq uint
Similar to
.Sy zfs_free_min_time_ms ,
but for cleanup of old indirection records for removed vdevs.
.
.It Sy zfs_immediate_write_sz Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq s64
Largest data block to write to the ZIL.
Larger blocks will be treated as if the dataset being written to had the
.Sy logbias Ns = Ns Sy throughput
property set.
.
.It Sy zfs_initialize_value Ns = Ns Sy 16045690984833335022 Po 0xDEADBEEFDEADBEEE Pc Pq u64
Pattern written to vdev free space by
.Xr zpool-initialize 8 .
.
.It Sy zfs_initialize_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Size of writes used by
.Xr zpool-initialize 8 .
This option is used by the test suite.
.
.It Sy zfs_livelist_max_entries Ns = Ns Sy 500000 Po 5*10^5 Pc Pq u64
The threshold size (in block pointers) at which we create a new sub-livelist.
Larger sublists are more costly from a memory perspective but the fewer
sublists there are, the lower the cost of insertion.
.
.It Sy zfs_livelist_min_percent_shared Ns = Ns Sy 75 Ns % Pq int
If the amount of shared space between a snapshot and its clone drops below
this threshold, the clone turns off the livelist and reverts to the old
deletion method.
This is in place because livelists no longer give us a benefit
once a clone has been overwritten enough.
.
.It Sy zfs_livelist_condense_new_alloc Ns = Ns Sy 0 Pq int
Incremented each time an extra ALLOC blkptr is added to a livelist entry while
it is being condensed.
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_sync .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the synctask \(em
.Fn spa_livelist_condense_sync .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_livelist_condense_zthr_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_zthr_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the open context condensing work in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_lua_max_instrlimit Ns = Ns Sy 100000000 Po 10^8 Pc Pq u64
The maximum execution time limit that can be set for a ZFS channel program,
specified as a number of Lua instructions.
.
.It Sy zfs_lua_max_memlimit Ns = Ns Sy 104857600 Po 100 MiB Pc Pq u64
The maximum memory limit that can be set for a ZFS channel program, specified
in bytes.
.
.It Sy zfs_max_dataset_nesting Ns = Ns Sy 50 Pq int
The maximum depth of nested datasets.
This value can be tuned temporarily to
fix existing datasets that exceed the predefined limit.
.
.It Sy zfs_max_log_walking Ns = Ns Sy 5 Pq u64
The number of past TXGs that the flushing algorithm of the log spacemap
feature uses to estimate incoming log blocks.
.
.It Sy zfs_max_logsm_summary_length Ns = Ns Sy 10 Pq u64
Maximum number of rows allowed in the summary of the spacemap log.
.
.It Sy zfs_max_recordsize Ns = Ns Sy 16777216 Po 16 MiB Pc Pq uint
We currently support block sizes from
.Em 512 Po 512 B Pc No to Em 16777216 Po 16 MiB Pc .
The benefits of larger blocks, and thus larger I/O,
need to be weighed against the cost of COWing a giant block to modify one byte.
Additionally, very large blocks can have an impact on I/O latency,
and also potentially on the memory allocator.
Therefore, we formerly forbade creating blocks larger than 1M.
Larger blocks could be created by changing it,
and pools with larger blocks can always be imported and used,
regardless of this setting.
.
.It Sy zfs_allow_redacted_dataset_mount Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow datasets received with redacted send/receive to be mounted.
Normally disabled because these datasets may be missing key data.
.
.It Sy zfs_min_metaslabs_to_flush Ns = Ns Sy 1 Pq u64
Minimum number of metaslabs to flush per dirty TXG.
.
.It Sy zfs_metaslab_fragmentation_threshold Ns = Ns Sy 70 Ns % Pq uint
Allow metaslabs to keep their active state as long as their fragmentation
percentage is no more than this value.
An active metaslab that exceeds this threshold
will no longer keep its active status allowing better metaslabs to be selected.
.
.It Sy zfs_mg_fragmentation_threshold Ns = Ns Sy 95 Ns % Pq uint
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value.
If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.
.It Sy zfs_mg_noalloc_threshold Ns = Ns Sy 0 Ns % Pq uint
Defines a threshold at which metaslab groups should be eligible for allocations.
The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold.
Once all groups have reached the threshold, all groups are allowed to accept
allocations.
The default value of
.Sy 0
disables the feature and causes all metaslab groups to be eligible for
allocations.
.Pp
This parameter allows one to deal with pools having heavily imbalanced
vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old
.Sy zfs_mg_alloc_failures
facility.
.
.It Sy zfs_ddt_data_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place DDT data into the special allocation class.
.
.It Sy zfs_user_indirect_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place user data indirect blocks
into the special allocation class.
.
.It Sy zfs_multihost_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest multihost updates will be available
in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /multihost .
.
.It Sy zfs_multihost_interval Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq u64
Used to control the frequency of multihost writes which are performed when the
.Sy multihost
pool property is on.
This is one of the factors used to determine the
length of the activity check during import.
.Pp
The multihost write period is
.Sy zfs_multihost_interval No / Sy leaf-vdevs .
On average a multihost write will be issued for each leaf vdev
every
.Sy zfs_multihost_interval
milliseconds.
In practice, the observed period can vary with the I/O load
and this observed value is the delay which is stored in the uberblock.
.
.It Sy zfs_multihost_import_intervals Ns = Ns Sy 20 Pq uint
Used to control the duration of the activity test on import.
Smaller values of
.Sy zfs_multihost_import_intervals
will reduce the import time but increase
the risk of failing to detect an active pool.
The total activity check time is never allowed to drop below one second.
.Pp
On import the activity check waits a minimum amount of time determined by
.Sy zfs_multihost_interval No \(mu Sy zfs_multihost_import_intervals ,
or the same product computed on the host which last had the pool imported,
whichever is greater.
The activity check time may be further extended if the value of MMP
delay found in the best uberblock indicates actual multihost updates happened
at longer intervals than
.Sy zfs_multihost_interval .
A minimum of
.Em 100 ms
is enforced.
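With the default settings this works out to at least 1000 ms \(mu 20 = 20 s.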
.Pp
.Sy 0 No is equivalent to Sy 1 .
.
.It Sy zfs_multihost_fail_intervals Ns = Ns Sy 10 Pq uint
Controls the behavior of the pool when multihost write failures or delays are
detected.
.Pp
When
.Sy 0 ,
multihost write failures or delays are ignored.
The failures will still be reported to the ZED, which, depending on its
configuration, may take action such as suspending the pool or offlining a
device.
.Pp
Otherwise, the pool will be suspended if
.Sy zfs_multihost_fail_intervals No \(mu Sy zfs_multihost_interval
milliseconds pass without a successful MMP write.
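With the default settings this amounts to 10 \(mu 1000 ms = 10 s.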
This guarantees the activity test will see MMP writes if the pool is imported.
.Sy 1 No is equivalent to Sy 2 ;
this is necessary to prevent the pool from being suspended
due to normal, small I/O latency variations.
.
.It Sy zfs_no_scrub_io Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable scrub I/O.
This results in scrubs not actually scrubbing data and
simply doing a metadata crawl of the pool instead.
.
.It Sy zfs_no_scrub_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable block prefetching for scrubs.
.
.It Sy zfs_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable cache flush operations on disks when writing.
Setting this will cause pool corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zfs_nopwrite_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Allow no-operation writes.
The occurrence of nopwrites will further depend on other pool properties
.Pq i.a. the checksumming and compression algorithms .
.
.It Sy zfs_dmu_offset_next_sync Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable forcing TXG sync to find holes.
When enabled forces ZFS to sync data when
.Sy SEEK_HOLE No or Sy SEEK_DATA
flags are used allowing holes in a file to be accurately reported.
When disabled holes will not be reported in recently dirtied files.
.
.It Sy zfs_pd_bytes_max Ns = Ns Sy 52428800 Ns B Po 50 MiB Pc Pq int
The number of bytes which should be prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_traverse_indirect_prefetch_limit Ns = Ns Sy 32 Pq uint
The number of blocks pointed to by an indirect (non-L0) block which should be
prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_per_txg_dirty_frees_percent Ns = Ns Sy 30 Ns % Pq u64
Control percentage of dirtied indirect blocks from frees allowed into one TXG.
After this threshold is crossed, additional frees will wait until the next TXG.
.Sy 0 No disables this throttle .
.
.It Sy zfs_prefetch_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable predictive prefetch.
Note that it leaves "prescient" prefetch
.Pq for, e.g., Nm zfs Cm send
intact.
Unlike predictive prefetch, prescient prefetch never issues I/O
that ends up not being needed, so it can't hurt performance.
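.Pp
On Linux, for example, predictive prefetch can be toggled at runtime with:
.Pp
.Dl # echo 1 > /sys/module/zfs/parameters/zfs_prefetch_disable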
.
.It Sy zfs_qat_checksum_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for SHA256 checksums.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_compress_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for gzip compression.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_encrypt_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for AES-GCM encryption.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_vnops_read_chunk_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Bytes to read per chunk.
.
.It Sy zfs_read_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest reads will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /reads .
.
.It Sy zfs_read_history_hits Ns = Ns Sy 0 Ns | Ns 1 Pq int
Include cache hits in read history.
.
.It Sy zfs_rebuild_max_segment Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
Maximum read segment size to issue when sequentially resilvering a
top-level vdev.
.
.It Sy zfs_rebuild_scrub_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Automatically start a pool scrub when the last active sequential resilver
completes in order to verify the checksums of all blocks which have been
resilvered.
This is enabled by default and strongly recommended.
.
.It Sy zfs_rebuild_vdev_limit Ns = Ns Sy 67108864 Ns B Po 64 MiB Pc Pq u64
Maximum amount of I/O that can be concurrently issued for a sequential
resilver per leaf device, given in bytes.
.
.It Sy zfs_reconstruct_indirect_combinations_max Ns = Ns Sy 4096 Pq int
If an indirect split block contains more than this many possible unique
combinations when being reconstructed, consider it too computationally
expensive to check them all.
Instead, try at most this many randomly selected
combinations each time the block is accessed.
This allows all segment copies to participate fairly
in the reconstruction when all combinations
cannot be checked and prevents repeated use of one bad copy.
.
.It Sy zfs_recover Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to attempt to recover from fatal errors.
This should only be used as a last resort,
as it typically results in leaked space, or worse.
.
.It Sy zfs_removal_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore hard I/O errors during device removal.
When set, if a device encounters a hard I/O error during the removal process
the removal will not be cancelled.
This can result in a normally recoverable block becoming permanently damaged
and is hence not recommended.
This should only be used as a last resort when the
pool cannot be returned to a healthy state prior to removing the device.
.
.It Sy zfs_removal_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
This is used by the test suite so that it can ensure that certain actions
happen while in the middle of a removal.
.
.It Sy zfs_remove_max_segment Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The largest contiguous segment that we will attempt to allocate when removing
a device.
If there is a performance problem with attempting to allocate large blocks,
consider decreasing this.
The default value is also the maximum.
.
.It Sy zfs_resilver_disable_defer Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore the
.Sy resilver_defer
feature, causing an operation that would start a resilver to
immediately restart the one in progress.
.
.It Sy zfs_resilver_min_time_ms Ns = Ns Sy 3000 Ns ms Po 3 s Pc Pq uint
Resilvers are processed by the sync thread.
While resilvering, it will spend at least this much time
working on a resilver between TXG flushes.
.
.It Sy zfs_scan_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
If set, remove the DTL (dirty time list) upon completion of a pool scan (scrub),
even if there were unrepairable errors.
Intended to be used during pool repair or recovery to
stop resilvering when the pool is next imported.
.
.It Sy zfs_scrub_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq uint
Scrubs are processed by the sync thread.
While scrubbing, it will spend at least this much time
working on a scrub between TXG flushes.
.
.It Sy zfs_scrub_error_blocks_per_txg Ns = Ns Sy 4096 Pq uint
Error blocks to be scrubbed in one txg.
.
.It Sy zfs_scan_checkpoint_intval Ns = Ns Sy 7200 Ns s Po 2 hour Pc Pq uint
To preserve progress across reboots, the sequential scan algorithm periodically
needs to stop metadata scanning and issue all the verification I/O to disk.
The frequency of this flushing is determined by this tunable.
.
.It Sy zfs_scan_fill_weight Ns = Ns Sy 3 Pq uint
This tunable affects how scrub and resilver I/O segments are ordered.
A higher number indicates that we care more about how filled in a segment is,
while a lower number indicates we care more about the size of the extent without
considering the gaps within a segment.
This value is only tunable upon module insertion.
Changing the value afterwards will have no effect on scrub or resilver
performance.
.
.It Sy zfs_scan_issue_strategy Ns = Ns Sy 0 Pq uint
Determines the order that data will be verified while scrubbing or resilvering:
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
Data will be verified as sequentially as possible, given the
amount of memory reserved for scrubbing
.Pq see Sy zfs_scan_mem_lim_fact .
This may improve scrub performance if the pool's data is very fragmented.
.It Sy 2
The largest mostly-contiguous chunk of found data will be verified first.
By deferring scrubbing of small segments, we may later find adjacent data
to coalesce and increase the segment size.
.It Sy 0
.No Use strategy Sy 1 No during normal verification
.No and strategy Sy 2 No while taking a checkpoint .
.El
.
.It Sy zfs_scan_legacy Ns = Ns Sy 0 Ns | Ns 1 Pq int
If unset, indicates that scrubs and resilvers will gather metadata in
memory before issuing sequential I/O.
Otherwise indicates that the legacy algorithm will be used,
where I/O is initiated as soon as it is discovered.
Unsetting will not affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_max_ext_gap Ns = Ns Sy 2097152 Ns B Po 2 MiB Pc Pq int
Sets the largest gap in bytes between scrub/resilver I/O operations
that will still be considered sequential for sorting purposes.
Changing this value will not
affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_mem_lim_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
This tunable determines the hard limit for I/O sorting memory usage.
When the hard limit is reached we stop scanning metadata and start issuing
data verification I/O.
This is done until we get below the soft limit.
.
.It Sy zfs_scan_mem_lim_soft_fact Ns = Ns Sy 20 Ns ^-1 Pq uint
The fraction of the hard limit used to determine the soft limit for I/O sorting
by the sequential scan algorithm.
When we cross this limit from below no action is taken.
When we cross this limit from above it is because we are issuing verification
I/O.
In this case (unless the metadata scan is done) we stop issuing verification I/O
and start scanning metadata again until we get to the hard limit.
.
.It Sy zfs_scan_report_txgs Ns = Ns Sy 0 Pq uint
When reporting resilver throughput and estimated completion time use the
performance observed over roughly the last
.Sy zfs_scan_report_txgs
TXGs.
When set to zero, performance is calculated over the time between checkpoints.
.
.It Sy zfs_scan_strict_mem_lim Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enforce tight memory limits on pool scans when a sequential scan is in progress.
When disabled, the memory limit may be exceeded by fast disks.
.
.It Sy zfs_scan_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq int
Freezes a scrub/resilver in progress without actually pausing it.
Intended for testing/debugging.
.
.It Sy zfs_scan_vdev_limit Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum amount of data that can be concurrently issued at once for scrubs and
resilvers per leaf device, given in bytes.
.
.It Sy zfs_send_corrupt_data Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow sending of corrupt data (ignore read/checksum errors when sending).
.
.It Sy zfs_send_unmodified_spill_blocks Ns = Ns Sy 1 Ns | Ns 0 Pq int
Include unmodified spill blocks in the send stream.
Under certain circumstances, previous versions of ZFS could incorrectly
remove the spill block from an existing object.
Including unmodified copies of the spill blocks creates a backwards-compatible
stream which will recreate a spill block if it was incorrectly removed.
.
.It Sy zfs_send_no_prefetch_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm send
internal queues.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_no_prefetch_queue_length Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
The maximum number of bytes allowed in
.Nm zfs Cm send Ns 's
internal queues.
.
.It Sy zfs_send_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm send
prefetch queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The maximum number of bytes allowed that will be prefetched by
.Nm zfs Cm send .
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_queue_ff Ns = Ns Sy 20 Ns ^\-1 Pq uint
The fill fraction of the
.Nm zfs Cm receive
queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_recv_queue_length Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
The maximum number of bytes allowed in the
.Nm zfs Cm receive
queue.
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_write_batch_size Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
The maximum amount of data, in bytes, that
.Nm zfs Cm receive
will write in one DMU transaction.
This is the uncompressed size, even when receiving a compressed send stream.
This setting will not reduce the write size below a single block.
Capped at a maximum of
.Sy 32 MiB .
.
.It Sy zfs_recv_best_effort_corrective Ns = Ns Sy 0 Pq int
When this variable is set to non-zero, a corrective receive:
.Bl -enum -compact -offset 4n -width "1."
.It
Does not enforce the restriction of source & destination snapshot GUIDs
matching.
.It
If there is an error during healing, the healing receive is not terminated;
instead it moves on to the next record.
.El
.
.It Sy zfs_override_estimate_recordsize Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Setting this variable overrides the default logic for estimating block
sizes when doing a
.Nm zfs Cm send .
The default heuristic is that the average block size
will be the current recordsize.
Override this value if most data in your dataset is not of that size
and you require accurate zfs send size estimates.
.
.It Sy zfs_sync_pass_deferred_free Ns = Ns Sy 2 Pq uint
Flushing of data to disk is done in passes.
Defer frees starting in this pass.
.
.It Sy zfs_spa_discard_memory_limit Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq int
Maximum memory used for prefetching a checkpoint's space map on each
vdev while discarding the checkpoint.
.
.It Sy zfs_special_class_metadata_reserve_pct Ns = Ns Sy 25 Ns % Pq uint
Only allow small data blocks to be allocated on the special and dedup vdev
types when the available free space percentage on these vdevs exceeds this
value.
This ensures reserved space is available for pool metadata as the
special vdevs approach capacity.
.
.It Sy zfs_sync_pass_dont_compress Ns = Ns Sy 8 Pq uint
Starting in this sync pass, disable compression (including of metadata).
With the default setting, in practice, we don't have this many sync passes,
so this has no effect.
.Pp
The original intent was that disabling compression would help the sync passes
to converge.
However, in practice, disabling compression increases
the average number of sync passes; because when we turn compression off,
many blocks' size will change, and thus we have to re-allocate
(not overwrite) them.
It also increases the number of
.Em 128 KiB
allocations (e.g. for indirect blocks and spacemaps)
because these will not be compressed.
The
.Em 128 KiB
allocations are especially detrimental to performance
on highly fragmented systems, which may have very few free segments of this
size,
and may need to load new metaslabs to satisfy these allocations.
.
.It Sy zfs_sync_pass_rewrite Ns = Ns Sy 2 Pq uint
Rewrite new block pointers starting in this pass.
.
.It Sy zfs_sync_taskq_batch_pct Ns = Ns Sy 75 Ns % Pq int
This controls the number of threads used by
.Sy dp_sync_taskq .
The default value of
.Sy 75%
will create a maximum of one thread per CPU.
.
.It Sy zfs_trim_extent_bytes_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
Maximum size of TRIM command.
Larger ranges will be split into chunks no larger than this value before
issuing.
.
.It Sy zfs_trim_extent_bytes_min Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
Minimum size of TRIM commands.
TRIM ranges smaller than this will be skipped,
unless they're part of a larger range which was chunked.
This is done because it's common for these small TRIMs
to negatively impact overall performance.
.
.It Sy zfs_trim_metaslab_skip Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Skip uninitialized metaslabs during the TRIM process.
This option is useful for pools constructed from large thinly-provisioned
devices
where TRIM operations are slow.
As a pool ages, an increasing fraction of the pool's metaslabs
will be initialized, progressively degrading the usefulness of this option.
This setting is stored when starting a manual TRIM and will
persist for the duration of the requested TRIM.
.
.It Sy zfs_trim_queue_limit Ns = Ns Sy 10 Pq uint
Maximum number of queued TRIMs outstanding per leaf vdev.
The number of concurrent TRIM commands issued to the device is controlled by
.Sy zfs_vdev_trim_min_active No and Sy zfs_vdev_trim_max_active .
.
.It Sy zfs_trim_txg_batch Ns = Ns Sy 32 Pq uint
The number of transaction groups' worth of frees which should be aggregated
before TRIM operations are issued to the device.
This setting represents a trade-off between issuing larger,
more efficient TRIM operations and the delay
before the recently trimmed space is available for use by the device.
.Pp
Increasing this value will allow frees to be aggregated for a longer time.
This will result in larger TRIM operations and potentially increased memory
usage.
Decreasing this value will have the opposite effect.
The default of
.Sy 32
was determined to be a reasonable compromise.
.
.It Sy zfs_txg_history Ns = Ns Sy 0 Pq uint
Historical statistics for this many latest TXGs will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
.
.It Sy zfs_txg_timeout Ns = Ns Sy 5 Ns s Pq uint
Flush dirty data to disk at least this often, in seconds
(maximum TXG duration).
.
.It Sy zfs_vdev_aggregate_trim Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Allow TRIM I/O operations to be aggregated.
This is normally not helpful because the extents to be trimmed
will already have been aggregated by the metaslab.
This option is provided for debugging and performance analysis.
.
.It Sy zfs_vdev_aggregation_limit Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq uint
Max vdev I/O aggregation size.
.
.It Sy zfs_vdev_aggregation_limit_non_rotating Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
Max vdev I/O aggregation size for non-rotating media.
.
-.It Sy zfs_vdev_cache_bshift Ns = Ns Sy 16 Po 64 KiB Pc Pq uint
-Shift size to inflate reads to.
-.
-.It Sy zfs_vdev_cache_max Ns = Ns Sy 16384 Ns B Po 16 KiB Pc Pq uint
-Inflate reads smaller than this value to meet the
-.Sy zfs_vdev_cache_bshift
-size
-.Pq default Sy 64 KiB .
-.
-.It Sy zfs_vdev_cache_size Ns = Ns Sy 0 Pq uint
-Total size of the per-disk cache in bytes.
-.Pp
-Currently this feature is disabled, as it has been found to not be helpful
-for performance and in some cases harmful.
-.
.It Sy zfs_vdev_mirror_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
immediately follows its predecessor on rotational vdevs.
.
.It Sy zfs_vdev_mirror_rotating_seek_inc Ns = Ns Sy 5 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this window that do not immediately follow the previous
operation are incremented by half.
.
.It Sy zfs_vdev_mirror_rotating_seek_offset Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq int
The maximum distance from the last queued I/O operation within which
the balancing algorithm considers an operation to have locality.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_mirror_non_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member on non-rotational vdevs
when I/O operations do not immediately follow one another.
.
.It Sy zfs_vdev_mirror_non_rotating_seek_inc Ns = Ns Sy 1 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this window that do not immediately follow the previous
operation are incremented by half.
.
.It Sy zfs_vdev_read_gap_limit Ns = Ns Sy 32768 Ns B Po 32 KiB Pc Pq uint
Aggregate read I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_write_gap_limit Ns = Ns Sy 4096 Ns B Po 4 KiB Pc Pq uint
Aggregate write I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_raidz_impl Ns = Ns Sy fastest Pq string
Select the raidz parity implementation to use.
.Pp
Variants that don't depend on CPU-specific features
may be selected on module load, as they are supported on all systems.
The remaining options may only be set after the module is loaded,
as they are available only if the implementations are compiled in
and supported on the running system.
.Pp
Once the module is loaded,
.Pa /sys/module/zfs/parameters/zfs_vdev_raidz_impl
will show the available options,
with the currently selected one enclosed in square brackets.
.Pp
.TS
lb l l .
fastest selected by built-in benchmark
original original implementation
scalar scalar implementation
sse2 SSE2 instruction set 64-bit x86
ssse3 SSSE3 instruction set 64-bit x86
avx2 AVX2 instruction set 64-bit x86
avx512f AVX512F instruction set 64-bit x86
avx512bw AVX512F & AVX512BW instruction sets 64-bit x86
aarch64_neon NEON Aarch64/64-bit ARMv8
aarch64_neonx2 NEON with more unrolling Aarch64/64-bit ARMv8
powerpc_altivec Altivec PowerPC
.TE
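.Pp
As a hedged illustration on Linux, the parameter above can be read to list
the available implementations and, where the module allows it, written to
select one
.Pq the Sy avx2 No value below is only an example :
.Bd -literal -compact -offset 4n
# cat /sys/module/zfs/parameters/zfs_vdev_raidz_impl
# echo avx2 > /sys/module/zfs/parameters/zfs_vdev_raidz_impl
.Ed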
.
.It Sy zfs_vdev_scheduler Pq charp
.Sy DEPRECATED .
Prints a warning to the kernel log for compatibility.
.
.It Sy zfs_zevent_len_max Ns = Ns Sy 512 Pq uint
Max event queue length.
Events in the queue can be viewed with
.Xr zpool-events 8 .
.
.It Sy zfs_zevent_retain_max Ns = Ns Sy 2000 Pq int
Maximum recent zevent records to retain for duplicate checking.
Setting this to
.Sy 0
disables duplicate detection.
.
.It Sy zfs_zevent_retain_expire_secs Ns = Ns Sy 900 Ns s Po 15 min Pc Pq int
Lifespan for a recent ereport that was retained for duplicate checking.
.
.It Sy zfs_zil_clean_taskq_maxalloc Ns = Ns Sy 1048576 Pq int
The maximum number of taskq entries that are allowed to be cached.
When this limit is exceeded, transaction records (itxs)
will be cleaned synchronously.
.
.It Sy zfs_zil_clean_taskq_minalloc Ns = Ns Sy 1024 Pq int
The number of taskq entries that are pre-populated when the taskq is first
created and are immediately available for use.
.
.It Sy zfs_zil_clean_taskq_nthr_pct Ns = Ns Sy 100 Ns % Pq int
This controls the number of threads used by
.Sy dp_zil_clean_taskq .
The default value of
.Sy 100%
will create a maximum of one thread per CPU.
.
.It Sy zil_maxblocksize Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
This sets the maximum block size used by the ZIL.
On very fragmented pools, lowering this
.Pq typically to Sy 36 KiB
can improve performance.
.
.It Sy zil_min_commit_timeout Ns = Ns Sy 5000 Pq u64
This sets the minimum delay in nanoseconds that the ZIL will wait before
committing a block, in the hope of accumulating more records.
If ZIL writes arrive too quickly, the kernel may not be able to sleep for such
a short interval, increasing log latency above that allowed by
.Sy zfs_commit_timeout_pct .
.
.It Sy zil_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable the cache flush commands that are normally sent to disk by
the ZIL after an LWB write has completed.
Setting this will cause ZIL corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zil_replay_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable intent logging replay.
Can be disabled for recovery from corrupted ZIL.
.
.It Sy zil_slog_bulk Ns = Ns Sy 786432 Ns B Po 768 KiB Pc Pq u64
Limit SLOG write size per commit executed with synchronous priority.
Any writes above that will be executed with lower (asynchronous) priority
to limit potential SLOG device abuse by a single active ZIL writer.
.
.It Sy zfs_zil_saxattr Ns = Ns Sy 1 Ns | Ns 0 Pq int
Setting this tunable to zero disables ZIL logging of new
.Sy xattr Ns = Ns Sy sa
records if the
.Sy org.openzfs:zilsaxattr
feature is enabled on the pool.
This would only be necessary to work around bugs in the ZIL logging or replay
code for this record type.
The tunable has no effect if the feature is disabled.
.
.It Sy zfs_embedded_slog_min_ms Ns = Ns Sy 64 Pq uint
Usually, one metaslab from each normal-class vdev is dedicated for use by
the ZIL to log synchronous writes.
However, if there are fewer than
.Sy zfs_embedded_slog_min_ms
metaslabs in the vdev, this functionality is disabled.
This ensures that we don't set aside an unreasonable amount of space for the
ZIL.
.
.It Sy zstd_earlyabort_pass Ns = Ns Sy 1 Pq uint
Whether the heuristic for detecting incompressible data with zstd levels >= 3,
using LZ4 and zstd-1 passes, is enabled.
.
.It Sy zstd_abort_size Ns = Ns Sy 131072 Pq uint
Minimum uncompressed size (inclusive) of a record for which the early abort
heuristic will be attempted.
.
.It Sy zio_deadman_log_all Ns = Ns Sy 0 Ns | Ns 1 Pq int
If non-zero, the zio deadman will produce debugging messages
.Pq see Sy zfs_dbgmsg_enable
for all zios, rather than only for leaf zios possessing a vdev.
This is meant to be used by developers to gain
diagnostic information for hang conditions which don't involve a mutex
or other locking primitive: typically conditions in which a thread in
the zio pipeline is looping indefinitely.
.
.It Sy zio_slow_io_ms Ns = Ns Sy 30000 Ns ms Po 30 s Pc Pq int
When an I/O operation takes more than this much time to complete,
it's marked as slow.
Each slow operation causes a delay zevent.
Slow I/O counters can be seen with
.Nm zpool Cm status Fl s .
.
.It Sy zio_dva_throttle_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Throttle block allocations in the I/O pipeline.
This allows for dynamic allocation distribution when devices are imbalanced.
When enabled, the maximum number of pending allocations per top-level vdev
is limited by
.Sy zfs_vdev_queue_depth_pct .
.
.It Sy zfs_xattr_compat Ns = Ns Sy 0 Ns | Ns 1 Pq int
Control the naming scheme used when setting new xattrs in the user namespace.
If
.Sy 0
.Pq the default on Linux ,
user namespace xattr names are prefixed with the namespace, to be backwards
compatible with previous versions of ZFS on Linux.
If
.Sy 1
.Pq the default on Fx ,
user namespace xattr names are not prefixed, to be backwards compatible with
previous versions of ZFS on illumos and
.Fx .
.Pp
Either naming scheme can be read on this and future versions of ZFS, regardless
of this tunable, but legacy ZFS on illumos or
.Fx
are unable to read user namespace xattrs written in the Linux format, and
legacy versions of ZFS on Linux are unable to read user namespace xattrs written
in the legacy ZFS format.
.Pp
An existing xattr with the alternate naming scheme is removed when overwriting
the xattr so as to not accumulate duplicates.
.
.It Sy zio_requeue_io_start_cut_in_line Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prioritize requeued I/O.
.
.It Sy zio_taskq_batch_pct Ns = Ns Sy 80 Ns % Pq uint
Percentage of online CPUs which will run a worker thread for I/O.
These workers are responsible for I/O work such as compression and
checksum calculations.
A fractional number of CPUs will be rounded down.
.Pp
The default value of
.Sy 80%
was chosen to avoid using all CPUs which can result in
latency issues and inconsistent application performance,
especially when slower compression and/or checksumming is enabled.
.
.It Sy zio_taskq_batch_tpq Ns = Ns Sy 0 Pq uint
Number of worker threads per taskq.
Lower values improve I/O ordering and CPU utilization,
while higher values reduce lock contention.
.Pp
If
.Sy 0 ,
generate a system-dependent value close to 6 threads per taskq.
.
.It Sy zvol_inhibit_dev Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Do not create zvol device nodes.
This may slightly improve startup time on
systems with a very large number of zvols.
.
.It Sy zvol_major Ns = Ns Sy 230 Pq uint
Major number for zvol block devices.
.
.It Sy zvol_max_discard_blocks Ns = Ns Sy 16384 Pq long
Discard (TRIM) operations on zvols will be done in batches of this
many blocks, where block size is determined by the
.Sy volblocksize
property of a zvol.
.
.It Sy zvol_prefetch_bytes Ns = Ns Sy 131072 Ns B Po 128 KiB Pc Pq uint
When adding a zvol to the system, prefetch this many bytes
from the start and end of the volume.
Prefetching these regions of the volume is desirable,
because they are likely to be accessed immediately by
.Xr blkid 8
or the kernel partitioner.
.
.It Sy zvol_request_sync Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When processing I/O requests for a zvol, submit them synchronously.
This effectively limits the queue depth to
.Em 1
for each I/O submitter.
When unset, requests are handled asynchronously by a thread pool.
The number of requests which can be handled concurrently is controlled by
.Sy zvol_threads .
.Sy zvol_request_sync
is ignored when running on a kernel that supports block multiqueue
.Pq Li blk-mq .
.
.It Sy zvol_threads Ns = Ns Sy 0 Pq uint
The number of system-wide threads to use for processing zvol block I/Os.
If
.Sy 0
(the default) then internally set
.Sy zvol_threads
to the number of CPUs present or 32 (whichever is greater).
.
.It Sy zvol_blk_mq_threads Ns = Ns Sy 0 Pq uint
The number of threads per zvol to use for queuing IO requests.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
If
.Sy 0
(the default) then internally set
.Sy zvol_blk_mq_threads
to the number of CPUs present.
.
.It Sy zvol_use_blk_mq Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Set to
.Sy 1
to use the
.Li blk-mq
API for zvols.
Set to
.Sy 0
(the default) to use the legacy zvol APIs.
This setting can give better or worse zvol performance depending on
the workload.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only read and assigned to a zvol at zvol load time.
.
.It Sy zvol_blk_mq_blocks_per_thread Ns = Ns Sy 8 Pq uint
If
.Sy zvol_use_blk_mq
is enabled, then process this number of
.Sy volblocksize Ns -sized blocks per zvol thread.
This tunable can be used to favor better performance for zvol reads (lower
values) or writes (higher values).
If set to
.Sy 0 ,
then the zvol layer will process the maximum number of blocks
per thread that it can.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
.
.It Sy zvol_blk_mq_queue_depth Ns = Ns Sy 0 Pq uint
The queue_depth value for the zvol
.Li blk-mq
interface.
This parameter will only appear if your kernel supports
.Li blk-mq
and is only applied at each zvol's load time.
If
.Sy 0
(the default) then use the kernel's default queue depth.
Values are clamped to the kernel's
.Dv BLKDEV_MIN_RQ
and
.Dv BLKDEV_MAX_RQ Ns / Ns Dv BLKDEV_DEFAULT_RQ
limits.
.
.It Sy zvol_volmode Ns = Ns Sy 1 Pq uint
Defines the behaviour of zvol block devices when
.Sy volmode Ns = Ns Sy default :
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
.No equivalent to Sy full
.It Sy 2
.No equivalent to Sy dev
.It Sy 3
.No equivalent to Sy none
.El
.
.It Sy zvol_enforce_quotas Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Enable strict ZVOL quota enforcement.
Strict quota enforcement may have a performance impact.
.El
.
.Sh ZFS I/O SCHEDULER
ZFS issues I/O operations to leaf vdevs to satisfy and complete I/O operations.
The scheduler determines when and in what order those operations are issued.
The scheduler divides operations into five I/O classes,
prioritized in the following order: sync read, sync write, async read,
async write, and scrub/resilver.
Each queue defines the minimum and maximum number of concurrent operations
that may be issued to the device.
In addition, the device has an aggregate maximum,
.Sy zfs_vdev_max_active .
Note that the sum of the per-queue minima must not exceed the aggregate maximum.
If the sum of the per-queue maxima exceeds the aggregate maximum,
then the number of active operations may reach
.Sy zfs_vdev_max_active ,
in which case no further operations will be issued,
regardless of whether all per-queue minima have been met.
.Pp
For many physical devices, throughput increases with the number of
concurrent operations, but latency typically suffers.
Furthermore, physical devices typically have a limit
at which more concurrent operations have no
effect on throughput or can actually cause it to decrease.
.Pp
The scheduler selects the next operation to issue by first looking for an
I/O class whose minimum has not been satisfied.
Once all are satisfied and the aggregate maximum has not been hit,
the scheduler looks for classes whose maximum has not been satisfied.
Iteration through the I/O classes is done in the order specified above.
No further operations are issued
if the aggregate maximum number of concurrent operations has been hit,
or if there are no operations queued for an I/O class that has not hit its
maximum.
Every time an I/O operation is queued or an operation completes,
the scheduler looks for new operations to issue.
.Pp
In general, smaller
.Sy max_active Ns s
will lead to lower latency of synchronous operations.
Larger
.Sy max_active Ns s
may lead to higher overall throughput, depending on underlying storage.
.Pp
The ratio of the queues'
.Sy max_active Ns s
determines the balance of performance between reads, writes, and scrubs.
For example, increasing
.Sy zfs_vdev_scrub_max_active
will cause the scrub or resilver to complete more quickly,
but reads and writes to have higher latency and lower throughput.
.Pp
All I/O classes have a fixed maximum number of outstanding operations,
except for the async write class.
Asynchronous writes represent the data that is committed to stable storage
during the syncing stage for transaction groups.
Transaction groups enter the syncing state periodically,
so the number of queued async writes will quickly burst up
and then bleed down to zero.
Rather than servicing them as quickly as possible,
the I/O scheduler changes the maximum number of active async write operations
according to the amount of dirty data in the pool.
Since both throughput and latency typically increase with the number of
concurrent operations issued to physical devices, reducing the
burstiness in the number of simultaneous operations also stabilizes the
response time of operations from other queues, in particular synchronous ones.
In broad strokes, the I/O scheduler will issue more concurrent operations
from the async write queue as there is more dirty data in the pool.
.
.Ss Async Writes
The number of concurrent operations issued for the async write I/O class
follows a piece-wise linear function defined by a few adjustable points:
.Bd -literal
| o---------| <-- \fBzfs_vdev_async_write_max_active\fP
^ | /^ |
| | / | |
active | / | |
I/O | / | |
count | / | |
| / | |
|-------o | | <-- \fBzfs_vdev_async_write_min_active\fP
0|_______^______|_________|
0% | | 100% of \fBzfs_dirty_data_max\fP
| |
| `-- \fBzfs_vdev_async_write_active_max_dirty_percent\fP
`--------- \fBzfs_vdev_async_write_active_min_dirty_percent\fP
.Ed
.Pp
Until the amount of dirty data exceeds a minimum percentage of the dirty
data allowed in the pool, the I/O scheduler will limit the number of
concurrent operations to the minimum.
As that threshold is crossed, the number of concurrent operations issued
increases linearly to the maximum at the specified maximum percentage
of the dirty data allowed in the pool.
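.Pp
As a hedged numeric sketch of the piece-wise linear function above
.Pq the values are hypothetical, not the defaults :
.Bd -literal -compact -offset 4n
zfs_vdev_async_write_active_min_dirty_percent = 30
zfs_vdev_async_write_active_max_dirty_percent = 60
zfs_vdev_async_write_min_active               = 2
zfs_vdev_async_write_max_active               = 10

dirty data = 45% of zfs_dirty_data_max (halfway up the slope)
active     = 2 + (45 - 30) / (60 - 30) * (10 - 2) = 6 concurrent writes
.Ed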
.Pp
Ideally, the amount of dirty data on a busy pool will stay in the sloped
part of the function between
.Sy zfs_vdev_async_write_active_min_dirty_percent
and
.Sy zfs_vdev_async_write_active_max_dirty_percent .
If it exceeds the maximum percentage,
this indicates that the rate of incoming data is
greater than the rate that the backend storage can handle.
In this case, we must further throttle incoming writes,
as described in the next section.
.
.Sh ZFS TRANSACTION DELAY
We delay transactions when we've determined that the backend storage
isn't able to accommodate the rate of incoming writes.
.Pp
If there is already a transaction waiting, we delay relative to when
that transaction will finish waiting.
This way the calculated delay time
is independent of the number of threads concurrently executing transactions.
.Pp
If we are the only waiter, wait relative to when the transaction started,
rather than the current time.
This credits the transaction for "time already served",
e.g. reading indirect blocks.
.Pp
The minimum time for a transaction to take is calculated as
.D1 min_time = min( Ns Sy zfs_delay_scale No \(mu Po Sy dirty No \- Sy min Pc / Po Sy max No \- Sy dirty Pc , 100ms)
.Pp
The delay has two degrees of freedom that can be adjusted via tunables.
The percentage of dirty data at which we start to delay is defined by
.Sy zfs_delay_min_dirty_percent .
This should typically be at or above
.Sy zfs_vdev_async_write_active_max_dirty_percent ,
so that we only start to delay after writing at full speed
has failed to keep up with the incoming write rate.
The scale of the curve is defined by
.Sy zfs_delay_scale .
Roughly speaking, this variable determines the amount of delay at the midpoint
of the curve.
.Bd -literal
delay
10ms +-------------------------------------------------------------*+
| *|
9ms + *+
| *|
8ms + *+
| * |
7ms + * +
| * |
6ms + * +
| * |
5ms + * +
| * |
4ms + * +
| * |
3ms + * +
| * |
2ms + (midpoint) * +
| | ** |
1ms + v *** +
| \fBzfs_delay_scale\fP ----------> ******** |
0 +-------------------------------------*********----------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note that, since the delay is added to the outstanding time remaining on the
most recent transaction, it is effectively the inverse of IOPS.
Here, the midpoint of
.Em 500 us
translates to
.Em 2000 IOPS .
The shape of the curve
was chosen such that small changes in the amount of accumulated dirty data
in the first three quarters of the curve yield relatively small differences
in the amount of delay.
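.Pp
As a hedged arithmetic sketch of the formula above
.Pq all numbers are hypothetical :
.Bd -literal -compact -offset 4n
zfs_delay_scale = 500,000 ns
max   = 1000 MiB   (zfs_dirty_data_max)
min   =  600 MiB   (zfs_delay_min_dirty_percent of the maximum)
dirty =  900 MiB   (current dirty data)

min_time = min(500,000 * (900 - 600) / (1000 - 900), 100 ms)
         = min(1,500,000 ns, 100 ms)
         = 1.5 ms of delay per transaction
.Ed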
.Pp
The effects can be easier to understand when the amount of delay is
represented on a logarithmic scale:
.Bd -literal
delay
100ms +-------------------------------------------------------------++
+ +
| |
+ *+
10ms + *+
+ ** +
| (midpoint) ** |
+ | ** +
1ms + v **** +
+ \fBzfs_delay_scale\fP ----------> ***** +
| **** |
+ **** +
100us + ** +
+ * +
| * |
+ * +
10us + * +
+ +
| |
+ +
+--------------------------------------------------------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note here that only as the amount of dirty data approaches its limit does
the delay start to increase rapidly.
The goal of a properly tuned system should be to keep the amount of dirty data
out of that range by first ensuring that the appropriate limits are set
for the I/O scheduler to reach optimal throughput on the back-end storage,
and then by changing the value of
.Sy zfs_delay_scale
to increase the steepness of the curve.
diff --git a/sys/contrib/openzfs/man/man7/zpool-features.7 b/sys/contrib/openzfs/man/man7/zpool-features.7
index 2b7dcb63829c..b901ce6c2935 100644
--- a/sys/contrib/openzfs/man/man7/zpool-features.7
+++ b/sys/contrib/openzfs/man/man7/zpool-features.7
@@ -1,952 +1,954 @@
.\"
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or https://opensource.org/licenses/CDDL-1.0.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\" Copyright (c) 2019, Klara Inc.
.\" Copyright (c) 2019, Allan Jude
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\"
.Dd June 23, 2022
.Dt ZPOOL-FEATURES 7
.Os
.
.Sh NAME
.Nm zpool-features
.Nd description of ZFS pool features
.
.Sh DESCRIPTION
ZFS pool on-disk format versions are specified via
.Dq features
which replace the old on-disk format numbers
.Pq the last supported on-disk format number is 28 .
To enable a feature on a pool, use
.Nm zpool Cm upgrade ,
or set the
.Sy feature Ns @ Ns Ar feature-name
property to
.Sy enabled .
Please also see the
.Sx Compatibility feature sets
section for information on how sets of features may be enabled together.
.Pp
The pool format does not affect file system version compatibility or the ability
to send file systems between pools.
.Pp
Since most features can be enabled independently of each other, the on-disk
format of the pool is specified by the set of all features marked as
.Sy active
on the pool.
If the pool was created by another software version
this set may include unsupported features.
.
.Ss Identifying features
Every feature has a GUID of the form
.Ar com.example : Ns Ar feature-name .
The reversed DNS name ensures that the feature's GUID is unique across all ZFS
implementations.
When unsupported features are encountered on a pool they will
be identified by their GUIDs.
Refer to the documentation for the ZFS
implementation that created the pool for information about those features.
.Pp
Each supported feature also has a short name.
By convention a feature's short name is the portion of its GUID which follows
the
.Sq \&:
.Po
i.e.
.Ar com.example : Ns Ar feature-name
would have the short name
.Ar feature-name
.Pc ,
however a feature's short name may differ across ZFS implementations if
following the convention would result in name conflicts.
.
.Ss Feature states
Features can be in one of three states:
.Bl -tag -width "disabled"
.It Sy active
This feature's on-disk format changes are in effect on the pool.
Support for this feature is required to import the pool in read-write mode.
If this feature is not read-only compatible,
support is also required to import the pool in read-only mode
.Pq see Sx Read-only compatibility .
.It Sy enabled
An administrator has marked this feature as enabled on the pool, but the
feature's on-disk format changes have not been made yet.
The pool can still be imported by software that does not support this feature,
but changes may be made to the on-disk format at any time
which will move the feature to the
.Sy active
state.
Some features may support returning to the
.Sy enabled
state after becoming
.Sy active .
See feature-specific documentation for details.
.It Sy disabled
This feature's on-disk format changes have not been made and will not be made
unless an administrator moves the feature to the
.Sy enabled
state.
Features cannot be disabled once they have been enabled.
.El
.Pp
The state of supported features is exposed through pool properties of the form
.Sy feature Ns @ Ns Ar short-name .
.
.Ss Read-only compatibility
Some features may make on-disk format changes that do not interfere with other
software's ability to read from the pool.
These features are referred to as
.Dq read-only compatible .
If all unsupported features on a pool are read-only compatible,
the pool can be imported in read-only mode by setting the
.Sy readonly
property during import
.Po see
.Xr zpool-import 8
for details on importing pools
.Pc .
.
.Ss Unsupported features
For each unsupported feature enabled on an imported pool, a pool property
named
.Sy unsupported Ns @ Ns Ar feature-name
will indicate why the import was allowed despite the unsupported feature.
Possible values for this property are:
.Bl -tag -width "readonly"
.It Sy inactive
The feature is in the
.Sy enabled
state and therefore the pool's on-disk
format is still compatible with software that does not support this feature.
.It Sy readonly
The feature is read-only compatible and the pool has been imported in
read-only mode.
.El
.
.Ss Feature dependencies
Some features depend on other features being enabled in order to function.
Enabling a feature will automatically enable any features it depends on.
.
.Ss Compatibility feature sets
It is sometimes necessary for a pool to maintain compatibility with a
specific on-disk format, by enabling and disabling particular features.
The
.Sy compatibility
feature facilitates this by allowing feature sets to be read from text files.
When set to
.Sy off
.Pq the default ,
compatibility feature sets are disabled
.Pq i.e. all features are enabled ;
when set to
.Sy legacy ,
no features are enabled.
When set to a comma-separated list of filenames
.Po
each filename may either be an absolute path, or relative to
.Pa /etc/zfs/compatibility.d
or
.Pa /usr/share/zfs/compatibility.d
.Pc ,
the lists of requested features are read from those files,
separated by whitespace and/or commas.
Only features present in all files are enabled.
.Pp
Simple sanity checks are applied to the files:
they must be between 1 B and 16 KiB in size, and must end with a newline
character.
.Pp
The requested features are applied when a pool is created using
.Nm zpool Cm create Fl o Sy compatibility Ns = Ns Ar …
and controls which features are enabled when using
.Nm zpool Cm upgrade .
.Nm zpool Cm status
will not show a warning about disabled features which are not part
of the requested feature set.
.Pp
The special value
.Sy legacy
prevents any features from being enabled, either via
.Nm zpool Cm upgrade
or
.Nm zpool Cm set Sy feature Ns @ Ns Ar feature-name Ns = Ns Sy enabled .
This setting also prevents pools from being upgraded to newer on-disk versions.
This is a safety measure to prevent new features from being
accidentally enabled, breaking compatibility.
.Pp
By convention, compatibility files in
.Pa /usr/share/zfs/compatibility.d
are provided by the distribution, and include feature sets
supported by important versions of popular distributions, and feature
sets commonly supported at the start of each year.
Compatibility files in
.Pa /etc/zfs/compatibility.d ,
if present, will take precedence over files with the same name in
.Pa /usr/share/zfs/compatibility.d .
.Pp
If an unrecognized feature is found in these files, an error message will
be shown.
If the unrecognized feature is in a file in
.Pa /etc/zfs/compatibility.d ,
this is treated as an error and processing will stop.
If the unrecognized feature is under
.Pa /usr/share/zfs/compatibility.d ,
this is treated as a warning and processing will continue.
This difference is to allow distributions to include features
which might not be recognized by the currently-installed binaries.
.Pp
Compatibility files may include comments:
any text from
.Sq #
to the end of the line is ignored.
.Pp
.Sy Example :
.Bd -literal -compact -offset 4n
.No example# Nm cat Pa /usr/share/zfs/compatibility.d/grub2
# Features which are supported by GRUB2
async_destroy
bookmarks
embedded_data
empty_bpobj
enabled_txg
extensible_dataset
filesystem_limits
hole_birth
large_blocks
+livelist
lz4_compress
spacemap_histogram
+zpool_checkpoint
.No example# Nm zpool Cm create Fl o Sy compatibility Ns = Ns Ar grub2 Ar bootpool Ar vdev
.Ed
.Pp
See
.Xr zpool-create 8
and
.Xr zpool-upgrade 8
for more information on how these commands are affected by feature sets.
.
.de feature
.It Sy \\$2
.Bl -tag -compact -width "READ-ONLY COMPATIBLE"
.It GUID
.Sy \\$1:\\$2
.if !"\\$4"" \{\
.It DEPENDENCIES
\fB\\$4\fP\c
.if !"\\$5"" , \fB\\$5\fP\c
.if !"\\$6"" , \fB\\$6\fP\c
.if !"\\$7"" , \fB\\$7\fP\c
.if !"\\$8"" , \fB\\$8\fP\c
.if !"\\$9"" , \fB\\$9\fP\c
.\}
.It READ-ONLY COMPATIBLE
\\$3
.El
.Pp
..
.
.ds instant-never \
.No This feature becomes Sy active No as soon as it is enabled \
and will never return to being Sy enabled .
.
.ds remount-upgrade \
.No Each filesystem will be upgraded automatically when remounted, \
or when a new file is created under that filesystem. \
The upgrade can also be triggered on filesystems via \
Nm zfs Cm set Sy version Ns = Ns Sy current Ar fs . \
No The upgrade process runs in the background and may take a while to complete \
for filesystems containing large amounts of files .
.
.de checksum-spiel
When the
.Sy \\$1
feature is set to
.Sy enabled ,
the administrator can turn on the
.Sy \\$1
checksum on any dataset using
.Nm zfs Cm set Sy checksum Ns = Ns Sy \\$1 Ar dset
.Po see Xr zfs-set 8 Pc .
This feature becomes
.Sy active
once a
.Sy checksum
property has been set to
.Sy \\$1 ,
and will return to being
.Sy enabled
once all filesystems that have ever had their checksum set to
.Sy \\$1
are destroyed.
..
.
.Sh FEATURES
The following features are supported on this system:
.Bl -tag -width Ds
.feature org.zfsonlinux allocation_classes yes
This feature enables support for separate allocation classes.
.Pp
This feature becomes
.Sy active
when a dedicated allocation class vdev
.Pq dedup or special
is created with the
.Nm zpool Cm create No or Nm zpool Cm add No commands .
With device removal, it can be returned to the
.Sy enabled
state if all the dedicated allocation class vdevs are removed.
.
.feature com.delphix async_destroy yes
Destroying a file system requires traversing all of its data in order to
return its used space to the pool.
Without
.Sy async_destroy ,
the file system is not fully removed until all space has been reclaimed.
If the destroy operation is interrupted by a reboot or power outage,
the next attempt to open the pool will need to complete the destroy
operation synchronously.
.Pp
When
.Sy async_destroy
is enabled, the file system's data will be reclaimed by a background process,
allowing the destroy operation to complete
without traversing the entire file system.
The background process is able to resume
interrupted destroys after the pool has been opened, eliminating the need
to finish interrupted destroys as part of the open operation.
The amount of space remaining to be reclaimed by the background process
is available through the
.Sy freeing
property.
.Pp
This feature is only
.Sy active
while
.Sy freeing
is non-zero.
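.Pp
As a hedged example
.Pq pool name hypothetical ,
the progress of a background destroy can be observed through that property:
.Bd -literal -compact -offset 4n
# zpool get freeing tank
.Ed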
.
.feature org.openzfs blake3 no extensible_dataset
This feature enables the use of the BLAKE3 hash algorithm for checksum and
dedup.
BLAKE3 is a secure hash algorithm focused on high performance.
.Pp
.checksum-spiel blake3
.
.feature com.fudosecurity block_cloning yes
When this feature is enabled, ZFS will use block cloning for operations like
.Fn copy_file_range 2 .
Block cloning allows multiple references to a single block to be created.
It is much faster than copying the data (as the actual data is neither read nor
written) and takes no additional space.
Blocks can be cloned across datasets under some conditions (like disabled
encryption and equal
.Nm recordsize ) .
.Pp
This feature becomes
.Sy active
when the first block is cloned.
When the last cloned block is freed, it returns to the
.Sy enabled
state.
.
.feature com.delphix bookmarks yes extensible_dataset
This feature enables use of the
.Nm zfs Cm bookmark
command.
.Pp
This feature is
.Sy active
while any bookmarks exist in the pool.
All bookmarks in the pool can be listed by running
.Nm zfs Cm list Fl t Sy bookmark Fl r Ar poolname .
.
.feature com.datto bookmark_v2 no bookmark extensible_dataset
This feature enables the creation and management of larger bookmarks which are
needed for other features in ZFS.
.Pp
This feature becomes
.Sy active
when a v2 bookmark is created and will be returned to the
.Sy enabled
state when all v2 bookmarks are destroyed.
.
.feature com.delphix bookmark_written no bookmark extensible_dataset bookmark_v2
This feature enables additional bookmark accounting fields, enabling the
.Sy written Ns # Ns Ar bookmark
property
.Pq space written since a bookmark
and estimates of send stream sizes for incrementals from bookmarks.
.Pp
This feature becomes
.Sy active
when a bookmark is created and will be
returned to the
.Sy enabled
state when all bookmarks with these fields are destroyed.
.
.feature org.openzfs device_rebuild yes
This feature enables the ability for the
.Nm zpool Cm attach
and
.Nm zpool Cm replace
commands to perform sequential reconstruction
.Pq instead of healing reconstruction
when resilvering.
.Pp
Sequential reconstruction resilvers a device in LBA order without immediately
verifying the checksums.
Once complete, a scrub is started, which then verifies the checksums.
This approach allows full redundancy to be restored to the pool
in the minimum amount of time.
This two-phase approach will take longer than a healing resilver
when the time to verify the checksums is included.
However, unless there is additional pool damage,
no checksum errors should be reported by the scrub.
This feature is incompatible with raidz configurations.
.Pp
This feature becomes
.Sy active
while a sequential resilver is in progress, and returns to
.Sy enabled
when the resilver completes.
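.Pp
As a hedged example
.Pq device names hypothetical ,
a sequential reconstruction is requested with the
.Fl s
flag of
.Xr zpool-attach 8
or
.Xr zpool-replace 8 :
.Bd -literal -compact -offset 4n
# zpool replace -s tank sda sdb
.Ed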
.
.feature com.delphix device_removal no
This feature enables the
.Nm zpool Cm remove
command to remove top-level vdevs,
evacuating them to reduce the total size of the pool.
.Pp
This feature becomes
.Sy active
when the
.Nm zpool Cm remove
command is used
on a top-level vdev, and will never return to being
.Sy enabled .
.
.feature org.openzfs draid no
This feature enables use of the
.Sy draid
vdev type.
dRAID is a variant of RAID-Z which provides integrated distributed
hot spares that allow faster resilvering while retaining the benefits of RAID-Z.
Data, parity, and spare space are organized in redundancy groups
and distributed evenly over all of the devices.
.Pp
This feature becomes
.Sy active
when creating a pool which uses the
.Sy draid
vdev type, or when adding a new
.Sy draid
vdev to an existing pool.
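.Pp
A hedged example creating a pool from a single double-parity
.Sy draid
vdev with default data and spare counts
.Pq device names hypothetical :
.Bd -literal -compact -offset 4n
# zpool create tank draid2 sda sdb sdc sdd sde sdf sdg sdh
.Ed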
.
.feature org.illumos edonr no extensible_dataset
This feature enables the use of the Edon-R hash algorithm for checksum,
including for nopwrite
.Po if compression is also enabled, an overwrite of
a block whose checksum matches the data being written will be ignored
.Pc .
In an abundance of caution, Edon-R requires verification when used with
dedup:
.Nm zfs Cm set Sy dedup Ns = Ns Sy edonr , Ns Sy verify
.Po see Xr zfs-set 8 Pc .
.Pp
Edon-R is a very high-performance hash algorithm that was part
of the NIST SHA-3 competition.
It provides extremely high hash performance
.Pq over 350% faster than SHA-256 ,
but was not selected because of its unsuitability
as a general purpose secure hash algorithm.
This implementation utilizes the new salted checksumming functionality
in ZFS, which means that the checksum is pre-seeded with a secret
256-bit random key
.Pq stored on the pool
before being fed the data block to be checksummed.
Thus the produced checksums are unique to a given pool,
preventing hash collision attacks on systems with dedup.
.Pp
.checksum-spiel edonr
.
.feature com.delphix embedded_data no
This feature improves the performance and compression ratio of
highly-compressible blocks.
Blocks whose contents can compress to 112 bytes
or smaller can take advantage of this feature.
.Pp
When this feature is enabled, the contents of highly-compressible blocks are
stored in the block
.Dq pointer
itself
.Po a misnomer in this case, as it contains
the compressed data, rather than a pointer to its location on disk
.Pc .
Thus the space of the block
.Pq one sector, typically 512 B or 4 KiB
is saved, and no additional I/O is needed to read and write the data block.
.
\*[instant-never]
.
.feature com.delphix empty_bpobj yes
This feature increases the performance of creating and using a large
number of snapshots of a single filesystem or volume, and also reduces
the disk space required.
.Pp
When there are many snapshots, each snapshot uses many Block Pointer
Objects
.Pq bpobjs
to track blocks associated with that snapshot.
However, in common use cases, most of these bpobjs are empty.
This feature allows us to create each bpobj on-demand,
thus eliminating the empty bpobjs.
.Pp
This feature is
.Sy active
while there are any filesystems, volumes,
or snapshots which were created after enabling this feature.
.
.feature com.delphix enabled_txg yes
Once this feature is enabled, ZFS records the transaction group number
in which new features are enabled.
This has no user-visible impact, but other features may depend on this feature.
.Pp
This feature becomes
.Sy active
as soon as it is enabled and will never return to being
.Sy enabled .
.
.feature com.datto encryption no bookmark_v2 extensible_dataset
This feature enables the creation and management of natively encrypted datasets.
.Pp
This feature becomes
.Sy active
when an encrypted dataset is created and will be returned to the
.Sy enabled
state when all datasets that use this feature are destroyed.
.
.feature com.delphix extensible_dataset no
This feature allows more flexible use of internal ZFS data structures,
and exists for other features to depend on.
.Pp
This feature will be
.Sy active
when the first dependent feature uses it, and will be returned to the
.Sy enabled
state when all datasets that use this feature are destroyed.
.
.feature com.joyent filesystem_limits yes extensible_dataset
This feature enables filesystem and snapshot limits.
These limits can be used to control how many filesystems and/or snapshots
can be created at the point in the tree on which the limits are set.
.Pp
This feature is
.Sy active
once either of the limit properties has been set on a dataset
and will never return to being
.Sy enabled .
.
.feature com.delphix head_errlog no
This feature enables the upgraded version of errlog, which required an on-disk
error log format change.
Now the error log of each head dataset is stored separately in the zap object
and keyed by the head id.
With this feature enabled, every dataset affected by an error block is listed
in the output of
.Nm zpool Cm status .
In case of encrypted filesystems with unloaded keys, we are unable to check
their snapshots or clones for errors, so these will not be reported;
an
.Dq access denied
error will be reported instead.
.Pp
\*[instant-never]
.
.feature com.delphix hole_birth no enabled_txg
This feature has/had bugs, the result of which is that, if you do a
.Nm zfs Cm send Fl i
.Pq or Fl R , No since it uses Fl i
from an affected dataset, the receiving party will not see any checksum
or other errors, but the resulting destination snapshot
will not match the source.
Its use by
.Nm zfs Cm send Fl i
has been disabled by default
.Po
see
.Sy send_holes_without_birth_time
in
.Xr zfs 4
.Pc .
.Pp
This feature improves performance of incremental sends
.Pq Nm zfs Cm send Fl i
and receives for objects with many holes.
The most common case of hole-filled objects is zvols.
.Pp
An incremental send stream from snapshot
.Sy A No to snapshot Sy B
contains information about every block that changed between
.Sy A No and Sy B .
Blocks which did not change between those snapshots can be
identified and omitted from the stream using a piece of metadata called
the
.Dq block birth time ,
but birth times are not recorded for holes
.Pq blocks filled only with zeroes .
Since holes created after
.Sy A No cannot be distinguished from holes created before Sy A ,
information about every hole in the entire filesystem or zvol
is included in the send stream.
.Pp
For workloads where holes are rare this is not a problem.
However, when incrementally replicating filesystems or zvols with many holes
.Pq for example a zvol formatted with another filesystem
a lot of time will be spent sending and receiving unnecessary information
about holes that already exist on the receiving side.
.Pp
Once the
.Sy hole_birth
feature has been enabled the block birth times
of all new holes will be recorded.
Incremental sends between snapshots created after this feature is enabled
will use this new metadata to avoid sending information about holes that
already exist on the receiving side.
.Pp
\*[instant-never]
.
.feature org.open-zfs large_blocks no extensible_dataset
This feature allows the record size on a dataset to be set larger than 128 KiB.
.Pp
This feature becomes
.Sy active
once a dataset contains a file with a block size larger than 128 KiB,
and will return to being
.Sy enabled
once all filesystems that have ever had their recordsize larger than 128 KiB
are destroyed.
.
.feature org.zfsonlinux large_dnode no extensible_dataset
This feature allows the size of dnodes in a dataset to be set larger than 512 B.
.Pp
This feature becomes
.Sy active
once a dataset contains an object with a dnode larger than 512 B,
which occurs as a result of setting the
.Sy dnodesize
dataset property to a value other than
.Sy legacy .
The feature will return to being
.Sy enabled
once all filesystems that have ever contained a dnode larger than 512 B
are destroyed.
Large dnodes allow more data to be stored in the bonus buffer,
thus potentially improving performance by avoiding the use of spill blocks.
.
.feature com.delphix livelist yes
This feature allows clones to be deleted faster than the traditional method
when a large number of random/sparse writes have been made to the clone.
All blocks allocated and freed after a clone is created are tracked by
the clone's livelist, which is referenced during the deletion of the clone.
The feature is activated when a clone is created and remains
.Sy active
until all clones have been destroyed.
.
.feature com.delphix log_spacemap yes com.delphix:spacemap_v2
This feature improves performance for heavily-fragmented pools,
especially when workloads are heavy in random-writes.
It does so by logging all the metaslab changes on a single spacemap every TXG
instead of scattering multiple writes to all the metaslab spacemaps.
.Pp
\*[instant-never]
.
.feature org.illumos lz4_compress no
.Sy lz4
is a high-performance real-time compression algorithm that
features significantly faster compression and decompression as well as a
higher compression ratio than the older
.Sy lzjb
compression.
Typically,
.Sy lz4
compression is approximately 50% faster on compressible data and 200% faster
on incompressible data than
.Sy lzjb .
It is also approximately 80% faster on decompression,
while giving approximately a 10% better compression ratio.
.Pp
When the
.Sy lz4_compress
feature is set to
.Sy enabled ,
the administrator can turn on
.Sy lz4
compression on any dataset on the pool using the
.Xr zfs-set 8
command.
All newly written metadata will be compressed with the
.Sy lz4
algorithm.
.Pp
\*[instant-never]
.
.feature com.joyent multi_vdev_crash_dump no
This feature allows a dump device to be configured with a pool comprised
of multiple vdevs.
Those vdevs may be arranged in any mirrored or raidz configuration.
.Pp
When the
.Sy multi_vdev_crash_dump
feature is set to
.Sy enabled ,
the administrator can use
.Xr dumpadm 8
to configure a dump device on a pool comprised of multiple vdevs.
.Pp
Under
.Fx
and Linux this feature is unused, but registered for compatibility.
New pools created on these systems will have the feature
.Sy enabled
but will never transition to
.Sy active ,
as this functionality is not required for crash dump support.
Existing pools where this feature is
.Sy active
can be imported.
.
.feature com.delphix obsolete_counts yes device_removal
This feature is an enhancement of
.Sy device_removal ,
which will over time reduce the memory used to track removed devices.
When indirect blocks are freed or remapped,
we note that their part of the indirect mapping is
.Dq obsolete
– no longer needed.
.Pp
This feature becomes
.Sy active
when the
.Nm zpool Cm remove
command is used on a top-level vdev, and will never return to being
.Sy enabled .
.
.feature org.zfsonlinux project_quota yes extensible_dataset
This feature allows administrators to account space and object usage
against the project identifier
.Pq ID .
.Pp
The project ID is an object-based attribute.
When upgrading an existing filesystem,
objects without a project ID will be assigned a zero project ID.
When this feature is enabled, newly created objects inherit
their parent directories' project ID if the parent's inherit flag is set
.Pq via Nm chattr Sy [+-]P No or Nm zfs Cm project Fl s Ns | Ns Fl C .
Otherwise, the new object's project ID will be zero.
An object's project ID can be changed at any time by the owner
.Pq or privileged user
via
.Nm chattr Fl p Ar prjid
or
.Nm zfs Cm project Fl p Ar prjid .
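.Pp
A hedged example
.Pq path and project ID hypothetical
that sets project ID 100 with the inherit flag on a directory and then
displays the project ID of a file beneath it:
.Bd -literal -compact -offset 4n
# zfs project -p 100 -s /tank/fs/projects
# zfs project -d /tank/fs/projects/data.bin
.Ed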
.Pp
This feature will become
.Sy active
as soon as it is enabled and will never return to being
.Sy disabled .
\*[remount-upgrade]
.
.feature com.delphix redaction_bookmarks no bookmarks extensible_dataset
This feature enables the use of redacted
.Nm zfs Cm send Ns s ,
which create redaction bookmarks storing the list of blocks
redacted by the send that created them.
For more information about redacted sends, see
.Xr zfs-send 8 .
.
.feature com.delphix redacted_datasets no extensible_dataset
This feature enables the receiving of redacted
.Nm zfs Cm send
streams, which create redacted datasets when received.
These datasets are missing some of their blocks,
and so cannot be safely mounted, and their contents cannot be safely read.
For more information about redacted receives, see
.Xr zfs-send 8 .
.
.feature com.datto resilver_defer yes
This feature allows ZFS to postpone new resilvers if an existing one is already
in progress.
Without this feature, any new resilvers will cause the currently
running one to be immediately restarted from the beginning.
.Pp
This feature becomes
.Sy active
once a resilver has been deferred, and returns to being
.Sy enabled
when the deferred resilver begins.
.
.feature org.illumos sha512 no extensible_dataset
This feature enables the use of the SHA-512/256 truncated hash algorithm
.Pq FIPS 180-4
for checksum and dedup.
The native 64-bit arithmetic of SHA-512 provides an approximate 50%
performance boost over SHA-256 on 64-bit hardware
and is thus a good minimum-change replacement candidate
for systems where hash performance is important,
but these systems cannot for whatever reason utilize the faster
.Sy skein No and Sy edonr
algorithms.
.Pp
.checksum-spiel sha512
.
.feature org.illumos skein no extensible_dataset
This feature enables the use of the Skein hash algorithm for checksum and dedup.
Skein is a high-performance secure hash algorithm that was a
finalist in the NIST SHA-3 competition.
It provides a very high security margin and high performance on 64-bit hardware
.Pq 80% faster than SHA-256 .
This implementation also utilizes the new salted checksumming
functionality in ZFS, which means that the checksum is pre-seeded with a
secret 256-bit random key
.Pq stored on the pool
before being fed the data block to be checksummed.
Thus the produced checksums are unique to a given pool,
preventing hash collision attacks on systems with dedup.
.Pp
.checksum-spiel skein
.
.feature com.delphix spacemap_histogram yes
This feature allows ZFS to maintain more information about how free space
is organized within the pool.
If this feature is
.Sy enabled ,
it will be activated when a new space map object is created, or
an existing space map is upgraded to the new format,
and never returns back to being
.Sy enabled .
.
.feature com.delphix spacemap_v2 yes
This feature enables the use of the new space map encoding which
consists of two words
.Pq instead of one
whenever it is advantageous.
The new encoding allows space maps to represent large regions of
space more efficiently on-disk while also increasing their maximum
addressable offset.
.Pp
This feature becomes
.Sy active
once it is
.Sy enabled ,
and never returns back to being
.Sy enabled .
.
.feature org.zfsonlinux userobj_accounting yes extensible_dataset
This feature allows administrators to account object usage information
by user and group.
.Pp
\*[instant-never]
\*[remount-upgrade]
.
.feature com.klarasystems vdev_zaps_v2 no
This feature creates a ZAP object for the root vdev.
.Pp
This feature becomes
.Sy active
after the next
.Nm zpool Cm import
or
.Nm zpool Cm reguid .
.Pp
Properties can be retrieved or set on the root vdev using
.Nm zpool Cm get
and
.Nm zpool Cm set
with
.Sy root
as the vdev name, which is an alias for
.Sy root-0 .
.
.feature org.openzfs zilsaxattr yes extensible_dataset
This feature enables
.Sy xattr Ns = Ns Sy sa
extended attribute logging in the ZIL.
If enabled, extended attribute changes
.Pq both Sy xattrdir Ns = Ns Sy dir No and Sy xattr Ns = Ns Sy sa
are guaranteed to be durable if either the dataset had
.Sy sync Ns = Ns Sy always
set at the time the changes were made, or
.Xr sync 2
is called on the dataset after the changes were made.
.Pp
This feature becomes
.Sy active
when a ZIL is created for at least one dataset and will be returned to the
.Sy enabled
state when it is destroyed for all datasets that use this feature.
.
.feature com.delphix zpool_checkpoint yes
This feature enables the
.Nm zpool Cm checkpoint
command that can checkpoint the state of the pool
at the time it was issued and later rewind back to it or discard it.
.Pp
This feature becomes
.Sy active
when the
.Nm zpool Cm checkpoint
command is used to checkpoint the pool.
The feature will only return back to being
.Sy enabled
when the pool is rewound or the checkpoint has been discarded.
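.Pp
A hedged sketch of the typical lifecycle
.Pq pool name hypothetical ;
the first command takes the checkpoint, the second discards it, and the
export/import pair instead rewinds the pool to it:
.Bd -literal -compact -offset 4n
# zpool checkpoint tank
# zpool checkpoint -d tank
# zpool export tank
# zpool import --rewind-to-checkpoint tank
.Ed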
.
.feature org.freebsd zstd_compress no extensible_dataset
.Sy zstd
is a high-performance compression algorithm that features a
combination of high compression ratios and high speed.
Compared to
.Sy gzip ,
.Sy zstd
offers slightly better compression at much higher speeds.
Compared to
.Sy lz4 ,
.Sy zstd
offers much better compression while being only modestly slower.
Typically,
.Sy zstd
compression speed ranges from 250 to 500 MB/s per thread
and decompression speed is over 1 GB/s per thread.
.Pp
When the
.Sy zstd
feature is set to
.Sy enabled ,
the administrator can turn on
.Sy zstd
compression of any dataset using
.Nm zfs Cm set Sy compress Ns = Ns Sy zstd Ar dset
.Po see Xr zfs-set 8 Pc .
This feature becomes
.Sy active
once a
.Sy compress
property has been set to
.Sy zstd ,
and will return to being
.Sy enabled
once all filesystems that have ever had their
.Sy compress
property set to
.Sy zstd
are destroyed.
.El
.
.Sh SEE ALSO
.Xr zfs 8 ,
.Xr zpool 8
diff --git a/sys/contrib/openzfs/man/man8/zdb.8 b/sys/contrib/openzfs/man/man8/zdb.8
index 26c67dabd705..031953c543a1 100644
--- a/sys/contrib/openzfs/man/man8/zdb.8
+++ b/sys/contrib/openzfs/man/man8/zdb.8
@@ -1,540 +1,563 @@
.\"
.\" This file and its contents are supplied under the terms of the
.\" Common Development and Distribution License ("CDDL"), version 1.0.
.\" You may only use this file in accordance with the terms of version
.\" 1.0 of the CDDL.
.\"
.\" A full copy of the text of the CDDL should have accompanied this
.\" source. A copy of the CDDL is also available via the Internet at
.\" http://www.illumos.org/license/CDDL.
.\"
.\" Copyright 2012, Richard Lowe.
.\" Copyright (c) 2012, 2019 by Delphix. All rights reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Lawrence Livermore National Security, LLC.
.\" Copyright (c) 2017 Intel Corporation.
.\"
-.Dd October 7, 2020
+.Dd June 4, 2023
.Dt ZDB 8
.Os
.
.Sh NAME
.Nm zdb
.Nd display ZFS storage pool debugging and consistency information
.Sh SYNOPSIS
.Nm
.Op Fl AbcdDFGhikLMNPsvXYy
.Op Fl e Oo Fl V Oc Oo Fl p Ar path Oc Ns …
.Op Fl I Ar inflight-I/O-ops
.Oo Fl o Ar var Ns = Ns Ar value Oc Ns …
.Op Fl t Ar txg
.Op Fl U Ar cache
.Op Fl x Ar dumpdir
.Op Fl K Ar key
.Op Ar poolname Ns Op / Ns Ar dataset Ns | Ns Ar objset-ID
.Op Ar object Ns | Ns Ar range Ns …
.Nm
.Op Fl AdiPv
.Op Fl e Oo Fl V Oc Oo Fl p Ar path Oc Ns …
.Op Fl U Ar cache
.Op Fl K Ar key
.Ar poolname Ns Op Ar / Ns Ar dataset Ns | Ns Ar objset-ID
.Op Ar object Ns | Ns Ar range Ns …
.Nm
+.Fl B
+.Op Fl e Oo Fl V Oc Oo Fl p Ar path Oc Ns …
+.Op Fl U Ar cache
+.Op Fl K Ar key
+.Ar poolname Ns Ar / Ns Ar objset-ID
+.Op Ar backup-flags
+.Nm
.Fl C
.Op Fl A
.Op Fl U Ar cache
.Nm
.Fl E
.Op Fl A
.Ar word0 : Ns Ar word1 Ns :…: Ns Ar word15
.Nm
.Fl l
.Op Fl Aqu
.Ar device
.Nm
.Fl m
.Op Fl AFLPXY
.Op Fl e Oo Fl V Oc Oo Fl p Ar path Oc Ns …
.Op Fl t Ar txg
.Op Fl U Ar cache
.Ar poolname Op Ar vdev Oo Ar metaslab Oc Ns …
.Nm
.Fl O
.Op Fl K Ar key
.Ar dataset path
.Nm
.Fl r
.Op Fl K Ar key
.Ar dataset path destination
.Nm
.Fl R
.Op Fl A
.Op Fl e Oo Fl V Oc Oo Fl p Ar path Oc Ns …
.Op Fl U Ar cache
.Ar poolname vdev : Ns Ar offset : Ns Oo Ar lsize Ns / Oc Ns Ar psize Ns Op : Ns Ar flags
.Nm
.Fl S
.Op Fl AP
.Op Fl e Oo Fl V Oc Oo Fl p Ar path Oc Ns …
.Op Fl U Ar cache
.Ar poolname
.
.Sh DESCRIPTION
The
.Nm
utility displays information about a ZFS pool useful for debugging and performs
some amount of consistency checking.
It is not a general purpose tool and options
.Pq and facilities
may change.
It is not a
.Xr fsck 8
utility.
.Pp
The output of this command in general reflects the on-disk structure of a ZFS
pool, and is inherently unstable.
The precise output of most invocations is not documented; a knowledge of ZFS
internals is assumed.
.Pp
If the
.Ar dataset
argument does not contain any
.Qq Sy /
or
.Qq Sy @
characters, it is interpreted as a pool name.
The root dataset can be specified as
.Qq Ar pool Ns / .
.Pp
.Nm
is an
.Qq offline
tool; it accesses the block devices underneath the pools directly from
userspace and does not care if the pool is imported or datasets are mounted
(or even if the system understands ZFS at all).
When operating on an imported and active pool it is possible, though unlikely,
that zdb may interpret inconsistent pool data and behave erratically.
.
.Sh OPTIONS
Display options:
.Bl -tag -width Ds
.It Fl b , -block-stats
Display statistics regarding the number, size
.Pq logical, physical and allocated
and deduplication of blocks.
+.It Fl B , -backup
+Generate a backup stream, similar to
+.Nm zfs Cm send ,
+but for the numeric objset ID, and without opening the dataset.
+This can be useful in recovery scenarios if dataset metadata has become
+corrupted but the dataset itself is readable.
+The optional
+.Ar flags
+argument is a string of one or more of the letters
+.Sy e ,
+.Sy L ,
+.Sy c ,
+and
+.Sy w ,
+which correspond to the same flags in
+.Xr zfs-send 8 .
.It Fl c , -checksum
Verify the checksum of all metadata blocks while printing block statistics
.Po see
.Fl b
.Pc .
.Pp
If specified multiple times, verify the checksums of all blocks.
.It Fl C , -config
Display information about the configuration.
If specified with no other options, instead display information about the cache
file
.Pq Pa /etc/zfs/zpool.cache .
To specify the cache file to display, see
.Fl U .
.Pp
If specified multiple times, and a pool name is also specified, display both the
cached configuration and the on-disk configuration.
If specified multiple times with
.Fl e
also display the configuration that would be used were the pool to be imported.
.It Fl d , -datasets
Display information about datasets.
Specified once, displays basic dataset information: ID, create transaction,
size, and object count.
See
.Fl N
for determining if
.Ar poolname Ns Op / Ns Ar dataset Ns | Ns Ar objset-ID
is to use the specified
.Ar dataset Ns | Ns Ar objset-ID
as a string (dataset name) or a number (objset ID) when
datasets have numeric names.
.Pp
If specified multiple times, provides greater and greater verbosity.
.Pp
If object IDs or object ID ranges are specified, display information about
those specific objects or ranges only.
.Pp
An object ID range is specified in terms of a colon-separated tuple of
the form
.Ao start Ac : Ns Ao end Ac Ns Op : Ns Ao flags Ac .
The fields
.Ar start
and
.Ar end
are integer object identifiers that denote the lower and upper bounds
of the range.
An
.Ar end
value of -1 specifies a range with no upper bound.
The
.Ar flags
field optionally specifies a set of flags, described below, that control
which object types are dumped.
By default, all object types are dumped.
A minus sign
.Pq -
negates the effect of the flag that follows it and has no effect unless
preceded by the
.Ar A
flag.
For example, the range 0:-1:A-d will dump all object types except for
directories.
.Pp
.Bl -tag -compact -width Ds
.It Sy A
Dump all objects (this is the default)
.It Sy d
Dump ZFS directory objects
.It Sy f
Dump ZFS plain file objects
.It Sy m
Dump SPA space map objects
.It Sy z
Dump ZAP objects
.It Sy -
Negate the effect of next flag
.El
.It Fl D , -dedup-stats
Display deduplication statistics, including the deduplication ratio
.Pq Sy dedup ,
compression ratio
.Pq Sy compress ,
inflation due to the zfs copies property
.Pq Sy copies ,
and an overall effective ratio
.Pq Sy dedup No \(mu Sy compress No / Sy copies .
.It Fl DD
Display a histogram of deduplication statistics, showing the allocated
.Pq physically present on disk
and referenced
.Pq logically referenced in the pool
block counts and sizes by reference count.
.It Fl DDD
Display the statistics independently for each deduplication table.
.It Fl DDDD
Dump the contents of the deduplication tables describing duplicate blocks.
.It Fl DDDDD
Also dump the contents of the deduplication tables describing unique blocks.
.It Fl E , -embedded-block-pointer Ns = Ns Ar word0 : Ns Ar word1 Ns :…: Ns Ar word15
Decode and display block from an embedded block pointer specified by the
.Ar word
arguments.
.It Fl h , -history
Display pool history similar to
.Nm zpool Cm history ,
but include internal changes, transaction, and dataset information.
.It Fl i , -intent-logs
Display information about intent log
.Pq ZIL
entries relating to each dataset.
If specified multiple times, display counts of each intent log transaction type.
.It Fl k , -checkpointed-state
Examine the checkpointed state of the pool.
Note that the on-disk format of the pool is not reverted to the checkpointed state.
.It Fl l , -label Ns = Ns Ar device
Read the vdev labels and L2ARC header from the specified device.
.Nm Fl l
will return 0 if a valid label was found, 1 if an error occurred, and 2 if no valid
labels were found.
The presence of an L2ARC header is indicated by a specific
sequence (L2ARC_DEV_HDR_MAGIC).
If there is an accounting error in the size or the number of L2ARC log blocks,
.Nm Fl l
will return 1.
Each unique configuration is displayed only once.
.It Fl ll Ar device
In addition, display label space usage stats.
If a valid L2ARC header was found,
also display the properties of log blocks used for restoring L2ARC contents
(persistent L2ARC).
.It Fl lll Ar device
Display every configuration, unique or not.
If a valid L2ARC header was found,
also display the properties of log entries in log blocks used for restoring
L2ARC contents (persistent L2ARC).
.Pp
If the
.Fl q
option is also specified, don't print the labels or the L2ARC header.
.Pp
If the
.Fl u
option is also specified, also display the uberblocks on this device.
Specify multiple times to increase verbosity.
.It Fl L , -disable-leak-tracking
Disable leak detection and the loading of space maps.
By default,
.Nm
verifies that all non-free blocks are referenced, which can be very expensive.
.It Fl m , -metaslabs
Display the offset, spacemap, and free space of each metaslab, all the log
spacemaps and their obsolete entry statistics.
.It Fl mm
Also display information about the on-disk free space histogram associated with
each metaslab.
.It Fl mmm
Display the maximum contiguous free space, the in-core free space histogram, and
the percentage of free space in each space map.
.It Fl mmmm
Display every spacemap record.
.It Fl M , -metaslab-groups
Display all "normal" vdev metaslab group information - per-vdev metaslab count,
fragmentation,
and free space histogram, as well as overall pool fragmentation and histogram.
.It Fl MM
"Special" vdevs are added to -M's normal output.
.It Fl N
Same as
.Fl d
but force zdb to interpret the
.Op Ar dataset Ns | Ns Ar objset-ID
in
.Op Ar poolname Ns Op / Ns Ar dataset Ns | Ns Ar objset-ID
as a numeric objset ID.
.It Fl O Ar dataset path
Look up the specified
.Ar path
inside of the
.Ar dataset
and display its metadata and indirect blocks.
Specified
.Ar path
must be relative to the root of
.Ar dataset .
This option can be combined with
.Fl v
for increasing verbosity.
.It Fl r , -copy-object Ns = Ns Ar dataset path destination
Copy the specified
.Ar path
inside of the
.Ar dataset
to the specified destination.
Specified
.Ar path
must be relative to the root of
.Ar dataset .
This option can be combined with
.Fl v
for increasing verbosity.
.It Xo
.Fl R , -read-block Ns = Ns Ar poolname vdev : Ns Ar offset : Ns Oo Ar lsize Ns / Oc Ns Ar psize Ns Op : Ns Ar flags
.Xc
Read and display a block from the specified device.
By default the block is displayed as a hex dump, but see the description of the
.Sy r
flag, below.
.Pp
The block is specified in terms of a colon-separated tuple
.Ar vdev
.Pq an integer vdev identifier
.Ar offset
.Pq the offset within the vdev
.Ar size
.Pq the physical size, or logical size / physical size
of the block to read and, optionally,
.Ar flags
.Pq a set of flags, described below .
.Pp
.Bl -tag -compact -width "b offset"
.It Sy b Ar offset
Print block pointer at hex offset
.It Sy c
Calculate and display checksums
.It Sy d
Decompress the block.
Set environment variable
.Nm ZDB_NO_ZLE
to skip zle when guessing.
.It Sy e
Byte swap the block
.It Sy g
Dump gang block header
.It Sy i
Dump indirect block
.It Sy r
Dump raw uninterpreted block data
.It Sy v
Verbose output for guessing compression algorithm
.El
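.Pp
For example, to hex-dump a block from a hypothetical pool, given a hypothetical
vdev identifier, offset, and physical size:
.Dl # Nm zdb Fl R Ar tank 0:7e800000:20000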
.It Fl s , -io-stats
Report statistics on
.Nm zdb
I/O.
Display operation counts, bandwidth, and error counts of I/O to the pool from
.Nm .
.It Fl S , -simulate-dedup
Simulate the effects of deduplication, constructing a DDT and then display
that DDT as with
.Fl DD .
.It Fl u , -uberblock
Display the current uberblock.
.El
.Pp
Other options:
.Bl -tag -width Ds
.It Fl A , -ignore-assertions
Do not abort should any assertion fail.
.It Fl AA
Enable panic recovery; certain errors which would otherwise be fatal are
demoted to warnings.
.It Fl AAA
Do not abort if asserts fail and also enable panic recovery.
.It Fl e , -exported Ns = Ns Oo Fl p Ar path Oc Ns …
Operate on an exported pool, not present in
.Pa /etc/zfs/zpool.cache .
The
.Fl p
flag specifies the path under which devices are to be searched.
.It Fl x , -dump-blocks Ns = Ns Ar dumpdir
All blocks accessed will be copied to files in the specified directory.
The blocks will be placed in sparse files whose name is the same as
that of the file or device read.
.Nm
can then be run on the generated files.
Note that the
.Fl bbc
flags are sufficient to access
.Pq and thus copy
all metadata on the pool.
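.Pp
For example, to copy all metadata blocks of a hypothetical pool into a scratch
directory:
.Dl # Nm zdb Fl bbc Fl x Pa /tmp/dump Ar tank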
.It Fl F , -automatic-rewind
Attempt to make an unreadable pool readable by trying progressively older
transactions.
.It Fl G , -dump-debug-msg
Dump the contents of the zfs_dbgmsg buffer before exiting
.Nm .
zfs_dbgmsg is a buffer used by ZFS to dump advanced debug information.
.It Fl I , -inflight Ns = Ns Ar inflight-I/O-ops
Limit the number of outstanding checksum I/O operations to the specified value.
The default value is 200.
This option affects the performance of the
.Fl c
option.
.It Fl K , -key Ns = Ns Ar key
Decryption key needed to access an encrypted dataset.
This will cause
.Nm
to attempt to unlock the dataset using the encryption root, key format and other
encryption parameters on the given dataset.
.Nm
can still inspect pool and dataset structures on encrypted datasets without
unlocking them, but will not be able to access file names and attributes and
object contents.
.Sy WARNING :
The raw decryption key and any decrypted data
will be in user memory while
.Nm
is running.
Other user programs may be able to extract it by inspecting
.Nm
as it runs.
Exercise extreme caution when using this option in shared or uncontrolled
environments.
.It Fl o , -option Ns = Ns Ar var Ns = Ns Ar value Ns …
Set the given global libzpool variable to the provided value.
The value must be an unsigned 32-bit integer.
Currently only little-endian systems are supported to avoid accidentally setting
the high 32 bits of 64-bit variables.
.It Fl P , -parseable
Print numbers in an unscaled form more amenable to parsing, e.g.\&
.Sy 1000000
rather than
.Sy 1M .
.It Fl t , -txg Ns = Ns Ar transaction
Specify the highest transaction to use when searching for uberblocks.
See also the
.Fl u
and
.Fl l
options for a means to see the available uberblocks and their associated
transaction numbers.
.It Fl U , -cachefile Ns = Ns Ar cachefile
Use a cache file other than
.Pa /etc/zfs/zpool.cache .
.It Fl v , -verbose
Enable verbosity.
Specify multiple times for increased verbosity.
.It Fl V , -verbatim
Attempt verbatim import.
This mimics the behavior of the kernel when loading a pool from a cachefile.
Only usable with
.Fl e .
.It Fl X , -extreme-rewind
Attempt
.Qq extreme
transaction rewind; that is, attempt the same recovery as
.Fl F
but read transactions otherwise deemed too old.
.It Fl Y , -all-reconstruction
Attempt all possible combinations when reconstructing indirect split blocks.
This flag disables the individual I/O deadman timer in order to allow as
much time as required for the attempted reconstruction.
.It Fl y , -livelist
Perform validation for livelists that are being deleted.
Scans through the livelist and metaslabs, checks for duplicate entries,
and compares the two, checking for potential double frees.
If it encounters issues, warnings will be printed, but the command will not
necessarily fail.
.El
.Pp
Specifying a display option more than once enables verbosity for only that
option, with more occurrences enabling more verbosity.
.Pp
If no options are specified, all information about the named pool will be
displayed at default verbosity.
.
.Sh EXAMPLES
.Ss Example 1 : No Display the configuration of imported pool Ar rpool
.Bd -literal
.No # Nm zdb Fl C Ar rpool
MOS Configuration:
version: 28
name: 'rpool'
.Ed
.
.Ss Example 2 : No Display basic dataset information about Ar rpool
.Bd -literal
.No # Nm zdb Fl d Ar rpool
Dataset mos [META], ID 0, cr_txg 4, 26.9M, 1051 objects
Dataset rpool/swap [ZVOL], ID 59, cr_txg 356, 486M, 2 objects
.Ed
.
.Ss Example 3 : No Display basic information about object 0 in Ar rpool/export/home
.Bd -literal
.No # Nm zdb Fl d Ar rpool/export/home 0
Dataset rpool/export/home [ZPL], ID 137, cr_txg 1546, 32K, 8 objects
Object lvl iblk dblk dsize lsize %full type
0 7 16K 16K 15.0K 16K 25.00 DMU dnode
.Ed
.
.Ss Example 4 : No Display the predicted effect of enabling deduplication on Ar rpool
.Bd -literal
.No # Nm zdb Fl S Ar rpool
Simulated DDT histogram:
bucket allocated referenced
______ ______________________________ ______________________________
refcnt blocks LSIZE PSIZE DSIZE blocks LSIZE PSIZE DSIZE
------ ------ ----- ----- ----- ------ ----- ----- -----
1 694K 27.1G 15.0G 15.0G 694K 27.1G 15.0G 15.0G
2 35.0K 1.33G 699M 699M 74.7K 2.79G 1.45G 1.45G
dedup = 1.11, compress = 1.80, copies = 1.00, dedup * compress / copies = 2.00
.Ed
.
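.Ss Example 5 : No Generate a backup stream from a hypothetical pool with Fl B
A minimal sketch, assuming a hypothetical pool
.Ar tank
and objset ID
.Ar 136 ,
with the stream redirected to a file; the trailing flags correspond to those of
.Xr zfs-send 8 :
.Bd -literal
.No # Nm zdb Fl B Ar tank/136 ew No > Pa /tmp/dataset.zsend
.Ed
.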
.Sh SEE ALSO
.Xr zfs 8 ,
.Xr zpool 8
diff --git a/sys/contrib/openzfs/man/man8/zfs-create.8 b/sys/contrib/openzfs/man/man8/zfs-create.8
index a7b6097c37f1..b3997d32767c 100644
--- a/sys/contrib/openzfs/man/man8/zfs-create.8
+++ b/sys/contrib/openzfs/man/man8/zfs-create.8
@@ -1,282 +1,279 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
.Dd March 16, 2022
.Dt ZFS-CREATE 8
.Os
.
.Sh NAME
.Nm zfs-create
.Nd create ZFS dataset
.Sh SYNOPSIS
.Nm zfs
.Cm create
.Op Fl Pnpuv
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns …
.Ar filesystem
.Nm zfs
.Cm create
.Op Fl ps
.Op Fl b Ar blocksize
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns …
.Fl V Ar size Ar volume
.
.Sh DESCRIPTION
.Bl -tag -width ""
.It Xo
.Nm zfs
.Cm create
.Op Fl Pnpuv
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns …
.Ar filesystem
.Xc
Creates a new ZFS file system.
The file system is automatically mounted according to the
.Sy mountpoint
property inherited from the parent, unless the
.Fl u
option is used.
.Bl -tag -width "-o"
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property as if the command
.Nm zfs Cm set Ar property Ns = Ns Ar value
was invoked at the same time the dataset was created.
Any editable ZFS property can also be set at creation time.
Multiple
.Fl o
options can be specified.
An error results if the same property is specified in multiple
.Fl o
options.
.It Fl p
Creates all the non-existing parent datasets.
Datasets created in this manner are automatically mounted according to the
.Sy mountpoint
property inherited from their parent.
Any property specified on the command line using the
.Fl o
option is ignored.
If the target filesystem already exists, the operation completes successfully.
.It Fl n
Do a dry-run
.Pq Qq No-op
creation.
No datasets will be created.
This is useful in conjunction with the
.Fl v
or
.Fl P
flags to validate properties that are passed via
.Fl o
options and those implied by other options.
The actual dataset creation can still fail due to insufficient privileges or
available capacity.
.It Fl P
Print machine-parsable verbose information about the created dataset.
Each line of output contains a key and one or two values, all separated by tabs.
The
.Sy create_ancestors
and
.Sy create
keys have
.Em filesystem
as their only value.
The
.Sy create_ancestors
key only appears if the
.Fl p
option is used.
The
.Sy property
key has two values, a property name and that property's value.
The
.Sy property
key may appear zero or more times, once for each property that will be set local
to
.Em filesystem
due to the use of the
.Fl o
option.
.It Fl u
Do not mount the newly created file system.
.It Fl v
Print verbose information about the created dataset.
.El
.It Xo
.Nm zfs
.Cm create
.Op Fl ps
.Op Fl b Ar blocksize
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns …
.Fl V Ar size Ar volume
.Xc
Creates a volume of the given size.
The volume is exported as a block device in
.Pa /dev/zvol/path ,
where
.Em path
is the name of the volume in the ZFS namespace.
The size represents the logical size as exported by the device.
By default, a reservation of equal size is created.
.Pp
.Ar size
is automatically rounded up to the nearest multiple of the
.Sy blocksize .
.Bl -tag -width "-b"
.It Fl b Ar blocksize
Equivalent to
.Fl o Sy volblocksize Ns = Ns Ar blocksize .
If this option is specified in conjunction with
.Fl o Sy volblocksize ,
the resulting behavior is undefined.
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property as if the
.Nm zfs Cm set Ar property Ns = Ns Ar value
command was invoked at the same time the dataset was created.
Any editable ZFS property can also be set at creation time.
Multiple
.Fl o
options can be specified.
An error results if the same property is specified in multiple
.Fl o
options.
.It Fl p
Creates all the non-existing parent datasets.
Datasets created in this manner are automatically mounted according to the
.Sy mountpoint
property inherited from their parent.
Any property specified on the command line using the
.Fl o
option is ignored.
If the target filesystem already exists, the operation completes successfully.
.It Fl s
Creates a sparse volume with no reservation.
See
.Sy volsize
in the
.Em Native Properties
section of
.Xr zfsprops 7
for more information about sparse volumes.
.It Fl n
Do a dry-run
.Pq Qq No-op
creation.
No datasets will be created.
This is useful in conjunction with the
.Fl v
or
.Fl P
flags to validate properties that are passed via
.Fl o
options and those implied by other options.
The actual dataset creation can still fail due to insufficient privileges or
available capacity.
.It Fl P
Print machine-parsable verbose information about the created dataset.
Each line of output contains a key and one or two values, all separated by tabs.
The
.Sy create_ancestors
and
.Sy create
keys have
.Em volume
as their only value.
The
.Sy create_ancestors
key only appears if the
.Fl p
option is used.
The
.Sy property
key has two values, a property name and that property's value.
The
.Sy property
key may appear zero or more times, once for each property that will be set local
to
.Em volume
due to the use of the
.Fl b
or
.Fl o
options, as well as
.Sy refreservation
if the volume is not sparse.
.It Fl v
Print verbose information about the created dataset.
.El
.El
-.Ss ZFS Volumes as Swap
-ZFS volumes may be used as swap devices.
-After creating the volume with the
-.Nm zfs Cm create Fl V
-enable the swap area using the
-.Xr swapon 8
-command.
-Swapping to files on ZFS filesystems is not supported.
+.Ss ZFS for Swap
+Swapping to a ZFS volume is prone to deadlock and not recommended.
+See the OpenZFS FAQ.
+.Pp
+Swapping to a file on a ZFS filesystem is not supported.
.
.Sh EXAMPLES
.\" These are, respectively, examples 1, 10 from zfs.8
.\" Make sure to update them bidirectionally
.Ss Example 1 : No Creating a ZFS File System Hierarchy
The following commands create a file system named
.Ar pool/home
and a file system named
.Ar pool/home/bob .
The mount point
.Pa /export/home
is set for the parent file system, and is automatically inherited by the child
file system.
.Dl # Nm zfs Cm create Ar pool/home
.Dl # Nm zfs Cm set Sy mountpoint Ns = Ns Ar /export/home pool/home
.Dl # Nm zfs Cm create Ar pool/home/bob
.
.Ss Example 2 : No Promoting a ZFS Clone
The following commands illustrate how to test out changes to a file system, and
then replace the original file system with the changed one, using clones, clone
promotion, and renaming:
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm create Ar pool/project/production
populate /pool/project/production with data
.No # Nm zfs Cm snapshot Ar pool/project/production Ns @ Ns Ar today
.No # Nm zfs Cm clone Ar pool/project/production@today pool/project/beta
make changes to /pool/project/beta and test them
.No # Nm zfs Cm promote Ar pool/project/beta
.No # Nm zfs Cm rename Ar pool/project/production pool/project/legacy
.No # Nm zfs Cm rename Ar pool/project/beta pool/project/production
once the legacy version is no longer needed, it can be destroyed
.No # Nm zfs Cm destroy Ar pool/project/legacy
.Ed
.
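.Ss Example 3 : No Creating a Sparse ZFS Volume
A minimal sketch using a hypothetical pool
.Ar tank ,
where
.Fl s
omits the reservation and
.Fl b
sets the volume block size:
.Dl # Nm zfs Cm create Fl s Fl b Ar 8K Fl V Ar 100G Ar tank/vol
.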
.Sh SEE ALSO
.Xr zfs-destroy 8 ,
.Xr zfs-list 8 ,
.Xr zpool-create 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-events.8 b/sys/contrib/openzfs/man/man8/zpool-events.8
index 0ba93e4166e7..341f902fe66e 100644
--- a/sys/contrib/openzfs/man/man8/zpool-events.8
+++ b/sys/contrib/openzfs/man/man8/zpool-events.8
@@ -1,488 +1,487 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd May 27, 2021
.Dt ZPOOL-EVENTS 8
.Os
.
.Sh NAME
.Nm zpool-events
.Nd list recent events generated by kernel
.Sh SYNOPSIS
.Nm zpool
.Cm events
.Op Fl vHf
.Op Ar pool
.Nm zpool
.Cm events
.Fl c
.
.Sh DESCRIPTION
Lists all recent events generated by the ZFS kernel modules.
These events are consumed by the
.Xr zed 8
and used to automate administrative tasks such as replacing a failed device
with a hot spare.
For more information about the subclasses and event payloads
that can be generated see
.Sx EVENTS
and the following sections.
.
.Sh OPTIONS
.Bl -tag -compact -width Ds
.It Fl c
Clear all previous events.
.It Fl f
Follow mode.
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a
single tab instead of arbitrary space.
.It Fl v
Print the entire payload for each event.
.El
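.Pp
For example, to follow new events from a hypothetical pool in scripted mode:
.Dl # Nm zpool Cm events Fl fH Ar tank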
.
.Sh EVENTS
These are the different event subclasses.
The full event name would be
.Sy ereport.fs.zfs.\& Ns Em SUBCLASS ,
but only the last part is listed here.
.Pp
.Bl -tag -compact -width "vdev.bad_guid_sum"
.It Sy checksum
Issued when a checksum error has been detected.
.It Sy io
Issued when there is an I/O error in a vdev in the pool.
.It Sy data
Issued when there have been data errors in the pool.
.It Sy deadman
Issued when an I/O request is determined to be "hung"; this can be caused
by lost completion events due to flaky hardware or drivers.
See
.Sy zfs_deadman_failmode
in
.Xr zfs 4
for additional information regarding "hung" I/O detection and configuration.
.It Sy delay
Issued when a completed I/O request exceeds the maximum allowed time
specified by the
.Sy zio_slow_io_ms
module parameter.
This can be an indicator of problems with the underlying storage device.
The number of delay events is ratelimited by the
.Sy zfs_slow_io_events_per_second
module parameter.
.It Sy config
Issued every time a vdev change has been made to the pool.
.It Sy zpool
Issued when a pool cannot be imported.
.It Sy zpool.destroy
Issued when a pool is destroyed.
.It Sy zpool.export
Issued when a pool is exported.
.It Sy zpool.import
Issued when a pool is imported.
.It Sy zpool.reguid
Issued when a REGUID (a new unique identifier for the pool) has been generated.
.It Sy vdev.unknown
Issued when the vdev is unknown,
such as when trying to clear device errors on a vdev that has failed or been
removed from the system/pool and is no longer available.
.It Sy vdev.open_failed
Issued when a vdev could not be opened (for example, because it did not exist).
.It Sy vdev.corrupt_data
Issued when corrupt data has been detected on a vdev.
.It Sy vdev.no_replicas
Issued when there are no more replicas to sustain the pool.
This would lead to the pool being
.Em DEGRADED .
.It Sy vdev.bad_guid_sum
Issued when a missing device in the pool has been detected.
.It Sy vdev.too_small
Issued when the system (kernel) has removed a device, and ZFS
notices that the device is no longer there.
This is usually followed by a
.Sy probe_failure
event.
.It Sy vdev.bad_label
Issued when the label is OK but invalid.
.It Sy vdev.bad_ashift
Issued when the ashift alignment requirement has increased.
.It Sy vdev.remove
Issued when a vdev is detached from a mirror (or a spare detached from a
vdev where it has been used to replace a failed drive; this only works if
the original drive has been re-added).
.It Sy vdev.clear
Issued when clearing device errors in a pool.
Such as running
.Nm zpool Cm clear
on a device in the pool.
.It Sy vdev.check
Issued when a check to see if a given vdev could be opened is started.
.It Sy vdev.spare
Issued when a spare has kicked in to replace a failed device.
.It Sy vdev.autoexpand
Issued when a vdev can be automatically expanded.
.It Sy io_failure
Issued when there is an I/O failure in a vdev in the pool.
.It Sy probe_failure
Issued when a probe fails on a vdev.
This would occur if a vdev
has been removed from the system outside of ZFS (such as when the kernel
has removed the device).
.It Sy log_replay
Issued when the intent log cannot be replayed.
This can occur in the case of a missing or damaged log device.
.It Sy resilver.start
Issued when a resilver is started.
.It Sy resilver.finish
Issued when the running resilver has finished.
.It Sy scrub.start
Issued when a scrub is started on a pool.
.It Sy scrub.finish
Issued when a pool has finished scrubbing.
.It Sy scrub.abort
Issued when a scrub is aborted on a pool.
.It Sy scrub.resume
Issued when a scrub is resumed on a pool.
.It Sy scrub.paused
Issued when a scrub is paused on a pool.
.It Sy bootfs.vdev.attach
.El
.
.Sh PAYLOADS
This is the payload (data, information) that accompanies an
event.
.Pp
For
.Xr zed 8 ,
these are set to uppercase and prefixed with
.Sy ZEVENT_ .
.Pp
.Bl -tag -compact -width "vdev_cksum_errors"
.It Sy pool
Pool name.
.It Sy pool_failmode
Failmode -
.Sy wait ,
.Sy continue ,
or
.Sy panic .
See the
.Sy failmode
property in
.Xr zpoolprops 7
for more information.
.It Sy pool_guid
The GUID of the pool.
.It Sy pool_context
The load state for the pool (0=none, 1=open, 2=import, 3=tryimport, 4=recover,
5=error).
.It Sy vdev_guid
The GUID of the vdev in question (the vdev failing or operated upon with
.Nm zpool Cm clear ,
etc.).
.It Sy vdev_type
Type of vdev -
.Sy disk ,
.Sy file ,
.Sy mirror ,
etc.
See the
.Sy Virtual Devices
section of
.Xr zpoolconcepts 7
for more information on possible values.
.It Sy vdev_path
Full path of the vdev, including any
.Em -partX .
.It Sy vdev_devid
ID of vdev (if any).
.It Sy vdev_fru
Physical FRU location.
.It Sy vdev_state
State of vdev (0=uninitialized, 1=closed, 2=offline, 3=removed, 4=failed to
open, 5=faulted, 6=degraded, 7=healthy).
.It Sy vdev_ashift
The ashift value of the vdev.
.It Sy vdev_complete_ts
The time the last I/O request completed for the specified vdev.
.It Sy vdev_delta_ts
The time since the last I/O request completed for the specified vdev.
.It Sy vdev_spare_paths
List of spares, including full path and any
.Em -partX .
.It Sy vdev_spare_guids
GUID(s) of spares.
.It Sy vdev_read_errors
The number of read errors detected on the vdev.
.It Sy vdev_write_errors
The number of write errors detected on the vdev.
.It Sy vdev_cksum_errors
The number of checksum errors detected on the vdev.
.It Sy parent_guid
GUID of the vdev parent.
.It Sy parent_type
Type of parent.
See
.Sy vdev_type .
.It Sy parent_path
Path of the vdev parent (if any).
.It Sy parent_devid
ID of the vdev parent (if any).
.It Sy zio_objset
The object set number for a given I/O request.
.It Sy zio_object
The object number for a given I/O request.
.It Sy zio_level
The indirect level for the block.
Level 0 is the lowest level and includes data blocks.
Values > 0 indicate metadata blocks at the appropriate level.
.It Sy zio_blkid
The block ID for a given I/O request.
.It Sy zio_err
The error number for a failure when handling a given I/O request,
compatible with
.Xr errno 3
with the value of
.Sy EBADE
used to indicate a ZFS checksum error.
.It Sy zio_offset
The offset in bytes of where to write the I/O request for the specified vdev.
.It Sy zio_size
The size in bytes of the I/O request.
.It Sy zio_flags
The current flags describing how the I/O request should be handled.
See the
.Sy I/O FLAGS
section for the full list of I/O flags.
.It Sy zio_stage
The current stage of the I/O in the pipeline.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_pipeline
The valid pipeline stages for the I/O.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_delay
The time elapsed (in nanoseconds) waiting for the block layer to complete the
I/O request.
Unlike
.Sy zio_delta ,
this does not include any vdev queuing time and is
therefore solely a measure of the block layer performance.
.It Sy zio_timestamp
The time when a given I/O request was submitted.
.It Sy zio_delta
The time required to service a given I/O request.
.It Sy prev_state
The previous state of the vdev.
.It Sy cksum_expected
The expected checksum value for the block.
.It Sy cksum_actual
The actual checksum value for an errant block.
.It Sy cksum_algorithm
Checksum algorithm used.
See
.Xr zfsprops 7
for more information on the available checksum algorithms.
.It Sy cksum_byteswap
Whether or not the data is byteswapped.
.It Sy bad_ranges
.No [\& Ns Ar start , end )
pairs of corruption offsets.
Offsets are always aligned on a 64-bit boundary,
and can include some gaps of non-corruption.
(See
.Sy bad_ranges_min_gap )
.It Sy bad_ranges_min_gap
In order to bound the size of the
.Sy bad_ranges
array, gaps of non-corruption
less than or equal to
.Sy bad_ranges_min_gap
bytes have been merged with
adjacent corruption.
Always at least 8 bytes, since corruption is detected on a 64-bit word basis.
.It Sy bad_range_sets
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits in that range which were clear in the good data and set
in the bad data.
.It Sy bad_range_clears
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits for that range which were set in the good data and clear in
the bad data.
.It Sy bad_set_bits
If this field exists, it is an array of
.Pq Ar bad data No & ~( Ns Ar good data ) ;
that is, the bits set in the bad data which are cleared in the good data.
Each element corresponds to a byte whose offset is in a range in
.Sy bad_ranges ,
and the array is ordered by offset.
Thus, the first element is the first byte in the first
.Sy bad_ranges
range, and the last element is the last byte in the last
.Sy bad_ranges
range.
.It Sy bad_cleared_bits
Like
.Sy bad_set_bits ,
but contains
.Pq Ar good data No & ~( Ns Ar bad data ) ;
that is, the bits set in the good data which are cleared in the bad data.
.It Sy bad_set_histogram
If this field exists, it is an array of counters.
Each entry counts bits set in a particular bit of a big-endian uint64 type.
The first entry counts bits
set in the high-order bit of the first byte, the 9th byte, etc., and the last
entry counts bits set in the low-order bit of the 8th byte, the 16th byte, etc.
This information is useful for observing a stuck bit in a parallel data path,
such as IDE or parallel SCSI.
.It Sy bad_cleared_histogram
If this field exists, it is an array of counters.
Each entry counts bit clears in a particular bit of a big-endian uint64 type.
The first entry counts bit
clears of the high-order bit of the first byte, the 9th byte, etc., and the
last entry counts clears of the low-order bit of the 8th byte, the 16th byte,
etc.
This information is useful for observing a stuck bit in a parallel data
path, such as IDE or parallel SCSI.
.El
.
.Sh I/O STAGES
The ZFS I/O pipeline is composed of various stages, which are defined below.
The individual stages are used to construct these basic I/O
operations: Read, Write, Free, Claim, and Ioctl.
These stages may be
set on an event to describe the life cycle of a given I/O request.
.Pp
.TS
tab(:);
l l l .
Stage:Bit Mask:Operations
_:_:_
ZIO_STAGE_OPEN:0x00000001:RWFCI
ZIO_STAGE_READ_BP_INIT:0x00000002:R----
ZIO_STAGE_WRITE_BP_INIT:0x00000004:-W---
ZIO_STAGE_FREE_BP_INIT:0x00000008:--F--
ZIO_STAGE_ISSUE_ASYNC:0x00000010:RWF--
ZIO_STAGE_WRITE_COMPRESS:0x00000020:-W---
ZIO_STAGE_ENCRYPT:0x00000040:-W---
ZIO_STAGE_CHECKSUM_GENERATE:0x00000080:-W---
ZIO_STAGE_NOP_WRITE:0x00000100:-W---
ZIO_STAGE_BRT_FREE:0x00000200:--F--
ZIO_STAGE_DDT_READ_START:0x00000400:R----
ZIO_STAGE_DDT_READ_DONE:0x00000800:R----
ZIO_STAGE_DDT_WRITE:0x00001000:-W---
ZIO_STAGE_DDT_FREE:0x00002000:--F--
ZIO_STAGE_GANG_ASSEMBLE:0x00004000:RWFC-
ZIO_STAGE_GANG_ISSUE:0x00008000:RWFC-
ZIO_STAGE_DVA_THROTTLE:0x00010000:-W---
ZIO_STAGE_DVA_ALLOCATE:0x00020000:-W---
ZIO_STAGE_DVA_FREE:0x00040000:--F--
ZIO_STAGE_DVA_CLAIM:0x00080000:---C-
ZIO_STAGE_READY:0x00100000:RWFCI
ZIO_STAGE_VDEV_IO_START:0x00200000:RW--I
ZIO_STAGE_VDEV_IO_DONE:0x00400000:RW--I
ZIO_STAGE_VDEV_IO_ASSESS:0x00800000:RW--I
ZIO_STAGE_CHECKSUM_VERIFY:0x01000000:R----
ZIO_STAGE_DONE:0x02000000:RWFCI
.TE
.
.Sh I/O FLAGS
Every I/O request in the pipeline contains a set of flags which describe its
function and are used to govern its behavior.
These flags will be set in an event as a
.Sy zio_flags
payload entry.
.Pp
.TS
tab(:);
l l .
Flag:Bit Mask
_:_
ZIO_FLAG_DONT_AGGREGATE:0x00000001
ZIO_FLAG_IO_REPAIR:0x00000002
ZIO_FLAG_SELF_HEAL:0x00000004
ZIO_FLAG_RESILVER:0x00000008
ZIO_FLAG_SCRUB:0x00000010
ZIO_FLAG_SCAN_THREAD:0x00000020
ZIO_FLAG_PHYSICAL:0x00000040
ZIO_FLAG_CANFAIL:0x00000080
ZIO_FLAG_SPECULATIVE:0x00000100
ZIO_FLAG_CONFIG_WRITER:0x00000200
ZIO_FLAG_DONT_RETRY:0x00000400
-ZIO_FLAG_DONT_CACHE:0x00000800
ZIO_FLAG_NODATA:0x00001000
ZIO_FLAG_INDUCE_DAMAGE:0x00002000
ZIO_FLAG_IO_ALLOCATING:0x00004000
ZIO_FLAG_IO_RETRY:0x00008000
ZIO_FLAG_PROBE:0x00010000
ZIO_FLAG_TRYHARD:0x00020000
ZIO_FLAG_OPTIONAL:0x00040000
ZIO_FLAG_DONT_QUEUE:0x00080000
ZIO_FLAG_DONT_PROPAGATE:0x00100000
ZIO_FLAG_IO_BYPASS:0x00200000
ZIO_FLAG_IO_REWRITE:0x00400000
ZIO_FLAG_RAW_COMPRESS:0x00800000
ZIO_FLAG_RAW_ENCRYPT:0x01000000
ZIO_FLAG_GANG_CHILD:0x02000000
ZIO_FLAG_DDT_CHILD:0x04000000
ZIO_FLAG_GODFATHER:0x08000000
ZIO_FLAG_NOPWRITE:0x10000000
ZIO_FLAG_REEXECUTED:0x20000000
ZIO_FLAG_DELEGATED:0x40000000
ZIO_FLAG_FASTWRITE:0x80000000
.TE
.
.Sh SEE ALSO
.Xr zfs 4 ,
.Xr zed 8 ,
.Xr zpool-wait 8
diff --git a/sys/contrib/openzfs/module/Kbuild.in b/sys/contrib/openzfs/module/Kbuild.in
index 8d29f56c2fb8..485331ac655e 100644
--- a/sys/contrib/openzfs/module/Kbuild.in
+++ b/sys/contrib/openzfs/module/Kbuild.in
@@ -1,484 +1,497 @@
# When integrated in to a monolithic kernel the spl module must appear
# first. This ensures its module initialization function is run before
# any of the other module initialization functions which depend on it.
ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement
ZFS_MODULE_CFLAGS += -Wmissing-prototypes
ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @NO_FORMAT_ZERO_LENGTH@
ifneq ($(KBUILD_EXTMOD),)
zfs_include = @abs_top_srcdir@/include
icp_include = @abs_srcdir@/icp/include
zstd_include = @abs_srcdir@/zstd/include
ZFS_MODULE_CFLAGS += -include @abs_top_builddir@/zfs_config.h
ZFS_MODULE_CFLAGS += -I@abs_top_builddir@/include
src = @abs_srcdir@
obj = @abs_builddir@
else
zfs_include = $(srctree)/include/zfs
icp_include = $(srctree)/$(src)/icp/include
zstd_include = $(srctree)/$(src)/zstd/include
ZFS_MODULE_CFLAGS += -include $(zfs_include)/zfs_config.h
endif
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/kernel
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/spl
ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/zfs
ZFS_MODULE_CFLAGS += -I$(zfs_include)
ZFS_MODULE_CPPFLAGS += -D_KERNEL
ZFS_MODULE_CPPFLAGS += @KERNEL_DEBUG_CPPFLAGS@
# KASAN enables -Werror=frame-larger-than=1024, which
# breaks oh so many parts of our build.
ifeq ($(CONFIG_KASAN),y)
ZFS_MODULE_CFLAGS += -Wno-error=frame-larger-than=
endif
+# Generated binary search code is particularly bad with this optimization.
+# Oddly, range_tree.c is not affected when unrolling is not done and dsl_scan.c
+# is not affected when unrolling is done.
+# Disable it until the following upstream issue is resolved:
+# https://github.com/llvm/llvm-project/issues/62790
+ifeq ($(CONFIG_X86),y)
+ifeq ($(CONFIG_CC_IS_CLANG),y)
+CFLAGS_zfs/dsl_scan.o += -mllvm -x86-cmov-converter=false
+CFLAGS_zfs/metaslab.o += -mllvm -x86-cmov-converter=false
+CFLAGS_zfs/range_tree.o += -mllvm -x86-cmov-converter=false
+CFLAGS_zfs/zap_micro.o += -mllvm -x86-cmov-converter=false
+endif
+endif
+
ifneq ($(KBUILD_EXTMOD),)
@CONFIG_QAT_TRUE@ZFS_MODULE_CFLAGS += -I@QAT_SRC@/include
@CONFIG_QAT_TRUE@KBUILD_EXTRA_SYMBOLS += @QAT_SYMBOLS@
endif
asflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
ccflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
ifeq ($(CONFIG_ARM64),y)
CFLAGS_REMOVE_zcommon/zfs_fletcher_aarch64_neon.o += -mgeneral-regs-only
CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neon.o += -mgeneral-regs-only
CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neonx2.o += -mgeneral-regs-only
endif
# Suppress unused-value warnings in sparc64 architecture headers
ccflags-$(CONFIG_SPARC64) += -Wno-unused-value
obj-$(CONFIG_ZFS) := spl.o zfs.o
SPL_OBJS := \
spl-atomic.o \
spl-condvar.o \
spl-cred.o \
spl-err.o \
spl-generic.o \
spl-kmem-cache.o \
spl-kmem.o \
spl-kstat.o \
spl-proc.o \
spl-procfs-list.o \
spl-taskq.o \
spl-thread.o \
spl-trace.o \
spl-tsd.o \
spl-vmem.o \
spl-xdr.o \
spl-zlib.o \
spl-zone.o
spl-objs += $(addprefix os/linux/spl/,$(SPL_OBJS))
zfs-objs += avl/avl.o
ICP_OBJS := \
algs/aes/aes_impl.o \
algs/aes/aes_impl_generic.o \
algs/aes/aes_modes.o \
algs/blake3/blake3.o \
algs/blake3/blake3_generic.o \
algs/blake3/blake3_impl.o \
algs/edonr/edonr.o \
algs/modes/cbc.o \
algs/modes/ccm.o \
algs/modes/ctr.o \
algs/modes/ecb.o \
algs/modes/gcm.o \
algs/modes/gcm_generic.o \
algs/modes/modes.o \
algs/sha2/sha2_generic.o \
algs/sha2/sha256_impl.o \
algs/sha2/sha512_impl.o \
algs/skein/skein.o \
algs/skein/skein_block.o \
algs/skein/skein_iv.o \
api/kcf_cipher.o \
api/kcf_ctxops.o \
api/kcf_mac.o \
core/kcf_callprov.o \
core/kcf_mech_tabs.o \
core/kcf_prov_lib.o \
core/kcf_prov_tabs.o \
core/kcf_sched.o \
illumos-crypto.o \
io/aes.o \
io/sha2_mod.o \
io/skein_mod.o \
spi/kcf_spi.o
ICP_OBJS_X86_64 := \
asm-x86_64/aes/aes_aesni.o \
asm-x86_64/aes/aes_amd64.o \
asm-x86_64/aes/aeskey.o \
asm-x86_64/blake3/blake3_avx2.o \
asm-x86_64/blake3/blake3_avx512.o \
asm-x86_64/blake3/blake3_sse2.o \
asm-x86_64/blake3/blake3_sse41.o \
asm-x86_64/sha2/sha256-x86_64.o \
asm-x86_64/sha2/sha512-x86_64.o \
asm-x86_64/modes/aesni-gcm-x86_64.o \
asm-x86_64/modes/gcm_pclmulqdq.o \
asm-x86_64/modes/ghash-x86_64.o
ICP_OBJS_X86 := \
algs/aes/aes_impl_aesni.o \
algs/aes/aes_impl_x86-64.o \
algs/modes/gcm_pclmulqdq.o
ICP_OBJS_ARM := \
asm-arm/sha2/sha256-armv7.o \
asm-arm/sha2/sha512-armv7.o
ICP_OBJS_ARM64 := \
asm-aarch64/blake3/b3_aarch64_sse2.o \
asm-aarch64/blake3/b3_aarch64_sse41.o \
asm-aarch64/sha2/sha256-armv8.o \
asm-aarch64/sha2/sha512-armv8.o
ICP_OBJS_PPC_PPC64 := \
asm-ppc64/blake3/b3_ppc64le_sse2.o \
asm-ppc64/blake3/b3_ppc64le_sse41.o \
asm-ppc64/sha2/sha256-p8.o \
asm-ppc64/sha2/sha512-p8.o \
asm-ppc64/sha2/sha256-ppc.o \
asm-ppc64/sha2/sha512-ppc.o
zfs-objs += $(addprefix icp/,$(ICP_OBJS))
zfs-$(CONFIG_X86) += $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix icp/,$(ICP_OBJS_X86))
zfs-$(CONFIG_X86_64) += $(addprefix icp/,$(ICP_OBJS_X86_64))
zfs-$(CONFIG_ARM) += $(addprefix icp/,$(ICP_OBJS_ARM))
zfs-$(CONFIG_ARM64) += $(addprefix icp/,$(ICP_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64))
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : asflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include)
$(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \
$(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : ccflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include)
# Suppress objtool "return with modified stack frame" warnings.
OBJECT_FILES_NON_STANDARD_aesni-gcm-x86_64.o := y
# Suppress objtool "unsupported stack pointer realignment" warnings.
# See #6950 for the reasoning.
OBJECT_FILES_NON_STANDARD_sha256-x86_64.o := y
OBJECT_FILES_NON_STANDARD_sha512-x86_64.o := y
LUA_OBJS := \
lapi.o \
lauxlib.o \
lbaselib.o \
lcode.o \
lcompat.o \
lcorolib.o \
lctype.o \
ldebug.o \
ldo.o \
lfunc.o \
lgc.o \
llex.o \
lmem.o \
lobject.o \
lopcodes.o \
lparser.o \
lstate.o \
lstring.o \
lstrlib.o \
ltable.o \
ltablib.o \
ltm.o \
lvm.o \
lzio.o \
setjmp/setjmp.o
zfs-objs += $(addprefix lua/,$(LUA_OBJS))
NVPAIR_OBJS := \
fnvpair.o \
nvpair.o \
nvpair_alloc_fixed.o \
nvpair_alloc_spl.o
zfs-objs += $(addprefix nvpair/,$(NVPAIR_OBJS))
UNICODE_OBJS := \
u8_textprep.o \
uconv.o
zfs-objs += $(addprefix unicode/,$(UNICODE_OBJS))
ZCOMMON_OBJS := \
cityhash.o \
zfeature_common.o \
zfs_comutil.o \
zfs_deleg.o \
zfs_fletcher.o \
zfs_fletcher_superscalar.o \
zfs_fletcher_superscalar4.o \
zfs_namecheck.o \
zfs_prop.o \
zpool_prop.o \
zprop_common.o
ZCOMMON_OBJS_X86 := \
zfs_fletcher_avx512.o \
zfs_fletcher_intel.o \
zfs_fletcher_sse.o
ZCOMMON_OBJS_ARM64 := \
zfs_fletcher_aarch64_neon.o
zfs-objs += $(addprefix zcommon/,$(ZCOMMON_OBJS))
zfs-$(CONFIG_X86) += $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zcommon/,$(ZCOMMON_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zcommon/,$(ZCOMMON_OBJS_ARM64))
# Zstd uses -O3 by default, so we should follow
ZFS_ZSTD_FLAGS := -O3
# -fno-tree-vectorize gets set for gcc in zstd/common/compiler.h
# Set it for other compilers, too.
ZFS_ZSTD_FLAGS += -fno-tree-vectorize
# SSE register return with SSE disabled if -march=znverX is passed
ZFS_ZSTD_FLAGS += -U__BMI__
# Quiet warnings about frame size due to unused code in unmodified zstd lib
ZFS_ZSTD_FLAGS += -Wframe-larger-than=20480
ZSTD_OBJS := \
zfs_zstd.o \
zstd_sparc.o
ZSTD_UPSTREAM_OBJS := \
lib/common/entropy_common.o \
lib/common/error_private.o \
lib/common/fse_decompress.o \
lib/common/pool.o \
lib/common/zstd_common.o \
lib/compress/fse_compress.o \
lib/compress/hist.o \
lib/compress/huf_compress.o \
lib/compress/zstd_compress.o \
lib/compress/zstd_compress_literals.o \
lib/compress/zstd_compress_sequences.o \
lib/compress/zstd_compress_superblock.o \
lib/compress/zstd_double_fast.o \
lib/compress/zstd_fast.o \
lib/compress/zstd_lazy.o \
lib/compress/zstd_ldm.o \
lib/compress/zstd_opt.o \
lib/decompress/huf_decompress.o \
lib/decompress/zstd_ddict.o \
lib/decompress/zstd_decompress.o \
lib/decompress/zstd_decompress_block.o
zfs-objs += $(addprefix zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS))
# Disable aarch64 neon SIMD instructions for kernel mode
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -I$(zstd_include) $(ZFS_ZSTD_FLAGS)
$(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : asflags-y += -I$(zstd_include)
$(addprefix $(obj)/zstd/,$(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -include $(zstd_include)/aarch64_compat.h -include $(zstd_include)/zstd_compat_wrapper.h -Wp,-w
$(obj)/zstd/zfs_zstd.o : ccflags-y += -include $(zstd_include)/zstd_compat_wrapper.h
ZFS_OBJS := \
abd.o \
aggsum.o \
arc.o \
blake3_zfs.o \
blkptr.o \
bplist.o \
bpobj.o \
bptree.o \
bqueue.o \
brt.o \
btree.o \
dataset_kstats.o \
dbuf.o \
dbuf_stats.o \
ddt.o \
ddt_zap.o \
dmu.o \
dmu_diff.o \
dmu_object.o \
dmu_objset.o \
dmu_recv.o \
dmu_redact.o \
dmu_send.o \
dmu_traverse.o \
dmu_tx.o \
dmu_zfetch.o \
dnode.o \
dnode_sync.o \
dsl_bookmark.o \
dsl_crypt.o \
dsl_dataset.o \
dsl_deadlist.o \
dsl_deleg.o \
dsl_destroy.o \
dsl_dir.o \
dsl_pool.o \
dsl_prop.o \
dsl_scan.o \
dsl_synctask.o \
dsl_userhold.o \
edonr_zfs.o \
fm.o \
gzip.o \
hkdf.o \
lz4.o \
lz4_zfs.o \
lzjb.o \
metaslab.o \
mmp.o \
multilist.o \
objlist.o \
pathname.o \
range_tree.o \
refcount.o \
rrwlock.o \
sa.o \
sha2_zfs.o \
skein_zfs.o \
spa.o \
spa_checkpoint.o \
spa_config.o \
spa_errlog.o \
spa_history.o \
spa_log_spacemap.o \
spa_misc.o \
spa_stats.o \
space_map.o \
space_reftree.o \
txg.o \
uberblock.o \
unique.o \
vdev.o \
- vdev_cache.o \
vdev_draid.o \
vdev_draid_rand.o \
vdev_indirect.o \
vdev_indirect_births.o \
vdev_indirect_mapping.o \
vdev_initialize.o \
vdev_label.o \
vdev_mirror.o \
vdev_missing.o \
vdev_queue.o \
vdev_raidz.o \
vdev_raidz_math.o \
vdev_raidz_math_scalar.o \
vdev_rebuild.o \
vdev_removal.o \
vdev_root.o \
vdev_trim.o \
zap.o \
zap_leaf.o \
zap_micro.o \
zcp.o \
zcp_get.o \
zcp_global.o \
zcp_iter.o \
zcp_set.o \
zcp_synctask.o \
zfeature.o \
zfs_byteswap.o \
zfs_chksum.o \
zfs_fm.o \
zfs_fuid.o \
zfs_impl.o \
zfs_ioctl.o \
zfs_log.o \
zfs_onexit.o \
zfs_quota.o \
zfs_ratelimit.o \
zfs_replay.o \
zfs_rlock.o \
zfs_sa.o \
zfs_vnops.o \
zil.o \
zio.o \
zio_checksum.o \
zio_compress.o \
zio_inject.o \
zle.o \
zrlock.o \
zthr.o \
zvol.o
ZFS_OBJS_OS := \
abd_os.o \
arc_os.o \
mmp_os.o \
policy.o \
qat.o \
qat_compress.o \
qat_crypt.o \
spa_misc_os.o \
trace.o \
vdev_disk.o \
vdev_file.o \
zfs_acl.o \
zfs_ctldir.o \
zfs_debug.o \
zfs_dir.o \
zfs_file_os.o \
zfs_ioctl_os.o \
zfs_racct.o \
zfs_sysfs.o \
zfs_uio.o \
zfs_vfsops.o \
zfs_vnops_os.o \
zfs_znode.o \
zio_crypt.o \
zpl_ctldir.o \
zpl_export.o \
zpl_file.o \
zpl_inode.o \
zpl_super.o \
zpl_xattr.o \
zvol_os.o
ZFS_OBJS_X86 := \
vdev_raidz_math_avx2.o \
vdev_raidz_math_avx512bw.o \
vdev_raidz_math_avx512f.o \
vdev_raidz_math_sse2.o \
vdev_raidz_math_ssse3.o
ZFS_OBJS_ARM64 := \
vdev_raidz_math_aarch64_neon.o \
vdev_raidz_math_aarch64_neonx2.o
ZFS_OBJS_PPC_PPC64 := \
vdev_raidz_math_powerpc_altivec.o
zfs-objs += $(addprefix zfs/,$(ZFS_OBJS)) $(addprefix os/linux/zfs/,$(ZFS_OBJS_OS))
zfs-$(CONFIG_X86) += $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_UML_X86)+= $(addprefix zfs/,$(ZFS_OBJS_X86))
zfs-$(CONFIG_ARM64) += $(addprefix zfs/,$(ZFS_OBJS_ARM64))
zfs-$(CONFIG_PPC) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
zfs-$(CONFIG_PPC64) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64))
# Suppress incorrect warnings from versions of objtool which are not
# aware of x86 EVEX prefix instructions used for AVX512.
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512bw.o := y
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512f.o := y
ifeq ($(CONFIG_ALTIVEC),y)
$(obj)/zfs/vdev_raidz_math_powerpc_altivec.o : c_flags += -maltivec
endif
diff --git a/sys/contrib/openzfs/module/Makefile.bsd b/sys/contrib/openzfs/module/Makefile.bsd
index 365609fb8585..0c4d8bfe1159 100644
--- a/sys/contrib/openzfs/module/Makefile.bsd
+++ b/sys/contrib/openzfs/module/Makefile.bsd
@@ -1,530 +1,543 @@
.if !defined(WITH_CTF)
WITH_CTF=1
.endif
.include <bsd.sys.mk>
SRCDIR=${.CURDIR}
INCDIR=${.CURDIR:H}/include
KMOD= openzfs
.PATH: ${SRCDIR}/avl \
${SRCDIR}/lua \
${SRCDIR}/nvpair \
${SRCDIR}/icp/algs/blake3 \
${SRCDIR}/icp/algs/edonr \
${SRCDIR}/icp/algs/sha2 \
${SRCDIR}/icp/asm-aarch64/blake3 \
${SRCDIR}/icp/asm-aarch64/sha2 \
${SRCDIR}/icp/asm-arm/sha2 \
${SRCDIR}/icp/asm-ppc64/sha2 \
${SRCDIR}/icp/asm-ppc64/blake3 \
${SRCDIR}/icp/asm-x86_64/blake3 \
${SRCDIR}/icp/asm-x86_64/sha2 \
${SRCDIR}/os/freebsd/spl \
${SRCDIR}/os/freebsd/zfs \
${SRCDIR}/unicode \
${SRCDIR}/zcommon \
${SRCDIR}/zfs \
${SRCDIR}/zstd \
${SRCDIR}/zstd/lib/common \
${SRCDIR}/zstd/lib/compress \
${SRCDIR}/zstd/lib/decompress
CFLAGS+= -I${INCDIR}
CFLAGS+= -I${SRCDIR}/icp/include
CFLAGS+= -I${INCDIR}/os/freebsd
CFLAGS+= -I${INCDIR}/os/freebsd/spl
CFLAGS+= -I${INCDIR}/os/freebsd/zfs
CFLAGS+= -I${SRCDIR}/zstd/include
CFLAGS+= -include ${INCDIR}/os/freebsd/spl/sys/ccompile.h
CFLAGS+= -I${.CURDIR}
CFLAGS+= -D__KERNEL__ -DFREEBSD_NAMECACHE -DBUILDING_ZFS -D__BSD_VISIBLE=1 \
-DHAVE_UIO_ZEROCOPY -DWITHOUT_NETDUMP -D__KERNEL -D_SYS_CONDVAR_H_ \
-D_SYS_VMEM_H_ -DKDTRACE_HOOKS -DCOMPAT_FREEBSD11
.if ${MACHINE_ARCH} == "amd64"
CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
-DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW
.endif
.if defined(WITH_DEBUG) && ${WITH_DEBUG} == "true"
CFLAGS+= -DZFS_DEBUG -g
.if defined(WITH_INVARIANTS) && ${WITH_INVARIANTS} == "true"
CFLAGS+= -DINVARIANTS -DWITNESS -DOPENSOLARIS_WITNESS
.endif
.if defined(WITH_O0) && ${WITH_O0} == "true"
CFLAGS+= -O0
.endif
.else
CFLAGS += -DNDEBUG
.endif
.if defined(WITH_VFS_DEBUG) && ${WITH_VFS_DEBUG} == "true"
# kernel must also be built with this option for this to work
CFLAGS+= -DDEBUG_VFS_LOCKS
.endif
.if defined(WITH_GCOV) && ${WITH_GCOV} == "true"
CFLAGS+= -fprofile-arcs -ftest-coverage
.endif
DEBUG_FLAGS=-g
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
CFLAGS+= -DBITS_PER_LONG=32
.else
CFLAGS+= -DBITS_PER_LONG=64
.endif
SRCS= vnode_if.h device_if.h bus_if.h
# avl
SRCS+= avl.c
# icp
SRCS+= edonr.c
#icp/algs/blake3
SRCS+= blake3.c \
blake3_generic.c \
blake3_impl.c
#icp/asm-aarch64/blake3
SRCS+= b3_aarch64_sse2.S \
b3_aarch64_sse41.S
#icp/asm-ppc64/blake3
SRCS+= b3_ppc64le_sse2.S \
b3_ppc64le_sse41.S
#icp/asm-x86_64/blake3
SRCS+= blake3_avx2.S \
blake3_avx512.S \
blake3_sse2.S \
blake3_sse41.S
#icp/algs/sha2
SRCS+= sha2_generic.c \
sha256_impl.c \
sha512_impl.c
#icp/asm-arm/sha2
SRCS+= sha256-armv7.S \
sha512-armv7.S
#icp/asm-aarch64/sha2
SRCS+= sha256-armv8.S \
sha512-armv8.S
#icp/asm-ppc64/sha2
SRCS+= sha256-p8.S \
sha512-p8.S \
sha256-ppc.S \
sha512-ppc.S
#icp/asm-x86_64/sha2
SRCS+= sha256-x86_64.S \
sha512-x86_64.S
#lua
SRCS+= lapi.c \
lauxlib.c \
lbaselib.c \
lcode.c \
lcompat.c \
lcorolib.c \
lctype.c \
ldebug.c \
ldo.c \
lfunc.c \
lgc.c \
llex.c \
lmem.c \
lobject.c \
lopcodes.c \
lparser.c \
lstate.c \
lstring.c \
lstrlib.c \
ltable.c \
ltablib.c \
ltm.c \
lvm.c \
lzio.c
#nvpair
SRCS+= nvpair.c \
fnvpair.c \
nvpair_alloc_spl.c \
nvpair_alloc_fixed.c
#os/freebsd/spl
SRCS+= acl_common.c \
callb.c \
list.c \
spl_acl.c \
spl_cmn_err.c \
spl_dtrace.c \
spl_kmem.c \
spl_kstat.c \
spl_misc.c \
spl_policy.c \
spl_procfs_list.c \
spl_string.c \
spl_sunddi.c \
spl_sysevent.c \
spl_taskq.c \
spl_uio.c \
spl_vfs.c \
spl_vm.c \
spl_zlib.c \
spl_zone.c
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
SRCS+= spl_atomic.c
.endif
#os/freebsd/zfs
SRCS+= abd_os.c \
arc_os.c \
crypto_os.c \
dmu_os.c \
event_os.c \
hkdf.c \
kmod_core.c \
spa_os.c \
sysctl_os.c \
vdev_file.c \
vdev_geom.c \
vdev_label_os.c \
zfs_acl.c \
zfs_ctldir.c \
zfs_debug.c \
zfs_dir.c \
zfs_ioctl_compat.c \
zfs_ioctl_os.c \
zfs_racct.c \
zfs_vfsops.c \
zfs_vnops_os.c \
zfs_znode.c \
zio_crypt.c \
zvol_os.c
#unicode
SRCS+= uconv.c \
u8_textprep.c
#zcommon
SRCS+= zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_avx512.c \
zfs_fletcher_intel.c \
zfs_fletcher_sse.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zpool_prop.c \
zprop_common.c
#zfs
SRCS+= abd.c \
aggsum.c \
arc.c \
blake3_zfs.c \
blkptr.c \
bplist.c \
bpobj.c \
brt.c \
btree.c \
cityhash.c \
dbuf.c \
dbuf_stats.c \
bptree.c \
bqueue.c \
dataset_kstats.c \
ddt.c \
ddt_zap.c \
dmu.c \
dmu_diff.c \
dmu_object.c \
dmu_objset.c \
dmu_recv.c \
dmu_redact.c \
dmu_send.c \
dmu_traverse.c \
dmu_tx.c \
dmu_zfetch.c \
dnode.c \
dnode_sync.c \
dsl_dataset.c \
dsl_deadlist.c \
dsl_deleg.c \
dsl_bookmark.c \
dsl_dir.c \
dsl_crypt.c \
dsl_destroy.c \
dsl_pool.c \
dsl_prop.c \
dsl_scan.c \
dsl_synctask.c \
dsl_userhold.c \
edonr_zfs.c \
fm.c \
gzip.c \
lzjb.c \
lz4.c \
lz4_zfs.c \
metaslab.c \
mmp.c \
multilist.c \
objlist.c \
pathname.c \
range_tree.c \
refcount.c \
rrwlock.c \
sa.c \
sha2_zfs.c \
skein_zfs.c \
spa.c \
spa_checkpoint.c \
spa_config.c \
spa_errlog.c \
spa_history.c \
spa_log_spacemap.c \
spa_misc.c \
spa_stats.c \
space_map.c \
space_reftree.c \
txg.c \
uberblock.c \
unique.c \
vdev.c \
- vdev_cache.c \
vdev_draid.c \
vdev_draid_rand.c \
vdev_indirect.c \
vdev_indirect_births.c \
vdev_indirect_mapping.c \
vdev_initialize.c \
vdev_label.c \
vdev_mirror.c \
vdev_missing.c \
vdev_queue.c \
vdev_raidz.c \
vdev_raidz_math.c \
vdev_raidz_math_scalar.c \
vdev_raidz_math_avx2.c \
vdev_raidz_math_avx512bw.c \
vdev_raidz_math_avx512f.c \
vdev_raidz_math_sse2.c \
vdev_raidz_math_ssse3.c \
vdev_rebuild.c \
vdev_removal.c \
vdev_root.c \
vdev_trim.c \
zap.c \
zap_leaf.c \
zap_micro.c \
zcp.c \
zcp_get.c \
zcp_global.c \
zcp_iter.c \
zcp_set.c \
zcp_synctask.c \
zfeature.c \
zfs_byteswap.c \
zfs_chksum.c \
zfs_file_os.c \
zfs_fm.c \
zfs_fuid.c \
zfs_impl.c \
zfs_ioctl.c \
zfs_log.c \
zfs_onexit.c \
zfs_quota.c \
zfs_ratelimit.c \
zfs_replay.c \
zfs_rlock.c \
zfs_sa.c \
zfs_vnops.c \
zil.c \
zio.c \
zio_checksum.c \
zio_compress.c \
zio_inject.c \
zle.c \
zrlock.c \
zthr.c \
zvol.c
#zstd
SRCS+= zfs_zstd.c \
entropy_common.c \
error_private.c \
fse_compress.c \
fse_decompress.c \
hist.c \
huf_compress.c \
huf_decompress.c \
pool.c \
xxhash.c \
zstd_common.c \
zstd_compress.c \
zstd_compress_literals.c \
zstd_compress_sequences.c \
zstd_compress_superblock.c \
zstd_ddict.c \
zstd_decompress.c \
zstd_decompress_block.c \
zstd_double_fast.c \
zstd_fast.c \
zstd_lazy.c \
zstd_ldm.c \
zstd_opt.c
beforeinstall:
.if ${MK_DEBUG_FILES} != "no"
mtree -eu \
-f /etc/mtree/BSD.debug.dist \
-p ${DESTDIR}/usr/lib
.endif
.include <bsd.kmod.mk>
+# Generated binary search code is particularly bad with this optimization.
+# Oddly, range_tree.c is not affected when unrolling is not done and dsl_scan.c
+# is not affected when unrolling is done.
+# Disable it until the following upstream issue is resolved:
+# https://github.com/llvm/llvm-project/issues/62790
+.if ${CC} == "clang"
+.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "amd64"
+CFLAGS.dsl_scan.c= -mllvm -x86-cmov-converter=false
+CFLAGS.metaslab.c= -mllvm -x86-cmov-converter=false
+CFLAGS.range_tree.c= -mllvm -x86-cmov-converter=false
+CFLAGS.zap_micro.c= -mllvm -x86-cmov-converter=false
+.endif
+.endif
+
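For context on the workaround above: clang's x86-cmov-converter pass rewrites conditional moves into branches, which is exactly wrong for the data-dependent comparisons inside generated binary-search code. A minimal illustrative sketch (not taken from these sources) of the kind of loop affected:

#include <stddef.h>

/*
 * Illustrative only: a lower-bound binary search whose index updates
 * are conditional selects.  With a real CMOV each step is branch-free;
 * once the cmov-converter turns the selects into branches, the
 * unpredictable comparison dominates the loop, which is what the
 * per-file CFLAGS overrides above avoid.
 */
static size_t
lower_bound(const int *arr, size_t nelems, int key)
{
	size_t lo = 0, hi = nelems;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		int lt = (arr[mid] < key);

		lo = lt ? mid + 1 : lo;	/* ideally a CMOV */
		hi = lt ? hi : mid;	/* ideally a CMOV */
	}
	return (lo);		/* first index with arr[idx] >= key */
}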
CFLAGS.sysctl_os.c= -include ../zfs_config.h
CFLAGS.xxhash.c+= -include ${SYSDIR}/sys/_null.h
CFLAGS.gcc+= -Wno-pointer-to-int-cast
CFLAGS.abd.c= -Wno-cast-qual
CFLAGS.ddt.c= -Wno-cast-qual
CFLAGS.dmu.c= -Wno-cast-qual
CFLAGS.dmu_traverse.c= -Wno-cast-qual
CFLAGS.dnode.c= ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.dsl_deadlist.c= -Wno-cast-qual
CFLAGS.dsl_dir.c= -Wno-cast-qual
CFLAGS.dsl_prop.c= -Wno-cast-qual
CFLAGS.edonr.c= -Wno-cast-qual
CFLAGS.fm.c= -Wno-cast-qual
CFLAGS.hist.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.lapi.c= -Wno-cast-qual
CFLAGS.lcompat.c= -Wno-cast-qual
CFLAGS.ldo.c= ${NO_WINFINITE_RECURSION}
CFLAGS.lobject.c= -Wno-cast-qual
CFLAGS.ltable.c= -Wno-cast-qual
CFLAGS.lvm.c= -Wno-cast-qual
CFLAGS.lz4.c= -Wno-cast-qual
CFLAGS.lz4_zfs.c= -Wno-cast-qual
CFLAGS.nvpair.c= -Wno-cast-qual -DHAVE_RPC_TYPES ${NO_WSTRINGOP_OVERREAD}
CFLAGS.pool.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.pool.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.spa.c= -Wno-cast-qual
CFLAGS.spa_misc.c= -Wno-cast-qual
CFLAGS.spl_string.c= -Wno-cast-qual
CFLAGS.spl_vm.c= -Wno-cast-qual
CFLAGS.spl_zlib.c= -Wno-cast-qual
CFLAGS.u8_textprep.c= -Wno-cast-qual
CFLAGS.vdev_draid.c= -Wno-cast-qual
CFLAGS.vdev_raidz.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_avx2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_avx512f.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_scalar.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_sse2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.zap_leaf.c= -Wno-cast-qual
CFLAGS.zap_micro.c= -Wno-cast-qual
CFLAGS.zcp.c= -Wno-cast-qual
CFLAGS.zfs_fletcher.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_avx512.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_intel.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_sse.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fm.c= -Wno-cast-qual ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.zfs_ioctl.c= -Wno-cast-qual
CFLAGS.zfs_log.c= -Wno-cast-qual
CFLAGS.zfs_vnops_os.c= -Wno-pointer-arith
CFLAGS.zfs_zstd.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zil.c= -Wno-cast-qual
CFLAGS.zio.c= -Wno-cast-qual
CFLAGS.zprop_common.c= -Wno-cast-qual
CFLAGS.zrlock.c= -Wno-cast-qual
#zstd
CFLAGS.entropy_common.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.error_private.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.fse_compress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL} ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.fse_decompress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_compress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_decompress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.xxhash.c+= -U__BMI__ -fno-tree-vectorize
CFLAGS.xxhash.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_common.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_literals.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_sequences.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_superblock.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL} ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.zstd_ddict.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress_block.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_double_fast.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_fast.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_lazy.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_ldm.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_opt.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
.if ${MACHINE_CPUARCH} == "aarch64"
__ZFS_ZSTD_AARCH64_FLAGS= -include ${SRCDIR}/zstd/include/aarch64_compat.h
CFLAGS.zstd.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.entropy_common.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.error_private.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.fse_compress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.fse_decompress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.hist.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.huf_compress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.huf_decompress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.pool.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.xxhash.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_common.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress_literals.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress_sequences.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress_superblock.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_ddict.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_decompress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_decompress_block.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_double_fast.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_fast.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_lazy.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_ldm.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_opt.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
sha256-armv8.o: sha256-armv8.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
sha512-armv8.o: sha512-armv8.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
b3_aarch64_sse2.o: b3_aarch64_sse2.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
b3_aarch64_sse41.o: b3_aarch64_sse41.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
.endif
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
index cc616f33db96..8ae2f23c3ecf 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
@@ -1,896 +1,894 @@
/*
* Copyright (c) 2020 iXsystems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/arc_os.h>
#include <sys/dmu.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/sunddi.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/nvpair.h>
#include <sys/mount.h>
#include <sys/taskqueue.h>
#include <sys/sdt.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_bookmark.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zcp.h>
#include <sys/zio_checksum.h>
#include <sys/vdev_removal.h>
#include <sys/dsl_crypt.h>
#include <sys/zfs_ioctl_compat.h>
#include <sys/zfs_context.h>
#include <sys/arc_impl.h>
#include <sys/dsl_pool.h>
#include <sys/vmmeter.h>
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, arc, CTLFLAG_RW, 0,
"ZFS adaptive replacement cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, brt, CTLFLAG_RW, 0,
"ZFS Block Reference Table");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, condense, CTLFLAG_RW, 0, "ZFS condense");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf, CTLFLAG_RW, 0, "ZFS disk buf cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dbuf_cache, CTLFLAG_RW, 0,
"ZFS disk buf cache");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, deadman, CTLFLAG_RW, 0, "ZFS deadman");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, dedup, CTLFLAG_RW, 0, "ZFS dedup");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, l2arc, CTLFLAG_RW, 0, "ZFS l2arc");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, livelist, CTLFLAG_RW, 0, "ZFS livelist");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, lua, CTLFLAG_RW, 0, "ZFS lua");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, mg, CTLFLAG_RW, 0, "ZFS metaslab group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, multihost, CTLFLAG_RW, 0,
"ZFS multihost protection");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, prefetch, CTLFLAG_RW, 0, "ZFS prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, reconstruct, CTLFLAG_RW, 0, "ZFS reconstruct");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, recv, CTLFLAG_RW, 0, "ZFS receive");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, send, CTLFLAG_RW, 0, "ZFS send");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, spa, CTLFLAG_RW, 0, "ZFS space allocation");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RW, 0, "ZFS TRIM");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS transaction group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vnops, CTLFLAG_RW, 0, "ZFS VNOPS");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zevent, CTLFLAG_RW, 0, "ZFS event");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zil, CTLFLAG_RW, 0, "ZFS ZIL");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
SYSCTL_NODE(_vfs_zfs_livelist, OID_AUTO, condense, CTLFLAG_RW, 0,
"ZFS livelist condense");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, file, CTLFLAG_RW, 0, "ZFS VDEV file");
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
"ZFS VDEV mirror");
SYSCTL_DECL(_vfs_zfs_version);
SYSCTL_CONST_STRING(_vfs_zfs_version, OID_AUTO, module, CTLFLAG_RD,
(ZFS_META_VERSION "-" ZFS_META_RELEASE), "OpenZFS module version");
/* arc.c */
int
param_set_arc_u64(SYSCTL_HANDLER_ARGS)
{
int err;
err = sysctl_handle_64(oidp, arg1, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
arc_tuning_update(B_TRUE);
return (0);
}
int
param_set_arc_int(SYSCTL_HANDLER_ARGS)
{
int err;
err = sysctl_handle_int(oidp, arg1, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
arc_tuning_update(B_TRUE);
return (0);
}
int
param_set_arc_max(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_arc_max;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val != 0 && (val < MIN_ARC_MAX || val <= arc_c_min ||
val >= arc_all_memory()))
return (SET_ERROR(EINVAL));
zfs_arc_max = val;
arc_tuning_update(B_TRUE);
/* Update the sysctl to the tuned value */
if (val != 0)
zfs_arc_max = arc_c_max;
return (0);
}
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
NULL, 0, param_set_arc_max, "LU",
"Maximum ARC size in bytes (LEGACY)");
/* END CSTYLED */
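Because these handlers back ordinary sysctl MIB entries, the tunables can also be exercised from userland. A small sketch using the standard sysctlbyname(3) interface (root is assumed for the set; error handling trimmed):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t arc_max;
	size_t len = sizeof (arc_max);

	/* Read the current limit; param_set_arc_max() is the handler. */
	if (sysctlbyname("vfs.zfs.arc_max", &arc_max, &len, NULL, 0) != 0) {
		perror("sysctlbyname");
		return (1);
	}
	printf("vfs.zfs.arc_max = %ju bytes\n", (uintmax_t)arc_max);

	/* Example: cap the ARC at 4 GiB; the handler validates the range. */
	uint64_t new_max = 4ULL << 30;
	if (sysctlbyname("vfs.zfs.arc_max", NULL, NULL,
	    &new_max, sizeof (new_max)) != 0)
		perror("sysctlbyname (set)");
	return (0);
}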
int
param_set_arc_min(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_arc_min;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val != 0 && (val < 2ULL << SPA_MAXBLOCKSHIFT || val > arc_c_max))
return (SET_ERROR(EINVAL));
zfs_arc_min = val;
arc_tuning_update(B_TRUE);
/* Update the sysctl to the tuned value */
if (val != 0)
zfs_arc_min = arc_c_min;
return (0);
}
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
NULL, 0, param_set_arc_min, "LU",
"Minimum ARC size in bytes (LEGACY)");
/* END CSTYLED */
extern uint_t zfs_arc_free_target;
int
param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
{
uint_t val;
int err;
val = zfs_arc_free_target;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
if (val < minfree)
return (EINVAL);
if (val > vm_cnt.v_page_count)
return (EINVAL);
zfs_arc_free_target = val;
return (0);
}
/*
* NOTE: This sysctl is CTLFLAG_RW not CTLFLAG_RWTUN due to its dependency on
* pagedaemon initialization.
*/
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
NULL, 0, param_set_arc_free_target, "IU",
"Desired number of free pages below which ARC triggers reclaim"
" (LEGACY)");
/* END CSTYLED */
int
param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
{
int err, val;
val = arc_no_grow_shift;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
if (val < 0 || val >= arc_shrink_shift)
return (EINVAL);
arc_no_grow_shift = val;
return (0);
}
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
NULL, 0, param_set_arc_no_grow_shift, "I",
"log2(fraction of ARC which must be free to allow growing) (LEGACY)");
/* END CSTYLED */
extern uint64_t l2arc_write_max;
/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max,
CTLFLAG_RWTUN, &l2arc_write_max, 0,
"Max write bytes per interval (LEGACY)");
/* END CSTYLED */
extern uint64_t l2arc_write_boost;
/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost,
CTLFLAG_RWTUN, &l2arc_write_boost, 0,
"Extra write bytes during device warmup (LEGACY)");
/* END CSTYLED */
extern uint64_t l2arc_headroom;
/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom,
CTLFLAG_RWTUN, &l2arc_headroom, 0,
"Number of max device writes to precache (LEGACY)");
/* END CSTYLED */
extern uint64_t l2arc_headroom_boost;
/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom_boost,
CTLFLAG_RWTUN, &l2arc_headroom_boost, 0,
"Compressed l2arc_headroom multiplier (LEGACY)");
/* END CSTYLED */
extern uint64_t l2arc_feed_secs;
/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs,
CTLFLAG_RWTUN, &l2arc_feed_secs, 0,
"Seconds between L2ARC writing (LEGACY)");
/* END CSTYLED */
extern uint64_t l2arc_feed_min_ms;
/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms,
CTLFLAG_RWTUN, &l2arc_feed_min_ms, 0,
"Min feed interval in milliseconds (LEGACY)");
/* END CSTYLED */
extern int l2arc_noprefetch;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch,
CTLFLAG_RWTUN, &l2arc_noprefetch, 0,
"Skip caching prefetched buffers (LEGACY)");
/* END CSTYLED */
extern int l2arc_feed_again;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again,
CTLFLAG_RWTUN, &l2arc_feed_again, 0,
"Turbo L2ARC warmup (LEGACY)");
/* END CSTYLED */
extern int l2arc_norw;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw,
CTLFLAG_RWTUN, &l2arc_norw, 0,
"No reads during writes (LEGACY)");
/* END CSTYLED */
static int
param_get_arc_state_size(SYSCTL_HANDLER_ARGS)
{
arc_state_t *state = (arc_state_t *)arg1;
int64_t val;
val = zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]) +
zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]);
return (sysctl_handle_64(oidp, &val, 0, req));
}
extern arc_state_t ARC_anon;
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, anon_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_anon, 0, param_get_arc_state_size, "Q",
"size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
&ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
&ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in anonymous state");
/* END CSTYLED */
extern arc_state_t ARC_mru;
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_mru, 0, param_get_arc_state_size, "Q",
"size of mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
&ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
&ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in mru state");
/* END CSTYLED */
extern arc_state_t ARC_mru_ghost;
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_ghost_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_mru_ghost, 0, param_get_arc_state_size, "Q",
"size of mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
&ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
&ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in mru ghost state");
/* END CSTYLED */
extern arc_state_t ARC_mfu;
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_mfu, 0, param_get_arc_state_size, "Q",
"size of mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
&ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
&ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in mfu state");
/* END CSTYLED */
extern arc_state_t ARC_mfu_ghost;
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_ghost_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_mfu_ghost, 0, param_get_arc_state_size, "Q",
"size of mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
&ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
&ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in mfu ghost state");
/* END CSTYLED */
extern arc_state_t ARC_uncached;
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, uncached_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_uncached, 0, param_get_arc_state_size, "Q",
"size of uncached state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_metadata_esize, CTLFLAG_RD,
&ARC_uncached.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
"size of evictable metadata in uncached state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_data_esize, CTLFLAG_RD,
&ARC_uncached.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
"size of evictable data in uncached state");
/* END CSTYLED */
extern arc_state_t ARC_l2c_only;
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, l2c_only_size,
CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
&ARC_l2c_only, 0, param_get_arc_state_size, "Q",
"size of l2c_only state");
/* END CSTYLED */
/* dbuf.c */
/* dmu.c */
/* dmu_zfetch.c */
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH (LEGACY)");
extern uint32_t zfetch_max_distance;
/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance,
CTLFLAG_RWTUN, &zfetch_max_distance, 0,
"Max bytes to prefetch per stream (LEGACY)");
/* END CSTYLED */
extern uint32_t zfetch_max_idistance;
/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance,
CTLFLAG_RWTUN, &zfetch_max_idistance, 0,
"Max bytes to prefetch indirects for per stream (LEGACY)");
/* END CSTYLED */
/* dsl_pool.c */
/* dnode.c */
/* dsl_scan.c */
/* metaslab.c */
/*
* In pools where the log space map feature is not enabled we touch
* multiple metaslabs (and their respective space maps) with each
* transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
* around the disk. So a sane default for the space map block size
* is 8~16K.
*/
extern int zfs_metaslab_sm_blksz_no_log;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log,
CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_no_log, 0,
"Block size for space map in pools with log space map disabled. "
"Power of 2 greater than 4096.");
/* END CSTYLED */
/*
* When the log space map feature is enabled, we accumulate a lot of
* changes per metaslab that are flushed once in a while so we benefit
* from a bigger block size like 128K for the metaslab space maps.
*/
extern int zfs_metaslab_sm_blksz_with_log;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log,
CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_with_log, 0,
"Block size for space map in pools with log space map enabled. "
"Power of 2 greater than 4096.");
/* END CSTYLED */
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
extern uint_t zfs_condense_pct;
/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct,
CTLFLAG_RWTUN, &zfs_condense_pct, 0,
"Condense on-disk spacemap when it is more than this many percents"
" of in-memory counterpart");
/* END CSTYLED */
extern uint_t zfs_remove_max_segment;
/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs, OID_AUTO, remove_max_segment,
CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
"Largest contiguous segment ZFS will attempt to allocate when removing"
" a device");
/* END CSTYLED */
extern int zfs_removal_suspend_progress;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress,
CTLFLAG_RWTUN, &zfs_removal_suspend_progress, 0,
"Ensures certain actions can happen while in the middle of a removal");
/* END CSTYLED */
/*
* Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size, it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
*/
extern uint64_t metaslab_df_alloc_threshold;
/* BEGIN CSTYLED */
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold,
CTLFLAG_RWTUN, &metaslab_df_alloc_threshold, 0,
"Minimum size which forces the dynamic allocator to change its"
" allocation strategy");
/* END CSTYLED */
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
extern uint_t metaslab_df_free_pct;
/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct,
CTLFLAG_RWTUN, &metaslab_df_free_pct, 0,
"The minimum free space, in percent, which must be available in a"
" space map to continue allocations in a first-fit fashion");
/* END CSTYLED */
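The two comments above describe a single policy: stay with first-fit (offset-ordered) allocation while the metaslab is healthy, and fall back to a size-ordered search once the largest free segment or the free percentage drops too low. A hedged sketch of that decision; the real logic lives in metaslab_df_alloc(), and the struct, helper, and values below are illustrative stand-ins:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative summary of a metaslab's space map, not a ZFS type. */
struct sm_summary {
	uint64_t sm_free;	/* free bytes */
	uint64_t sm_size;	/* total bytes covered (nonzero) */
	uint64_t sm_max_free;	/* largest free segment */
};

/* Stand-ins for the tunables documented above (values illustrative). */
static uint64_t metaslab_df_alloc_threshold = 128 * 1024;
static unsigned int metaslab_df_free_pct = 4;

/* Returns true when the allocator should switch to best-fit (by size). */
static bool
df_should_switch_to_best_fit(const struct sm_summary *sm)
{
	unsigned int free_pct =
	    (unsigned int)(sm->sm_free * 100 / sm->sm_size);

	return (sm->sm_max_free < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct);
}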
/*
* Percentage of all cpus that can be used by the metaslab taskq.
*/
extern int metaslab_load_pct;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct,
CTLFLAG_RWTUN, &metaslab_load_pct, 0,
"Percentage of cpus that can be used by the metaslab taskq");
/* END CSTYLED */
/*
* Max number of metaslabs per group to preload.
*/
extern uint_t metaslab_preload_limit;
/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, preload_limit,
CTLFLAG_RWTUN, &metaslab_preload_limit, 0,
"Max number of metaslabs per group to preload");
/* END CSTYLED */
/* mmp.c */
int
param_set_multihost_interval(SYSCTL_HANDLER_ARGS)
{
int err;
err = sysctl_handle_64(oidp, &zfs_multihost_interval, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
if (spa_mode_global != SPA_MODE_UNINIT)
mmp_signal_all_threads();
return (0);
}
/* spa.c */
extern int zfs_ccw_retry_interval;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval,
CTLFLAG_RWTUN, &zfs_ccw_retry_interval, 0,
"Configuration cache file write, retry after failure, interval"
" (seconds)");
/* END CSTYLED */
extern uint64_t zfs_max_missing_tvds_cachefile;
/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile,
CTLFLAG_RWTUN, &zfs_max_missing_tvds_cachefile, 0,
"Allow importing pools with missing top-level vdevs in cache file");
/* END CSTYLED */
extern uint64_t zfs_max_missing_tvds_scan;
/* BEGIN CSTYLED */
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan,
CTLFLAG_RWTUN, &zfs_max_missing_tvds_scan, 0,
"Allow importing pools with missing top-level vdevs during scan");
/* END CSTYLED */
/* spa_misc.c */
extern int zfs_flags;
static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
int err, val;
val = zfs_flags;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
/*
* ZFS_DEBUG_MODIFY must be enabled prior to boot so all
* arc buffers in the system have the necessary additional
* checksum data. However, it is safe to disable at any
* time.
*/
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
val &= ~ZFS_DEBUG_MODIFY;
zfs_flags = val;
return (0);
}
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debugflags,
CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, NULL, 0,
sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");
/* END CSTYLED */
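The handler above encodes an asymmetric rule for ZFS_DEBUG_MODIFY: because only buffers checksummed since boot carry the extra verification data, the flag may be cleared at runtime but never newly set. A tiny hedged illustration of that masking (the flag value is a stand-in):

#include <assert.h>

#define	ZFS_DEBUG_MODIFY	0x10	/* stand-in bit for this sketch */

/* Mirror of the masking step in sysctl_vfs_zfs_debug_flags() above. */
static int
apply_debug_flags(int current, int requested)
{
	if (!(current & ZFS_DEBUG_MODIFY))
		requested &= ~ZFS_DEBUG_MODIFY;
	return (requested);
}

int
main(void)
{
	/* Booted without MODIFY: attempts to enable it are ignored. */
	assert(apply_debug_flags(0x01, 0x01 | ZFS_DEBUG_MODIFY) == 0x01);
	/* Booted with MODIFY: it may be kept or cleared at will. */
	assert(apply_debug_flags(ZFS_DEBUG_MODIFY, 0) == 0);
	return (0);
}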
int
param_set_deadman_synctime(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_deadman_synctime_ms;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
zfs_deadman_synctime_ms = val;
spa_set_deadman_synctime(MSEC2NSEC(zfs_deadman_synctime_ms));
return (0);
}
int
param_set_deadman_ziotime(SYSCTL_HANDLER_ARGS)
{
unsigned long val;
int err;
val = zfs_deadman_ziotime_ms;
err = sysctl_handle_64(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
zfs_deadman_ziotime_ms = val;
spa_set_deadman_ziotime(MSEC2NSEC(zfs_deadman_synctime_ms));
return (0);
}
int
param_set_deadman_failmode(SYSCTL_HANDLER_ARGS)
{
char buf[16];
int rc;
if (req->newptr == NULL)
strlcpy(buf, zfs_deadman_failmode, sizeof (buf));
rc = sysctl_handle_string(oidp, buf, sizeof (buf), req);
if (rc || req->newptr == NULL)
return (rc);
if (strcmp(buf, zfs_deadman_failmode) == 0)
return (0);
if (strcmp(buf, "wait") == 0)
zfs_deadman_failmode = "wait";
if (strcmp(buf, "continue") == 0)
zfs_deadman_failmode = "continue";
if (strcmp(buf, "panic") == 0)
zfs_deadman_failmode = "panic";
return (-param_set_deadman_failmode_common(buf));
}
int
param_set_slop_shift(SYSCTL_HANDLER_ARGS)
{
int val;
int err;
val = spa_slop_shift;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (err);
if (val < 1 || val > 31)
return (EINVAL);
spa_slop_shift = val;
return (0);
}
/* spacemap.c */
extern int space_map_ibs;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_ibs, CTLFLAG_RWTUN,
&space_map_ibs, 0, "Space map indirect block shift");
/* END CSTYLED */
/* vdev.c */
int
param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
{
int val;
int err;
val = zfs_vdev_min_auto_ashift;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
return (SET_ERROR(EINVAL));
zfs_vdev_min_auto_ashift = val;
return (0);
}
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
&zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
param_set_min_auto_ashift, "IU",
"Min ashift used when creating new top-level vdev. (LEGACY)");
/* END CSTYLED */
int
param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
{
int val;
int err;
val = zfs_vdev_max_auto_ashift;
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == NULL)
return (SET_ERROR(err));
if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
return (SET_ERROR(EINVAL));
zfs_vdev_max_auto_ashift = val;
return (0);
}
/* BEGIN CSTYLED */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
&zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift),
param_set_max_auto_ashift, "IU",
"Max ashift used when optimizing for logical -> physical sector size on"
" new top-level vdevs. (LEGACY)");
/* END CSTYLED */
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
extern int zfs_vdev_dtl_sm_blksz;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz,
CTLFLAG_RDTUN, &zfs_vdev_dtl_sm_blksz, 0,
"Block size for DTL space map. Power of 2 greater than 4096.");
/* END CSTYLED */
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
extern int zfs_vdev_standard_sm_blksz;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz,
CTLFLAG_RDTUN, &zfs_vdev_standard_sm_blksz, 0,
"Block size for standard space map. Power of 2 greater than 4096.");
/* END CSTYLED */
extern int vdev_validate_skip;
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
CTLFLAG_RDTUN, &vdev_validate_skip, 0,
"Enable to bypass vdev_validate().");
/* END CSTYLED */
-/* vdev_cache.c */
-
/* vdev_mirror.c */
/* vdev_queue.c */
extern uint_t zfs_vdev_max_active;
/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
CTLFLAG_RWTUN, &zfs_vdev_max_active, 0,
"The maximum number of I/Os of all types active for each device."
" (LEGACY)");
/* END CSTYLED */
/* zio.c */
/* BEGIN CSTYLED */
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata,
CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
"Exclude metadata buffers from dumps as well");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
index a077076927a1..20466aeaaa05 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
@@ -1,2677 +1,2675 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/sdt.h>
#include <sys/fs/zfs.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <acl/acl_common.h>
#define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE
#define DENY ACE_ACCESS_DENIED_ACE_TYPE
#define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE
#define MIN_ACE_TYPE ALLOW
#define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP)
#define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE)
#define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \
ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \
ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \
ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE)
#define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS)
#define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \
ACE_DELETE|ACE_DELETE_CHILD)
#define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS)
#define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \
ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE)
#define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER)
#define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\
ZFS_ACL_PROTECTED)
#define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\
ZFS_ACL_OBJ_ACE)
#define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH)
static uint16_t
zfs_ace_v0_get_type(void *acep)
{
return (((zfs_oldace_t *)acep)->z_type);
}
static uint16_t
zfs_ace_v0_get_flags(void *acep)
{
return (((zfs_oldace_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_v0_get_mask(void *acep)
{
return (((zfs_oldace_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_v0_get_who(void *acep)
{
return (((zfs_oldace_t *)acep)->z_fuid);
}
static void
zfs_ace_v0_set_type(void *acep, uint16_t type)
{
((zfs_oldace_t *)acep)->z_type = type;
}
static void
zfs_ace_v0_set_flags(void *acep, uint16_t flags)
{
((zfs_oldace_t *)acep)->z_flags = flags;
}
static void
zfs_ace_v0_set_mask(void *acep, uint32_t mask)
{
((zfs_oldace_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_v0_set_who(void *acep, uint64_t who)
{
((zfs_oldace_t *)acep)->z_fuid = who;
}
static size_t
zfs_ace_v0_size(void *acep)
{
(void) acep;
return (sizeof (zfs_oldace_t));
}
static size_t
zfs_ace_v0_abstract_size(void)
{
return (sizeof (zfs_oldace_t));
}
static int
zfs_ace_v0_mask_off(void)
{
return (offsetof(zfs_oldace_t, z_access_mask));
}
static int
zfs_ace_v0_data(void *acep, void **datap)
{
(void) acep;
*datap = NULL;
return (0);
}
static const acl_ops_t zfs_acl_v0_ops = {
zfs_ace_v0_get_mask,
zfs_ace_v0_set_mask,
zfs_ace_v0_get_flags,
zfs_ace_v0_set_flags,
zfs_ace_v0_get_type,
zfs_ace_v0_set_type,
zfs_ace_v0_get_who,
zfs_ace_v0_set_who,
zfs_ace_v0_size,
zfs_ace_v0_abstract_size,
zfs_ace_v0_mask_off,
zfs_ace_v0_data
};
static uint16_t
zfs_ace_fuid_get_type(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_type);
}
static uint16_t
zfs_ace_fuid_get_flags(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_fuid_get_mask(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_fuid_get_who(void *args)
{
uint16_t entry_type;
zfs_ace_t *acep = args;
entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (-1);
return (((zfs_ace_t *)acep)->z_fuid);
}
static void
zfs_ace_fuid_set_type(void *acep, uint16_t type)
{
((zfs_ace_hdr_t *)acep)->z_type = type;
}
static void
zfs_ace_fuid_set_flags(void *acep, uint16_t flags)
{
((zfs_ace_hdr_t *)acep)->z_flags = flags;
}
static void
zfs_ace_fuid_set_mask(void *acep, uint32_t mask)
{
((zfs_ace_hdr_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_fuid_set_who(void *arg, uint64_t who)
{
zfs_ace_t *acep = arg;
uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return;
acep->z_fuid = who;
}
static size_t
zfs_ace_fuid_size(void *acep)
{
zfs_ace_hdr_t *zacep = acep;
uint16_t entry_type;
switch (zacep->z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
return (sizeof (zfs_object_ace_t));
case ALLOW:
case DENY:
entry_type =
(((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS);
if (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (sizeof (zfs_ace_hdr_t));
zfs_fallthrough;
default:
return (sizeof (zfs_ace_t));
}
}
static size_t
zfs_ace_fuid_abstract_size(void)
{
return (sizeof (zfs_ace_hdr_t));
}
static int
zfs_ace_fuid_mask_off(void)
{
return (offsetof(zfs_ace_hdr_t, z_access_mask));
}
static int
zfs_ace_fuid_data(void *acep, void **datap)
{
zfs_ace_t *zacep = acep;
zfs_object_ace_t *zobjp;
switch (zacep->z_hdr.z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjp = acep;
*datap = (caddr_t)zobjp + sizeof (zfs_ace_t);
return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t));
default:
*datap = NULL;
return (0);
}
}
static const acl_ops_t zfs_acl_fuid_ops = {
zfs_ace_fuid_get_mask,
zfs_ace_fuid_set_mask,
zfs_ace_fuid_get_flags,
zfs_ace_fuid_set_flags,
zfs_ace_fuid_get_type,
zfs_ace_fuid_set_type,
zfs_ace_fuid_get_who,
zfs_ace_fuid_set_who,
zfs_ace_fuid_size,
zfs_ace_fuid_abstract_size,
zfs_ace_fuid_mask_off,
zfs_ace_fuid_data
};
/*
* The following three functions are provided for compatibility with
 * older ZPL versions in order to determine whether the file used to have
* an external ACL and what version of ACL previously existed on the
* file. Would really be nice to not need this, sigh.
*/
uint64_t
zfs_external_acl(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
int error;
if (zp->z_is_sa)
return (0);
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
 * z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_extern_obj);
else {
/*
* after upgrade the SA_ZPL_ZNODE_ACL should have been
* removed
*/
VERIFY(zp->z_is_sa);
VERIFY3S(error, ==, ENOENT);
return (0);
}
}
/*
* Determine size of ACL in bytes
*
* This is more complicated than it should be since we have to deal
* with old external ACLs.
*/
static int
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
&size)) != 0)
return (error);
*aclsize = size;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
aclphys, sizeof (*aclphys))) != 0)
return (error);
if (aclphys->z_acl_version == ZFS_ACL_VERSION_INITIAL) {
*aclsize = ZFS_ACL_SIZE(aclphys->z_acl_size);
*aclcount = aclphys->z_acl_size;
} else {
*aclsize = aclphys->z_acl_size;
*aclcount = aclphys->z_acl_count;
}
}
return (0);
}
int
zfs_znode_acl_version(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
if (zp->z_is_sa)
return (ZFS_ACL_VERSION_FUID);
else {
int error;
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
 * z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_version);
else {
/*
* After upgrade SA_ZPL_ZNODE_ACL should have
* been removed.
*/
VERIFY(zp->z_is_sa);
VERIFY3S(error, ==, ENOENT);
return (ZFS_ACL_VERSION_FUID);
}
}
}
static int
zfs_acl_version(int version)
{
if (version < ZPL_VERSION_FUID)
return (ZFS_ACL_VERSION_INITIAL);
else
return (ZFS_ACL_VERSION_FUID);
}
static int
zfs_acl_version_zp(znode_t *zp)
{
return (zfs_acl_version(zp->z_zfsvfs->z_version));
}
zfs_acl_t *
zfs_acl_alloc(int vers)
{
zfs_acl_t *aclp;
aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
offsetof(zfs_acl_node_t, z_next));
aclp->z_version = vers;
if (vers == ZFS_ACL_VERSION_FUID)
aclp->z_ops = &zfs_acl_fuid_ops;
else
aclp->z_ops = &zfs_acl_v0_ops;
return (aclp);
}
zfs_acl_node_t *
zfs_acl_node_alloc(size_t bytes)
{
zfs_acl_node_t *aclnode;
aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
if (bytes) {
aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
aclnode->z_allocdata = aclnode->z_acldata;
aclnode->z_allocsize = bytes;
aclnode->z_size = bytes;
}
return (aclnode);
}
static void
zfs_acl_node_free(zfs_acl_node_t *aclnode)
{
if (aclnode->z_allocsize)
kmem_free(aclnode->z_allocdata, aclnode->z_allocsize);
kmem_free(aclnode, sizeof (zfs_acl_node_t));
}
static void
zfs_acl_release_nodes(zfs_acl_t *aclp)
{
zfs_acl_node_t *aclnode;
- while ((aclnode = list_head(&aclp->z_acl))) {
- list_remove(&aclp->z_acl, aclnode);
+ while ((aclnode = list_remove_head(&aclp->z_acl)))
zfs_acl_node_free(aclnode);
- }
aclp->z_acl_count = 0;
aclp->z_acl_bytes = 0;
}
void
zfs_acl_free(zfs_acl_t *aclp)
{
zfs_acl_release_nodes(aclp);
list_destroy(&aclp->z_acl);
kmem_free(aclp, sizeof (zfs_acl_t));
}
static boolean_t
zfs_acl_valid_ace_type(uint_t type, uint_t flags)
{
uint16_t entry_type;
switch (type) {
case ALLOW:
case DENY:
case ACE_SYSTEM_AUDIT_ACE_TYPE:
case ACE_SYSTEM_ALARM_ACE_TYPE:
entry_type = flags & ACE_TYPE_FLAGS;
return (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE || entry_type == 0 ||
entry_type == ACE_IDENTIFIER_GROUP);
default:
if (type <= MAX_ACE_TYPE)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
/*
* first check type of entry
*/
if (!zfs_acl_valid_ace_type(type, iflags))
return (B_FALSE);
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (aclp->z_version < ZFS_ACL_VERSION_FUID)
return (B_FALSE);
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
}
/*
* next check inheritance level flags
*/
if (obj_type == VDIR &&
(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) {
if ((iflags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE)) == 0) {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void *
zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who,
uint32_t *access_mask, uint16_t *iflags, uint16_t *type)
{
zfs_acl_node_t *aclnode;
ASSERT3P(aclp, !=, NULL);
if (start == NULL) {
aclnode = list_head(&aclp->z_acl);
if (aclnode == NULL)
return (NULL);
aclp->z_next_ace = aclnode->z_acldata;
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
}
aclnode = aclp->z_curr_node;
if (aclnode == NULL)
return (NULL);
if (aclnode->z_ace_idx >= aclnode->z_ace_count) {
aclnode = list_next(&aclp->z_acl, aclnode);
if (aclnode == NULL)
return (NULL);
else {
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
aclp->z_next_ace = aclnode->z_acldata;
}
}
if (aclnode->z_ace_idx < aclnode->z_ace_count) {
void *acep = aclp->z_next_ace;
size_t ace_size;
/*
* Make sure we don't overstep our bounds
*/
ace_size = aclp->z_ops->ace_size(acep);
if (((caddr_t)acep + ace_size) >
((caddr_t)aclnode->z_acldata + aclnode->z_size)) {
return (NULL);
}
*iflags = aclp->z_ops->ace_flags_get(acep);
*type = aclp->z_ops->ace_type_get(acep);
*access_mask = aclp->z_ops->ace_mask_get(acep);
*who = aclp->z_ops->ace_who_get(acep);
aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size;
aclnode->z_ace_idx++;
return ((void *)acep);
}
return (NULL);
}
static uintptr_t
zfs_ace_walk(void *datap, uintptr_t cookie, int aclcnt,
uint16_t *flags, uint16_t *type, uint32_t *mask)
{
(void) aclcnt;
zfs_acl_t *aclp = datap;
zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie;
uint64_t who;
acep = zfs_acl_next_ace(aclp, acep, &who, mask,
flags, type);
return ((uintptr_t)acep);
}
/*
* Copy ACE to internal ZFS format.
 * While processing the ACL, each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
static int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
int i;
uint16_t entry_type;
zfs_ace_t *aceptr = z_acl;
ace_t *acep = datap;
zfs_object_ace_t *zobjacep;
ace_object_t *aceobjp;
for (i = 0; i != aclcnt; i++) {
aceptr->z_hdr.z_access_mask = acep->a_access_mask;
aceptr->z_hdr.z_flags = acep->a_flags;
aceptr->z_hdr.z_type = acep->a_type;
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
memcpy(zobjacep->z_object_type, aceobjp->a_obj_type,
sizeof (aceobjp->a_obj_type));
memcpy(zobjacep->z_inherit_type,
aceobjp->a_inherit_obj_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
default:
acep = (ace_t *)((caddr_t)acep + sizeof (ace_t));
}
aceptr = (zfs_ace_t *)((caddr_t)aceptr +
aclp->z_ops->ace_size(aceptr));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
uint32_t access_mask;
uint16_t iflags, type;
zfs_ace_hdr_t *zacep = NULL;
ace_t *acep = datap;
ace_object_t *objacep;
zfs_object_ace_t *zobjacep;
size_t ace_size;
uint16_t entry_type;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (filter) {
continue;
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
memcpy(objacep->a_obj_type,
zobjacep->z_object_type,
sizeof (zobjacep->z_object_type));
memcpy(objacep->a_inherit_obj_type,
zobjacep->z_inherit_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
default:
ace_size = sizeof (ace_t);
break;
}
entry_type = (iflags & ACE_TYPE_FLAGS);
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
acep->a_who = zfs_fuid_map_id(zfsvfs, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
acep->a_who = (uid_t)(int64_t)who;
}
acep->a_access_mask = access_mask;
acep->a_flags = iflags;
acep->a_type = type;
acep = (ace_t *)((caddr_t)acep + ace_size);
}
}
static int
zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep,
zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
int i;
zfs_oldace_t *aceptr = z_acl;
for (i = 0; i != aclcnt; i++, aceptr++) {
aceptr->z_access_mask = acep[i].a_access_mask;
aceptr->z_type = acep[i].a_type;
aceptr->z_flags = acep[i].a_flags;
aceptr->z_fuid = acep[i].a_who;
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_type, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* convert old ACL format to new
*/
void
zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
{
zfs_oldace_t *oldaclp;
int i;
uint16_t type, iflags;
uint32_t access_mask;
uint64_t who;
void *cookie = NULL;
zfs_acl_node_t *newaclnode;
ASSERT3U(aclp->z_version, ==, ZFS_ACL_VERSION_INITIAL);
/*
* First create the ACE in a contiguous piece of memory
* for zfs_copy_ace_2_fuid().
*
* We only convert an ACL once, so this won't happen
 * every time.
*/
oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
KM_SLEEP);
i = 0;
while ((cookie = zfs_acl_next_ace(aclp, cookie, &who,
&access_mask, &iflags, &type))) {
oldaclp[i].z_flags = iflags;
oldaclp[i].z_type = type;
oldaclp[i].z_fuid = who;
oldaclp[i++].z_access_mask = access_mask;
}
newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
sizeof (zfs_object_ace_t));
aclp->z_ops = &zfs_acl_fuid_ops;
VERIFY0(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp,
oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
&newaclnode->z_size, NULL, cr));
newaclnode->z_ace_count = aclp->z_acl_count;
aclp->z_version = ZFS_ACL_VERSION;
kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t));
/*
* Release all previous ACL nodes
*/
zfs_acl_release_nodes(aclp);
list_insert_head(&aclp->z_acl, newaclnode);
aclp->z_acl_bytes = newaclnode->z_size;
aclp->z_acl_count = newaclnode->z_ace_count;
}
/*
* Convert unix access mask to v4 access mask
*/
static uint32_t
zfs_unix_to_v4(uint32_t access_mask)
{
uint32_t new_mask = 0;
if (access_mask & S_IXOTH)
new_mask |= ACE_EXECUTE;
if (access_mask & S_IWOTH)
new_mask |= ACE_WRITE_DATA;
if (access_mask & S_IROTH)
new_mask |= ACE_READ_DATA;
return (new_mask);
}
static void
zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask,
uint16_t access_type, uint64_t fuid, uint16_t entry_type)
{
uint16_t type = entry_type & ACE_TYPE_FLAGS;
aclp->z_ops->ace_mask_set(acep, access_mask);
aclp->z_ops->ace_type_set(acep, access_type);
aclp->z_ops->ace_flags_set(acep, entry_type);
if ((type != ACE_OWNER && type != OWNING_GROUP &&
type != ACE_EVERYONE))
aclp->z_ops->ace_who_set(acep, fuid);
}
/*
* Determine mode of file based on ACL.
*/
uint64_t
zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp,
uint64_t *pflags, uint64_t fuid, uint64_t fgid)
{
int entry_type;
mode_t mode;
mode_t seen = 0;
zfs_ace_hdr_t *acep = NULL;
uint64_t who;
uint16_t iflags, type;
uint32_t access_mask;
boolean_t an_exec_denied = B_FALSE;
mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));
while ((acep = zfs_acl_next_ace(aclp, acep, &who,
&access_mask, &iflags, &type))) {
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* Skip over any inherit_only ACEs
*/
if (iflags & ACE_INHERIT_ONLY_ACE)
continue;
if (entry_type == ACE_OWNER || (entry_type == 0 &&
who == fuid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRUSR))) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWUSR))) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXUSR))) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
} else if (entry_type == OWNING_GROUP ||
(entry_type == ACE_IDENTIFIER_GROUP && who == fgid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRGRP))) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWGRP))) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXGRP))) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
} else if (entry_type == ACE_EVERYONE) {
if ((access_mask & ACE_READ_DATA)) {
if (!(seen & S_IRUSR)) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if (!(seen & S_IRGRP)) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if (!(seen & S_IROTH)) {
seen |= S_IROTH;
if (type == ALLOW) {
mode |= S_IROTH;
}
}
}
if ((access_mask & ACE_WRITE_DATA)) {
if (!(seen & S_IWUSR)) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if (!(seen & S_IWGRP)) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if (!(seen & S_IWOTH)) {
seen |= S_IWOTH;
if (type == ALLOW) {
mode |= S_IWOTH;
}
}
}
if ((access_mask & ACE_EXECUTE)) {
if (!(seen & S_IXUSR)) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
if (!(seen & S_IXGRP)) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
if (!(seen & S_IXOTH)) {
seen |= S_IXOTH;
if (type == ALLOW) {
mode |= S_IXOTH;
}
}
}
} else {
/*
* Only care if this IDENTIFIER_GROUP or
 * USER ACE denies execute access to someone;
 * the mode is not affected.
*/
if ((access_mask & ACE_EXECUTE) && type == DENY)
an_exec_denied = B_TRUE;
}
}
/*
* Failure to allow is effectively a deny, so execute permission
* is denied if it was never mentioned or if we explicitly
* weren't allowed it.
*/
if (!an_exec_denied &&
((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS ||
(mode & ALL_MODE_EXECS) != ALL_MODE_EXECS))
an_exec_denied = B_TRUE;
if (an_exec_denied)
*pflags &= ~ZFS_NO_EXECS_DENIED;
else
*pflags |= ZFS_NO_EXECS_DENIED;
return (mode);
}
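zfs_mode_compute() above follows a simple rule per permission bit: the first ACE that mentions the bit decides it (ALLOW sets it, DENY leaves it clear), and later ACEs cannot change the decision. A hedged, reduced sketch of that accumulation; the types, constants, and the flattened everyone@ handling are simplifications, not the ZFS ones:

#include <assert.h>
#include <stdbool.h>
#include <sys/stat.h>

struct toy_ace {
	bool allow;	/* ALLOW vs. DENY */
	mode_t bits;	/* mode bits this ACE speaks for */
};

static mode_t
toy_mode_compute(const struct toy_ace *aces, int count)
{
	mode_t mode = 0, seen = 0;

	for (int i = 0; i < count; i++) {
		/* Only bits not yet decided by an earlier ACE matter. */
		mode_t fresh = aces[i].bits & ~seen;

		seen |= fresh;
		if (aces[i].allow)
			mode |= fresh;
	}
	return (mode);
}

int
main(void)
{
	/* owner@ allow rw, group@ allow r, everyone@ allow r => 0644 */
	const struct toy_ace acl[] = {
		{ true, S_IRUSR | S_IWUSR },
		{ true, S_IRGRP },
		{ true, S_IROTH },
	};

	assert(toy_mode_compute(acl, 3) == 0644);
	return (0);
}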
/*
* Read an external acl object. If the intent is to modify, always
* create a new acl and leave any cached acl in place.
*/
int
zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
boolean_t will_modify)
{
zfs_acl_t *aclp;
int aclsize;
int acl_count;
zfs_acl_node_t *aclnode;
zfs_acl_phys_t znode_acl;
int version;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(zp), __func__);
if (zp->z_acl_cached && !will_modify) {
*aclpp = zp->z_acl_cached;
return (0);
}
version = zfs_znode_acl_version(zp);
if ((error = zfs_acl_znode_info(zp, &aclsize,
&acl_count, &znode_acl)) != 0) {
goto done;
}
aclp = zfs_acl_alloc(version);
aclp->z_acl_count = acl_count;
aclp->z_acl_bytes = aclsize;
aclnode = zfs_acl_node_alloc(aclsize);
aclnode->z_ace_count = aclp->z_acl_count;
aclnode->z_size = aclsize;
if (!zp->z_is_sa) {
if (znode_acl.z_acl_extern_obj) {
error = dmu_read(zp->z_zfsvfs->z_os,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
memcpy(aclnode->z_acldata, znode_acl.z_ace_data,
aclnode->z_size);
}
} else {
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zp->z_zfsvfs),
aclnode->z_acldata, aclnode->z_size);
}
if (error != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
goto done;
}
list_insert_head(&aclp->z_acl, aclnode);
*aclpp = aclp;
if (!will_modify)
zp->z_acl_cached = aclp;
done:
return (error);
}
void
zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
boolean_t start, void *userdata)
{
(void) buflen;
zfs_acl_locator_cb_t *cb = (zfs_acl_locator_cb_t *)userdata;
if (start) {
cb->cb_acl_node = list_head(&cb->cb_aclp->z_acl);
} else {
cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
cb->cb_acl_node);
}
ASSERT3P(cb->cb_acl_node, !=, NULL);
*dataptr = cb->cb_acl_node->z_acldata;
*length = cb->cb_acl_node->z_size;
}
int
zfs_acl_chown_setattr(znode_t *zp)
{
int error;
zfs_acl_t *aclp;
if (zp->z_zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
ASSERT_VOP_IN_SEQC(ZTOV(zp));
}
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if ((error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE)) == 0)
zp->z_mode = zfs_mode_compute(zp->z_mode, aclp,
&zp->z_pflags, zp->z_uid, zp->z_gid);
return (error);
}
/*
* common code for setting ACLs.
*
* This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl.
* zfs_setacl passes a non-NULL inherit pointer (ihp) to indicate that it's
* already checked the acl and knows whether to inherit.
*/
int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
sa_bulk_attr_t bulk[5];
uint64_t ctime[2];
int count = 0;
zfs_acl_phys_t acl_phys;
if (zp->z_zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_IN_SEQC(ZTOV(zp));
}
mode = zp->z_mode;
mode = zfs_mode_compute(mode, aclp, &zp->z_pflags,
zp->z_uid, zp->z_gid);
zp->z_mode = mode;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
/*
* Upgrade needed?
*/
if (!zfsvfs->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
(zfsvfs->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT3U(aclp->z_version, >=, ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
}
/*
* Arrgh, we have to handle old on disk format
* as well as newer (preferred) SA format.
*/
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
uint64_t off = 0;
uint64_t aoid;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
aoid = acl_phys.z_acl_extern_obj;
if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
/*
* If ACL was previously external and we are now
* converting to new ACL format then release old
* ACL object and create a new one.
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
error = dmu_object_free(zfsvfs->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
aoid = dmu_object_alloc(zfsvfs->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_OLD_MAX_BONUSLEN : 0, tx);
} else {
(void) dmu_object_set_blocksize(zfsvfs->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
dmu_write(zfsvfs->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
} else {
void *start = acl_phys.z_ace_data;
/*
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
error = dmu_object_free(zfsvfs->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
acl_phys.z_acl_extern_obj = 0;
}
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
}
/*
* If Old version then swap count/bytes to match old
* layout of znode_acl_phys_t.
*/
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
acl_phys.z_acl_size = aclp->z_acl_count;
acl_phys.z_acl_count = aclp->z_acl_bytes;
} else {
acl_phys.z_acl_size = aclp->z_acl_bytes;
acl_phys.z_acl_count = aclp->z_acl_count;
}
acl_phys.z_acl_version = aclp->z_version;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (acl_phys));
}
/*
* Replace ACL wide bits, but first clear them.
*/
zp->z_pflags &= ~ZFS_ACL_WIDE_FLAGS;
zp->z_pflags |= aclp->z_hints;
if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0)
zp->z_pflags |= ZFS_ACL_TRIVIAL;
zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime);
return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
}
static void
zfs_acl_chmod(vtype_t vtype, uint64_t mode, boolean_t split, boolean_t trim,
zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
int new_count, new_bytes;
int ace_size;
int entry_type;
uint16_t iflags, type;
uint32_t access_mask;
zfs_acl_node_t *newnode;
size_t abstract_size = aclp->z_ops->ace_abstract_size();
void *zacep;
boolean_t isdir;
trivial_acl_t masks;
new_count = new_bytes = 0;
isdir = (vtype == VDIR);
acl_trivial_access_masks((mode_t)mode, isdir, &masks);
newnode = zfs_acl_node_alloc((abstract_size * 6) + aclp->z_acl_bytes);
zacep = newnode->z_acldata;
if (masks.allow0) {
zfs_set_ace(aclp, zacep, masks.allow0, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny1) {
zfs_set_ace(aclp, zacep, masks.deny1, DENY, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny2) {
zfs_set_ace(aclp, zacep, masks.deny2, DENY, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* ACEs used to represent the file mode may be divided
* into an equivalent pair of inherit-only and regular
* ACEs, if they are inheritable.
* Skip regular ACEs, which are replaced by the new mode.
*/
if (split && (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)) {
if (!isdir || !(iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
continue;
/*
* We preserve owner@, group@, or everyone@
* permissions, if they are inheritable, by
* copying them to inherit_only ACEs. This
* prevents inheritable permissions from being
* altered along with the file mode.
*/
iflags |= ACE_INHERIT_ONLY_ACE;
}
/*
* If this ACL has any inheritable ACEs, mark that in
* the hints (which are later masked into the pflags)
* so create knows to do inheritance.
*/
if (isdir && (iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if ((type != ALLOW && type != DENY) ||
(iflags & ACE_INHERIT_ONLY_ACE)) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
break;
}
} else {
/*
* Limit permissions granted by ACEs to be no greater
* than permissions of the requested group mode.
* Applies when the "aclmode" property is set to
* "groupmask".
*/
if ((type == ALLOW) && trim)
access_mask &= masks.group;
}
zfs_set_ace(aclp, zacep, access_mask, type, who, iflags);
ace_size = aclp->z_ops->ace_size(acep);
zacep = (void *)((uintptr_t)zacep + ace_size);
new_count++;
new_bytes += ace_size;
}
zfs_set_ace(aclp, zacep, masks.owner, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.group, ALLOW, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.everyone, ALLOW, -1, ACE_EVERYONE);
new_count += 3;
new_bytes += abstract_size * 3;
zfs_acl_release_nodes(aclp);
aclp->z_acl_count = new_count;
aclp->z_acl_bytes = new_bytes;
newnode->z_ace_count = new_count;
newnode->z_size = new_bytes;
list_insert_tail(&aclp->z_acl, newnode);
}
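/*
 * Illustrative example of the trivial-ACL synthesis above (a sketch of the
 * resulting layout, not a verbatim ACL dump): chmod 0705 on a regular file
 * produces, in order,
 *
 * owner@:read_data/execute:allow (masks.allow0)
 * group@:read_data/execute:deny (masks.deny2)
 * owner@:<full owner mask>:allow (masks.owner)
 * group@:<base bits only>:allow (masks.group)
 * everyone@:read_data/execute:allow (masks.everyone)
 *
 * allow0, deny1 and deny2 exist only so that the owner and group end up
 * with exactly their mode bits despite the broader trailing everyone@
 * entry; that is why the new node is sized for (abstract_size * 6) plus
 * any ACEs preserved by the loop above.
 */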
int
zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
{
int error = 0;
mutex_enter(&zp->z_acl_lock);
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_DISCARD)
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
else
error = zfs_acl_node_read(zp, B_TRUE, aclp, B_TRUE);
if (error == 0) {
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
zfs_acl_chmod(ZTOV(zp)->v_type, mode, B_TRUE,
(zp->z_zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK), *aclp);
}
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Should ACE be inherited?
*/
static int
zfs_ace_can_use(vtype_t vtype, uint16_t acep_flags)
{
int iflags = (acep_flags & 0xf);
if ((vtype == VDIR) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
return (1);
else if (iflags & ACE_FILE_INHERIT_ACE)
return (!((vtype == VDIR) &&
(iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
return (0);
}
/*
* inherit inheritable ACEs from parent
*/
static zfs_acl_t *
zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep = NULL;
void *acep;
zfs_acl_node_t *aclnode;
zfs_acl_t *aclp = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t iflags, newflags, type;
size_t ace_size;
void *data1, *data2;
size_t data1sz, data2sz;
uint_t aclinherit;
boolean_t isdir = (vtype == VDIR);
boolean_t isreg = (vtype == VREG);
*need_chmod = B_TRUE;
aclp = zfs_acl_alloc(paclp->z_version);
aclinherit = zfsvfs->z_acl_inherit;
if (aclinherit == ZFS_ACL_DISCARD || vtype == VLNK)
return (aclp);
while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
&access_mask, &iflags, &type))) {
/*
* don't inherit bogus ACEs
*/
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
/*
* Check if ACE is inheritable by this vnode
*/
if ((aclinherit == ZFS_ACL_NOALLOW && type == ALLOW) ||
!zfs_ace_can_use(vtype, iflags))
continue;
/*
* If owner@, group@, or everyone@ entries are inheritable,
* then zfs_acl_chmod() isn't needed.
*/
if ((aclinherit == ZFS_ACL_PASSTHROUGH ||
aclinherit == ZFS_ACL_PASSTHROUGH_X) &&
((iflags & (ACE_OWNER|ACE_EVERYONE)) ||
((iflags & OWNING_GROUP) == OWNING_GROUP)) &&
(isreg || (isdir && (iflags & ACE_DIRECTORY_INHERIT_ACE))))
*need_chmod = B_FALSE;
/*
* Strip inherited execute permission from file if
* not in mode
*/
if (aclinherit == ZFS_ACL_PASSTHROUGH_X && type == ALLOW &&
!isdir && ((mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)) {
access_mask &= ~ACE_EXECUTE;
}
/*
* Strip write_acl and write_owner from permissions
* when inheriting an ACE
*/
if (aclinherit == ZFS_ACL_RESTRICTED && type == ALLOW) {
access_mask &= ~RESTRICTED_CLEAR;
}
ace_size = aclp->z_ops->ace_size(pacep);
aclnode = zfs_acl_node_alloc(ace_size);
list_insert_tail(&aclp->z_acl, aclnode);
acep = aclnode->z_acldata;
zfs_set_ace(aclp, acep, access_mask, type,
who, iflags|ACE_INHERITED_ACE);
/*
* Copy special opaque data if any
*/
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
data2sz = aclp->z_ops->ace_data(acep, &data2);
VERIFY3U(data2sz, ==, data1sz);
memcpy(data2, data1, data2sz);
}
aclp->z_acl_count++;
aclnode->z_ace_count++;
aclp->z_acl_bytes += aclnode->z_size;
newflags = aclp->z_ops->ace_flags_get(acep);
/*
* If ACE is not to be inherited further, or if the vnode is
* not a directory, remove all inheritance flags
*/
if (!isdir || (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)) {
newflags &= ~ALL_INHERIT;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
continue;
}
/*
* This directory has an inheritable ACE
*/
aclp->z_hints |= ZFS_INHERIT_ACE;
/*
* If only FILE_INHERIT is set then turn on
* inherit_only
*/
if ((iflags & (ACE_FILE_INHERIT_ACE |
ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) {
newflags |= ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
} else {
newflags &= ~ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
}
}
if (zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
aclp->z_acl_count != 0) {
*need_chmod = B_FALSE;
}
return (aclp);
}
/*
* Create file system object initial permissions
* including inheritable ACEs.
* Also, create FUIDs for owner and group.
*/
int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids, zidmap_t *mnt_ns)
{
int error;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zfs_acl_t *paclp;
gid_t gid;
boolean_t need_chmod = B_TRUE;
boolean_t trim = B_FALSE;
boolean_t inherited = B_FALSE;
if ((flag & IS_ROOT_NODE) == 0) {
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
} else
ASSERT3P(dzp->z_vnode, ==, NULL);
memset(acl_ids, 0, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
if (vsecp)
if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, cr,
&acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
/*
* Determine uid and gid.
*/
if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
((flag & IS_XATTR) && (vap->va_type == VDIR))) {
acl_ids->z_fuid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr,
ZFS_OWNER, &acl_ids->z_fuidp);
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid, cr,
ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
uid_t id = crgetuid(cr);
if (IS_EPHEMERAL(id))
id = UID_NOBODY;
acl_ids->z_fuid = (uint64_t)id;
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
if (acl_ids->z_fgid != dzp->z_gid &&
!groupmember(vap->va_gid, cr) &&
secpolicy_vnode_create_gid(cr) != 0)
acl_ids->z_fgid = 0;
}
if (acl_ids->z_fgid == 0) {
const char *domain;
uint32_t rid;
acl_ids->z_fgid = dzp->z_gid;
gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
cr, ZFS_GROUP);
if (zfsvfs->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain =
zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
domain, rid, FUID_INDEX(acl_ids->z_fgid),
acl_ids->z_fgid, ZFS_GROUP);
}
}
}
/*
* If we're creating a directory, and the parent directory has the
* set-GID bit set, set it on the new directory.
* Otherwise, if the user is neither privileged nor a member of the
* file's new group, clear the file's set-GID bit.
*/
if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) &&
(vap->va_type == VDIR)) {
acl_ids->z_mode |= S_ISGID;
} else {
if ((acl_ids->z_mode & S_ISGID) &&
secpolicy_vnode_setids_setgids(ZTOV(dzp), cr, gid) != 0)
acl_ids->z_mode &= ~S_ISGID;
}
if (acl_ids->z_aclp == NULL) {
mutex_enter(&dzp->z_acl_lock);
if (!(flag & IS_ROOT_NODE) &&
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY0(zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_type, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
zfs_acl_alloc(zfs_acl_version_zp(dzp));
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
if (vap->va_type == VDIR)
acl_ids->z_aclp->z_hints |=
ZFS_ACL_AUTO_INHERIT;
if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH_X)
trim = B_TRUE;
zfs_acl_chmod(vap->va_type, acl_ids->z_mode, B_FALSE,
trim, acl_ids->z_aclp);
}
}
if (inherited || vsecp) {
acl_ids->z_mode = zfs_mode_compute(acl_ids->z_mode,
acl_ids->z_aclp, &acl_ids->z_aclp->z_hints,
acl_ids->z_fuid, acl_ids->z_fgid);
if (ace_trivial_common(acl_ids->z_aclp, 0, zfs_ace_walk) == 0)
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
return (0);
}
/*
* Free ACL and fuid_infop, but not the acl_ids structure
*/
void
zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
{
if (acl_ids->z_aclp)
zfs_acl_free(acl_ids->z_aclp);
if (acl_ids->z_fuidp)
zfs_fuid_info_free(acl_ids->z_fuidp);
acl_ids->z_aclp = NULL;
acl_ids->z_fuidp = NULL;
}
boolean_t
zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid)
{
return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) ||
zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) ||
(projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid)));
}
/*
* Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfs_acl_t *aclp;
ulong_t mask;
int error;
int count = 0;
int largeace = 0;
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr, NULL)))
return (error);
mutex_enter(&zp->z_acl_lock);
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(zp), __func__);
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Scan ACL to determine number of ACEs
*/
if ((zp->z_pflags & ZFS_ACL_OBJ_ACE) && !(mask & VSA_ACE_ALLTYPES)) {
void *zacep = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t type, iflags;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
largeace++;
continue;
default:
count++;
}
}
vsecp->vsa_aclcnt = count;
} else
count = (int)aclp->z_acl_count;
if (mask & VSA_ACECNT) {
vsecp->vsa_aclcnt = count;
}
if (mask & VSA_ACE) {
size_t aclsz;
aclsz = count * sizeof (ace_t) +
sizeof (ace_object_t) * largeace;
vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP);
vsecp->vsa_aclentsz = aclsz;
if (aclp->z_version == ZFS_ACL_VERSION_FUID)
zfs_copy_fuid_2_ace(zp->z_zfsvfs, aclp, cr,
vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
else {
zfs_acl_node_t *aclnode;
void *start = vsecp->vsa_aclentp;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
ASSERT3U((caddr_t)start - (caddr_t)vsecp->vsa_aclentp,
==, aclp->z_acl_bytes);
}
}
if (mask & VSA_ACE_ACLFLAGS) {
vsecp->vsa_aclflags = 0;
if (zp->z_pflags & ZFS_ACL_DEFAULTED)
vsecp->vsa_aclflags |= ACL_DEFAULTED;
if (zp->z_pflags & ZFS_ACL_PROTECTED)
vsecp->vsa_aclflags |= ACL_PROTECTED;
if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT)
vsecp->vsa_aclflags |= ACL_AUTO_INHERIT;
}
mutex_exit(&zp->z_acl_lock);
return (0);
}
int
zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_type,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
zfs_acl_node_t *aclnode;
int aclcnt = vsecp->vsa_aclcnt;
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
if ((error = zfs_copy_ace_2_oldace(obj_type, aclp,
(ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
aclcnt, &aclnode->z_size)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
} else {
if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_type, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
}
aclp->z_acl_bytes = aclnode->z_size;
aclnode->z_ace_count = aclcnt;
aclp->z_acl_count = aclcnt;
list_insert_head(&aclp->z_acl, aclnode);
/*
* If flags are being set then add them to z_hints
*/
if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) {
if (vsecp->vsa_aclflags & ACL_PROTECTED)
aclp->z_hints |= ZFS_ACL_PROTECTED;
if (vsecp->vsa_aclflags & ACL_DEFAULTED)
aclp->z_hints |= ZFS_ACL_DEFAULTED;
if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT)
aclp->z_hints |= ZFS_ACL_AUTO_INHERIT;
}
*zaclp = aclp;
return (0);
}
/*
* Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zilog_t *zilog = zfsvfs->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
zfs_acl_t *aclp;
zfs_fuid_info_t *fuidp = NULL;
boolean_t fuid_dirtied;
uint64_t acl_obj;
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr, NULL)))
return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
/*
* If ACL wide flags aren't being set then preserve any
* existing flags.
*/
if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) {
aclp->z_hints |=
(zp->z_pflags & V4_ACL_WIDE_FLAGS);
}
top:
mutex_enter(&zp->z_acl_lock);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
/*
* If old version and ACL won't fit in bonus and we aren't
* upgrading then take out necessary DMU holds
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes);
}
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
mutex_exit(&zp->z_acl_lock);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
zfs_acl_free(aclp);
return (error);
}
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(error);
ASSERT3P(zp->z_acl_cached, ==, NULL);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
if (fuidp)
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Check accesses of interest (AoI) against attributes of the dataset
* such as read-only. Returns zero if no AoI conflict with dataset
* attributes, otherwise an appropriate errno is returned.
*/
static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
if ((v4_mode & WRITE_MASK) &&
(zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
(!IS_DEVVP(ZTOV(zp)) || (v4_mode & WRITE_MASK_ATTRS))) {
return (SET_ERROR(EROFS));
}
/*
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common().
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(zp->z_pflags & ZFS_IMMUTABLE)) {
return (SET_ERROR(EPERM));
}
/*
* In FreeBSD we allow modifying a directory's contents if ZFS_NOUNLINK
* (sunlnk) is set. We just don't allow directory removal, which is
* handled in zfs_zaccess_delete().
*/
if ((v4_mode & ACE_DELETE) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
return (EPERM);
}
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
return (SET_ERROR(EACCES));
}
return (0);
}
/*
* The primary usage of this function is to loop through all of the
* ACEs in the znode, determining what accesses of interest (AoI) to
* the caller are allowed or denied. The AoI are expressed as bits in
* the working_mode parameter. As each ACE is processed, bits covered
* by that ACE are removed from the working_mode. This removal
* facilitates two things. The first is that when the working mode is
* empty (= 0), we know we've looked at all the AoI. The second is
* that the ACE interpretation rules don't allow a later ACE to undo
* something granted or denied by an earlier ACE. Removing the
* discovered access or denial enforces this rule. At the end of
* processing the ACEs, all AoI that were found to be denied are
* placed into the working_mode, giving the caller a mask of denied
* accesses. Returns:
* 0 if all AoI granted
* EACCES if the denied mask is non-zero
* other error if abnormal failure (e.g., IO error)
*
* A secondary usage of the function is to determine if any of the
* AoI are granted. If an ACE grants any access in
* the working_mode, we immediately short circuit out of the function.
* This mode is chosen by setting anyaccess to B_TRUE. The
* working_mode is not a denied access mask upon exit if the function
* is used in this manner.
*/
static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
uint64_t who;
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
uint32_t deny_mask = 0;
zfs_ace_hdr_t *acep = NULL;
boolean_t checkit;
uid_t gowner;
uid_t fowner;
zfs_fuid_map_ids(zp, cr, &fowner, &gowner);
mutex_enter(&zp->z_acl_lock);
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(zp), __func__);
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
ASSERT3P(zp->z_acl_cached, !=, NULL);
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
uint32_t mask_matched;
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
if (ZTOV(zp)->v_type == VDIR && (iflags & ACE_INHERIT_ONLY_ACE))
continue;
/* Skip ACE if it does not affect any AoI */
mask_matched = (access_mask & *working_mode);
if (!mask_matched)
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
checkit = B_FALSE;
switch (entry_type) {
case ACE_OWNER:
if (uid == fowner)
checkit = B_TRUE;
break;
case OWNING_GROUP:
who = gowner;
zfs_fallthrough;
case ACE_IDENTIFIER_GROUP:
checkit = zfs_groupmember(zfsvfs, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
break;
/* USER Entry */
default:
if (entry_type == 0) {
uid_t newid;
newid = zfs_fuid_map_id(zfsvfs, who, cr,
ZFS_ACE_USER);
if (newid != UID_NOBODY &&
uid == newid)
checkit = B_TRUE;
break;
} else {
mutex_exit(&zp->z_acl_lock);
return (SET_ERROR(EIO));
}
}
if (checkit) {
if (type == DENY) {
DTRACE_PROBE3(zfs__ace__denies,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
deny_mask |= mask_matched;
} else {
DTRACE_PROBE3(zfs__ace__allows,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
if (anyaccess) {
mutex_exit(&zp->z_acl_lock);
return (0);
}
}
*working_mode &= ~mask_matched;
}
/* Are we done? */
if (*working_mode == 0)
break;
}
mutex_exit(&zp->z_acl_lock);
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
return (0);
}
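/*
 * Worked example of the working_mode bookkeeping above (illustrative,
 * assuming a caller in the owning group asking for
 * ACE_READ_DATA|ACE_WRITE_DATA against an ACL of
 *
 * group@:write_data:deny
 * everyone@:read_data/write_data:allow
 *
 * ): the first ACE matches the ACE_WRITE_DATA bit, which is recorded in
 * deny_mask and cleared from *working_mode; the second ACE then matches
 * only the remaining ACE_READ_DATA bit and clears it as an allow.  With
 * *working_mode at 0 the loop exits, deny_mask is folded back in, and the
 * function returns EACCES with *working_mode == ACE_WRITE_DATA, i.e.
 * exactly the accesses that were denied.
 */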
/*
* Return true if any access whatsoever granted, we don't actually
* care what access is granted.
*/
boolean_t
zfs_has_access(znode_t *zp, cred_t *cr)
{
uint32_t have = ACE_ALL_PERMS;
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
uid_t owner;
owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
return (secpolicy_vnode_any_access(cr, ZTOV(zp), owner) == 0);
}
return (B_TRUE);
}
static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int err;
*working_mode = v4_mode;
*check_privs = B_TRUE;
/*
* Short circuit empty requests
*/
if (v4_mode == 0 || zfsvfs->z_replay) {
*working_mode = 0;
return (0);
}
if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) {
*check_privs = B_FALSE;
return (err);
}
/*
* The caller requested that the ACL check be skipped. This
* would only happen if the caller checked VOP_ACCESS() with a
* 32 bit ACE mask and already had the appropriate permissions.
*/
if (skipaclchk) {
*working_mode = 0;
return (0);
}
/*
* Note: ZFS_READONLY represents the "DOS R/O" attribute.
* When that flag is set, we should behave as if write access
* were not granted by anything in the ACL. In particular:
* We _must_ allow writes after opening the file r/w, then
* setting the DOS R/O attribute, and writing some more.
* (Similar to how you can write after fchmod(fd, 0444).)
*
* Therefore ZFS_READONLY is ignored in the dataset check
* above, and checked here as if part of the ACL check.
* Also note: DOS R/O is ignored for directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(ZTOV(zp)->v_type != VDIR) &&
(zp->z_pflags & ZFS_READONLY)) {
return (SET_ERROR(EPERM));
}
return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr));
}
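/*
 * A userland sketch of the ZFS_READONLY semantics described above
 * (assuming the DOS R/O attribute is toggled through chflags(2) and
 * UF_READONLY, as is normally the case on FreeBSD; "file" is a
 * placeholder path):
 *
 * fd = open("file", O_RDWR);       succeeds
 * chflags("file", UF_READONLY);    sets the DOS R/O attribute
 * write(fd, buf, len);             still succeeds on the open descriptor
 * open("file", O_WRONLY);          now fails (typically EPERM)
 *
 * which is why the flag is checked here, alongside the ACL check, rather
 * than in zfs_zaccess_dataset_check().
 */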
static int
zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr)
{
if (*working_mode != ACE_WRITE_DATA)
return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr));
}
/*
* Check if VEXEC is allowed.
*
* This routine is based on zfs_fastaccesschk_execute which has slowpath
* calling zfs_zaccess. This would be incorrect on FreeBSD (see
* zfs_freebsd_access for the difference). Thus this variant lets the
* caller handle the slowpath (if necessary).
*
* On top of that we perform a lockless check for ZFS_NO_EXECS_DENIED.
*
* Safe access to znode_t is provided by the vnode lock.
*/
int
zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
{
boolean_t is_attr;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (1);
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(ZTOV(zdp)->v_type == VDIR));
if (is_attr)
return (1);
if (zdp->z_pflags & ZFS_NO_EXECS_DENIED)
return (0);
return (1);
}
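/*
 * A sketch of the intended calling convention (not a copy of any specific
 * caller): the lookup fast path tries the lockless check first and only
 * falls back to the full ACL walk when it returns nonzero:
 *
 * if (zfs_fastaccesschk_execute(zdp, cred) == 0)
 *         return (0);     VEXEC definitely allowed
 * return (zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cred, NULL));
 *
 * That is, 0 means "no execute denials, grant VEXEC" while nonzero means
 * "unknown, take the slow path", not "denied".
 */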
/*
* Determine whether Access should be granted/denied.
*
* The least priv subsystem is always consulted as a basic privilege
* can define any form of access.
*/
int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr,
zidmap_t *mnt_ns)
{
uint32_t working_mode;
int error;
int is_attr;
boolean_t check_privs;
znode_t *xzp = NULL;
znode_t *check_zp = zp;
mode_t needed_bits;
uid_t owner;
is_attr = ((zp->z_pflags & ZFS_XATTR) && (ZTOV(zp)->v_type == VDIR));
/*
* In FreeBSD, we don't care about permissions of individual ADS.
* Note that not checking them is not just an optimization - without
* this shortcut, EA operations may bogusly fail with EACCES.
*/
if (zp->z_pflags & ZFS_XATTR)
return (0);
owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
/*
* Map the bits required to the standard vnode flags VREAD|VWRITE|VEXEC
* in needed_bits. Map the bits mapped by working_mode (currently
* missing) in missing_bits.
* Call secpolicy_vnode_access2() with (needed_bits & ~checkmode),
* needed_bits.
*/
needed_bits = 0;
working_mode = mode;
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
owner == crgetuid(cr))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= VREAD;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= VWRITE;
if (working_mode & ACE_EXECUTE)
needed_bits |= VEXEC;
if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
&check_privs, skipaclchk, cr)) == 0) {
if (is_attr)
VN_RELE(ZTOV(xzp));
return (secpolicy_vnode_access2(cr, ZTOV(zp), owner,
needed_bits, needed_bits));
}
if (error && !check_privs) {
if (is_attr)
VN_RELE(ZTOV(xzp));
return (error);
}
if (error && (flags & V_APPEND)) {
error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr);
}
if (error && check_privs) {
mode_t checkmode = 0;
vnode_t *check_vp = ZTOV(check_zp);
/*
* First check for implicit owner permission on
* read_acl/read_attributes
*/
ASSERT3U(working_mode, !=, 0);
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
owner == crgetuid(cr)))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= VREAD;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= VWRITE;
if (working_mode & ACE_EXECUTE)
checkmode |= VEXEC;
error = secpolicy_vnode_access2(cr, check_vp, owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
error = secpolicy_vnode_chown(check_vp, cr, owner);
if (error == 0 && (working_mode & ACE_WRITE_ACL))
error = secpolicy_vnode_setdac(check_vp, cr, owner);
if (error == 0 && (working_mode &
(ACE_DELETE|ACE_DELETE_CHILD)))
error = secpolicy_vnode_remove(check_vp, cr);
if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) {
error = secpolicy_vnode_chown(check_vp, cr, owner);
}
if (error == 0) {
/*
* See if any bits other than those already checked
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
error = secpolicy_vnode_access2(cr, ZTOV(zp), owner,
needed_bits, needed_bits);
}
if (is_attr)
VN_RELE(ZTOV(xzp));
return (error);
}
/*
* Translate traditional unix VREAD/VWRITE/VEXEC mode into
* NFSv4-style ZFS ACL format and call zfs_zaccess()
*/
int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr,
zidmap_t *mnt_ns)
{
return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr,
mnt_ns));
}
/*
* Access function for secpolicy_vnode_setattr
*/
int
zfs_zaccess_unix(void *zp, int mode, cred_t *cr)
{
int v4_mode = zfs_unix_to_v4(mode >> 6);
return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr, NULL));
}
static int
zfs_delete_final_check(znode_t *zp, znode_t *dzp,
mode_t available_perms, cred_t *cr)
{
int error;
uid_t downer;
downer = zfs_fuid_map_id(dzp->z_zfsvfs, dzp->z_uid, cr, ZFS_OWNER);
error = secpolicy_vnode_access2(cr, ZTOV(dzp),
downer, available_perms, VWRITE|VEXEC);
if (error == 0)
error = zfs_sticky_remove_access(dzp, zp, cr);
return (error);
}
/*
* Determine whether access should be granted or denied, without
* consulting least priv subsystem.
*
* The following chart is the recommended NFSv4 enforcement for
* ability to delete an object.
*
* -------------------------------------------------------
* | Parent Dir | Target Object Permissions |
* | permissions | |
* -------------------------------------------------------
* | | ACL Allows | ACL Denies| Delete |
* | | Delete | Delete | unspecified|
* -------------------------------------------------------
* | ACL Allows | Permit | Permit | Permit |
* | DELETE_CHILD | |
* -------------------------------------------------------
* | ACL Denies | Permit | Deny | Deny |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL specifies | | | |
* | only allow | Permit | Permit | Permit |
* | write and | | | |
* | execute | | | |
* -------------------------------------------------------
* | ACL denies | | | |
* | write and | Permit | Deny | Deny |
* | execute | | | |
* -------------------------------------------------------
* ^
* |
* No search privilege, can't even look up file?
*
*/
int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr, zidmap_t *mnt_ns)
{
uint32_t dzp_working_mode = 0;
uint32_t zp_working_mode = 0;
int dzp_error, zp_error;
mode_t available_perms;
boolean_t dzpcheck_privs = B_TRUE;
boolean_t zpcheck_privs = B_TRUE;
/*
* We want specific DELETE permissions to
* take precedence over WRITE/EXECUTE. We don't
* want an ACL such as this to mess us up.
* user:joe:write_data:deny,user:joe:delete:allow
*
* However, deny permissions may ultimately be overridden
* by secpolicy_vnode_access().
*
* We will ask for all of the necessary permissions and then
* look at the working modes from the directory and target object
* to determine what was found.
*/
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
return (SET_ERROR(EPERM));
/*
* First row
* If the directory permissions allow the delete, we are done.
*/
if ((dzp_error = zfs_zaccess_common(dzp, ACE_DELETE_CHILD,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr)) == 0)
return (0);
/*
* If target object has delete permission then we are done
*/
if ((zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode,
&zpcheck_privs, B_FALSE, cr)) == 0)
return (0);
ASSERT(dzp_error);
ASSERT(zp_error);
if (!dzpcheck_privs)
return (dzp_error);
if (!zpcheck_privs)
return (zp_error);
/*
* Second row
*
* If directory returns EACCES then delete_child was denied
* due to deny delete_child. In this case send the request through
* secpolicy_vnode_remove(). We don't use zfs_delete_final_check()
* since that *could* allow the delete based on write/execute permission
* and we want delete permissions to override write/execute.
*/
if (dzp_error == EACCES) {
/* XXXPJD: s/dzp/zp/ ? */
return (secpolicy_vnode_remove(ZTOV(dzp), cr));
}
/*
* Third Row
* only need to see if we have write/execute on directory.
*/
dzp_error = zfs_zaccess_common(dzp, ACE_EXECUTE|ACE_WRITE_DATA,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr);
if (dzp_error != 0 && !dzpcheck_privs)
return (dzp_error);
/*
* Fourth row
*/
available_perms = (dzp_working_mode & ACE_WRITE_DATA) ? 0 : VWRITE;
available_perms |= (dzp_working_mode & ACE_EXECUTE) ? 0 : VEXEC;
return (zfs_delete_final_check(zp, dzp, available_perms, cr));
}
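/*
 * Worked example against the chart above (illustrative): suppose the
 * parent directory carries user:joe:delete_child:deny while the target
 * file carries user:joe:delete:allow.  The first check (ACE_DELETE_CHILD
 * on the directory) fails, the second check (ACE_DELETE on the target)
 * succeeds, so the unlink is permitted - the "ACL Denies DELETE_CHILD" /
 * "ACL Allows Delete" cell.  If neither object mentions delete at all,
 * both checks return with the delete bits still unresolved and the
 * decision falls through to plain write/execute on the directory via
 * zfs_delete_final_check(), i.e. the bottom two rows.
 */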
int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
znode_t *tzp, cred_t *cr, zidmap_t *mnt_ns)
{
int add_perm;
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
add_perm = (ZTOV(szp)->v_type == VDIR) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;
/*
* Rename permissions are a combination of delete permission +
* add file/subdir permission.
*
* BSD operating systems also require write permission
* on the directory being moved from one parent directory
* to another.
*/
if (ZTOV(szp)->v_type == VDIR && ZTOV(sdzp) != ZTOV(tdzp)) {
if ((error = zfs_zaccess(szp, ACE_WRITE_DATA, 0, B_FALSE, cr,
mnt_ns)))
return (error);
}
/*
* first make sure we do the delete portion.
*
* If that succeeds then check for add_file/add_subdir permissions
*/
if ((error = zfs_zaccess_delete(sdzp, szp, cr, mnt_ns)))
return (error);
/*
* If we have a tzp, see if we can delete it?
*/
if (tzp && (error = zfs_zaccess_delete(tdzp, tzp, cr, mnt_ns)))
return (error);
/*
* Now check for add permissions
*/
error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr, mnt_ns);
return (error);
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
index b63899ddede0..49b97ae8f590 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
@@ -1,2607 +1,2521 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/acl.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/jail.h>
#include <sys/osd.h>
#include <ufs/ufs/quota.h>
#include <sys/zfs_quota.h>
#include "zfs_comutil.h"
#ifndef MNTK_VMSETSIZE_BUG
#define MNTK_VMSETSIZE_BUG 0
#endif
#ifndef MNTK_NOMSYNC
#define MNTK_NOMSYNC 8
#endif
struct mtx zfs_debug_mtx;
MTX_SYSINIT(zfs_debug_mtx, &zfs_debug_mtx, "zfs_debug", MTX_DEF);
SYSCTL_NODE(_vfs, OID_AUTO, zfs, CTLFLAG_RW, 0, "ZFS file system");
int zfs_super_owner;
SYSCTL_INT(_vfs_zfs, OID_AUTO, super_owner, CTLFLAG_RW, &zfs_super_owner, 0,
"File system owners can perform privileged operation on file systems");
int zfs_debug_level;
SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RWTUN, &zfs_debug_level, 0,
"Debug level");
int zfs_bclone_enabled;
SYSCTL_INT(_vfs_zfs, OID_AUTO, bclone_enabled, CTLFLAG_RWTUN,
&zfs_bclone_enabled, 0, "Enable block cloning");
struct zfs_jailparam {
int mount_snapshot;
};
static struct zfs_jailparam zfs_jailparam0 = {
.mount_snapshot = 0,
};
static int zfs_jailparam_slot;
SYSCTL_JAIL_PARAM_SYS_NODE(zfs, CTLFLAG_RW, "Jail ZFS parameters");
SYSCTL_JAIL_PARAM(_zfs, mount_snapshot, CTLTYPE_INT | CTLFLAG_RW, "I",
"Allow mounting snapshots in the .zfs directory for unjailed datasets");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, version, CTLFLAG_RD, 0, "ZFS versions");
static int zfs_version_acl = ZFS_ACL_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, acl, CTLFLAG_RD, &zfs_version_acl, 0,
"ZFS_ACL_VERSION");
static int zfs_version_spa = SPA_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, spa, CTLFLAG_RD, &zfs_version_spa, 0,
"SPA_VERSION");
static int zfs_version_zpl = ZPL_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, zpl, CTLFLAG_RD, &zfs_version_zpl, 0,
"ZPL_VERSION");
#if __FreeBSD_version >= 1400018
static int zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg,
bool *mp_busy);
#else
static int zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg);
#endif
static int zfs_mount(vfs_t *vfsp);
static int zfs_umount(vfs_t *vfsp, int fflag);
static int zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp);
static int zfs_statfs(vfs_t *vfsp, struct statfs *statp);
static int zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp);
static int zfs_sync(vfs_t *vfsp, int waitfor);
#if __FreeBSD_version >= 1300098
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors);
#else
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors);
#endif
static int zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp);
static void zfs_freevfs(vfs_t *vfsp);
struct vfsops zfs_vfsops = {
.vfs_mount = zfs_mount,
.vfs_unmount = zfs_umount,
#if __FreeBSD_version >= 1300049
.vfs_root = vfs_cache_root,
.vfs_cachedroot = zfs_root,
#else
.vfs_root = zfs_root,
#endif
.vfs_statfs = zfs_statfs,
.vfs_vget = zfs_vget,
.vfs_sync = zfs_sync,
.vfs_checkexp = zfs_checkexp,
.vfs_fhtovp = zfs_fhtovp,
.vfs_quotactl = zfs_quotactl,
};
#ifdef VFCF_CROSS_COPY_FILE_RANGE
VFS_SET(zfs_vfsops, zfs,
VFCF_DELEGADMIN | VFCF_JAIL | VFCF_CROSS_COPY_FILE_RANGE);
#else
VFS_SET(zfs_vfsops, zfs, VFCF_DELEGADMIN | VFCF_JAIL);
#endif
/*
* We need to keep a count of active fs's.
* This is necessary to prevent our module
* from being unloaded after a umount -f
*/
static uint32_t zfs_active_fs_count = 0;
int
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
char *setpoint)
{
int error;
zfsvfs_t *zfvp;
vfs_t *vfsp;
objset_t *os;
uint64_t tmp = *val;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
return (error);
error = getzfsvfs_impl(os, &zfvp);
if (error != 0)
return (error);
if (zfvp == NULL)
return (ENOENT);
vfsp = zfvp->z_vfs;
switch (zfs_prop) {
case ZFS_PROP_ATIME:
if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL))
tmp = 1;
break;
case ZFS_PROP_DEVICES:
if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_DEVICES, NULL))
tmp = 1;
break;
case ZFS_PROP_EXEC:
if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL))
tmp = 1;
break;
case ZFS_PROP_SETUID:
if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL))
tmp = 1;
break;
case ZFS_PROP_READONLY:
if (vfs_optionisset(vfsp, MNTOPT_RW, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_RO, NULL))
tmp = 1;
break;
case ZFS_PROP_XATTR:
if (zfvp->z_flags & ZSB_XATTR)
tmp = zfvp->z_xattr;
break;
case ZFS_PROP_NBMAND:
if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL))
tmp = 1;
break;
default:
vfs_unbusy(vfsp);
return (ENOENT);
}
vfs_unbusy(vfsp);
if (tmp != *val) {
if (setpoint)
(void) strcpy(setpoint, "temporary");
*val = tmp;
}
return (0);
}
static int
zfs_getquota(zfsvfs_t *zfsvfs, uid_t id, int isgroup, struct dqblk64 *dqp)
{
int error = 0;
char buf[32];
uint64_t usedobj, quotaobj;
uint64_t quota, used = 0;
timespec_t now;
usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
if (quotaobj == 0 || zfsvfs->z_replay) {
error = ENOENT;
goto done;
}
(void) sprintf(buf, "%llx", (longlong_t)id);
if ((error = zap_lookup(zfsvfs->z_os, quotaobj,
buf, sizeof (quota), 1, &quota)) != 0) {
dprintf("%s(%d): quotaobj lookup failed\n",
__FUNCTION__, __LINE__);
goto done;
}
/*
* quota(8) uses bsoftlimit as "quota", and hardlimit as "limit".
* So we set them to be the same.
*/
dqp->dqb_bsoftlimit = dqp->dqb_bhardlimit = btodb(quota);
error = zap_lookup(zfsvfs->z_os, usedobj, buf, sizeof (used), 1, &used);
if (error && error != ENOENT) {
dprintf("%s(%d): usedobj failed; %d\n",
__FUNCTION__, __LINE__, error);
goto done;
}
dqp->dqb_curblocks = btodb(used);
dqp->dqb_ihardlimit = dqp->dqb_isoftlimit = 0;
vfs_timestamp(&now);
/*
* Setting this to 0 causes FreeBSD quota(8) to print
* the number of days since the epoch, which isn't
* particularly useful.
*/
dqp->dqb_btime = dqp->dqb_itime = now.tv_sec;
done:
return (error);
}
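/*
 * Example of the unit conversion above (illustrative): a 1 GiB ZFS user
 * quota is stored in the quota ZAP as 1073741824 bytes; btodb() turns
 * that into 2097152 DEV_BSIZE (512-byte) blocks, which is the unit that
 * struct dqblk64 and quota(8) expect for dqb_bhardlimit, dqb_bsoftlimit
 * and dqb_curblocks.
 */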
static int
#if __FreeBSD_version >= 1400018
zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg, bool *mp_busy)
#else
zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg)
#endif
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
struct thread *td;
int cmd, type, error = 0;
int bitsize;
zfs_userquota_prop_t quota_type;
struct dqblk64 dqblk = { 0 };
td = curthread;
cmd = cmds >> SUBCMDSHIFT;
type = cmds & SUBCMDMASK;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
if (id == -1) {
switch (type) {
case USRQUOTA:
id = td->td_ucred->cr_ruid;
break;
case GRPQUOTA:
id = td->td_ucred->cr_rgid;
break;
default:
error = EINVAL;
#if __FreeBSD_version < 1400018
if (cmd == Q_QUOTAON || cmd == Q_QUOTAOFF)
vfs_unbusy(vfsp);
#endif
goto done;
}
}
/*
* Map BSD type to:
* ZFS_PROP_USERUSED,
* ZFS_PROP_USERQUOTA,
* ZFS_PROP_GROUPUSED,
* ZFS_PROP_GROUPQUOTA
*/
switch (cmd) {
case Q_SETQUOTA:
case Q_SETQUOTA32:
if (type == USRQUOTA)
quota_type = ZFS_PROP_USERQUOTA;
else if (type == GRPQUOTA)
quota_type = ZFS_PROP_GROUPQUOTA;
else
error = EINVAL;
break;
case Q_GETQUOTA:
case Q_GETQUOTA32:
if (type == USRQUOTA)
quota_type = ZFS_PROP_USERUSED;
else if (type == GRPQUOTA)
quota_type = ZFS_PROP_GROUPUSED;
else
error = EINVAL;
break;
}
/*
* Depending on the cmd, we may need to get
* the ruid and domain (see fuidstr_to_sid?),
* the fuid (how?), or other information.
* Create fuid using zfs_fuid_create(zfsvfs, id,
* ZFS_OWNER or ZFS_GROUP, cr, &fuidp)?
* I think I can use just the id?
*
* Look at zfs_id_overquota() to look up a quota.
* zap_lookup(something, quotaobj, fuidstring,
* sizeof (long long), 1, &quota)
*
* See zfs_set_userquota() to set a quota.
*/
if ((uint32_t)type >= MAXQUOTAS) {
error = EINVAL;
goto done;
}
switch (cmd) {
case Q_GETQUOTASIZE:
bitsize = 64;
error = copyout(&bitsize, arg, sizeof (int));
break;
case Q_QUOTAON:
// As far as I can tell, you can't turn quotas on or off on zfs
error = 0;
#if __FreeBSD_version < 1400018
vfs_unbusy(vfsp);
#endif
break;
case Q_QUOTAOFF:
error = ENOTSUP;
#if __FreeBSD_version < 1400018
vfs_unbusy(vfsp);
#endif
break;
case Q_SETQUOTA:
error = copyin(arg, &dqblk, sizeof (dqblk));
if (error == 0)
error = zfs_set_userquota(zfsvfs, quota_type,
"", id, dbtob(dqblk.dqb_bhardlimit));
break;
case Q_GETQUOTA:
error = zfs_getquota(zfsvfs, id, type == GRPQUOTA, &dqblk);
if (error == 0)
error = copyout(&dqblk, arg, sizeof (dqblk));
break;
default:
error = EINVAL;
break;
}
done:
zfs_exit(zfsvfs, FTAG);
return (error);
}
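/*
 * Userland view of the mapping above (a sketch assuming the stock
 * FreeBSD quotactl(2) interface; "/tank/home" is a placeholder mount
 * point and error handling is omitted):
 *
 * #include <sys/types.h>
 * #include <ufs/ufs/quota.h>
 * #include <stdint.h>
 * #include <stdio.h>
 * #include <unistd.h>
 *
 * struct dqblk dq;
 * if (quotactl("/tank/home", QCMD(Q_GETQUOTA, USRQUOTA),
 *     getuid(), &dq) == 0)
 *         printf("used %ju of %ju blocks\n",
 *             (uintmax_t)dq.dqb_curblocks,
 *             (uintmax_t)dq.dqb_bhardlimit);
 *
 * The request arrives in the Q_GETQUOTA case above and, from there, in
 * zfs_getquota().
 */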
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY));
}
static int
zfs_sync(vfs_t *vfsp, int waitfor)
{
/*
* Data integrity is job one. We don't want a compromised kernel
* writing to the storage pool, so we never sync during panic.
*/
if (panicstr)
return (0);
/*
* Ignore the system syncher. ZFS already commits async data
* at zfs_txg_timeout intervals.
*/
if (waitfor == MNT_LAZY)
return (0);
if (vfsp != NULL) {
/*
* Sync a specific filesystem.
*/
zfsvfs_t *zfsvfs = vfsp->vfs_data;
dsl_pool_t *dp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
dp = dmu_objset_pool(zfsvfs->z_os);
/*
* If the system is shutting down, then skip any
* filesystems which may exist on a suspended pool.
*/
if (rebooting && spa_suspended(dp->dp_spa)) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, 0);
zfs_exit(zfsvfs, FTAG);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
* run sync(8). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
}
return (0);
}
static void
atime_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == TRUE) {
zfsvfs->z_atime = TRUE;
zfsvfs->z_vfs->vfs_flag &= ~MNT_NOATIME;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0);
} else {
zfsvfs->z_atime = FALSE;
zfsvfs->z_vfs->vfs_flag |= MNT_NOATIME;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0);
}
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
else
zfsvfs->z_xattr_sa = B_FALSE;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zfsvfs->z_max_blksz = newval;
zfsvfs->z_vfs->mnt_stat.f_iosize = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval) {
/* XXX locking on vfs_flag? */
zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0);
} else {
/* XXX locking on vfs_flag? */
zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0);
}
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0);
} else {
zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0);
}
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0);
} else {
zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0);
}
}
/*
* The nbmand mount option can be changed at mount time.
* We can't allow it to be toggled on live file systems, or incorrect
* behavior may be seen from CIFS clients.
*
* This property isn't registered via dsl_prop_register(), but this callback
* will be called when a file system is first mounted.
*/
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND, NULL, 0);
} else {
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND, NULL, 0);
}
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_show_ctldir = newval;
}
static void
acl_mode_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_mode = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_inherit = newval;
}
static void
acl_type_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_type = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
struct dsl_dataset *ds = NULL;
objset_t *os = NULL;
zfsvfs_t *zfsvfs = NULL;
uint64_t nbmand;
boolean_t readonly = B_FALSE;
boolean_t do_readonly = B_FALSE;
boolean_t setuid = B_FALSE;
boolean_t do_setuid = B_FALSE;
boolean_t exec = B_FALSE;
boolean_t do_exec = B_FALSE;
boolean_t xattr = B_FALSE;
boolean_t atime = B_FALSE;
boolean_t do_atime = B_FALSE;
boolean_t do_xattr = B_FALSE;
int error = 0;
ASSERT3P(vfsp, !=, NULL);
zfsvfs = vfsp->vfs_data;
ASSERT3P(zfsvfs, !=, NULL);
os = zfsvfs->z_os;
/*
* This function can be called for a snapshot when we update the snapshot's
* mount point, which isn't really supported.
*/
if (dmu_objset_is_snapshot(os))
return (EOPNOTSUPP);
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (vfs_optionisset(vfsp, MNTOPT_RO, NULL) ||
!spa_writeable(dmu_objset_spa(os))) {
readonly = B_TRUE;
do_readonly = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
readonly = B_FALSE;
do_readonly = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
setuid = B_FALSE;
do_setuid = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) {
setuid = B_TRUE;
do_setuid = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) {
exec = B_FALSE;
do_exec = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) {
exec = B_TRUE;
do_exec = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_OFF;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_DIR;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_DIRXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_DIR;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_SAXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_SA;
do_xattr = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL)) {
atime = B_FALSE;
do_atime = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL)) {
atime = B_TRUE;
do_atime = B_TRUE;
}
/*
* We need to enter pool configuration here, so that we can use
* dsl_prop_get_int_ds() to handle the special nbmand property below.
* dsl_prop_get_integer() can not be used, because it has to acquire
* spa_namespace_lock and we can not do that because we already hold
* z_teardown_lock. The problem is that spa_write_cachefile() is called
* with spa_namespace_lock held and the function calls ZFS vnode
* operations to write the cache file and thus z_teardown_lock is
* acquired after spa_namespace_lock.
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
/*
* nbmand is a special property. It can only be changed at
* mount time.
*
* This is weird, but it is documented to only be changeable
* at mount time.
*/
if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) {
nbmand = B_FALSE;
} else if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL)) {
nbmand = B_TRUE;
} else if ((error = dsl_prop_get_int_ds(ds, "nbmand", &nbmand)) != 0) {
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
return (error);
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acl_type_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
zfsvfs);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (do_readonly)
readonly_changed_cb(zfsvfs, readonly);
if (do_setuid)
setuid_changed_cb(zfsvfs, setuid);
if (do_exec)
exec_changed_cb(zfsvfs, exec);
if (do_xattr)
xattr_changed_cb(zfsvfs, xattr);
if (do_atime)
atime_changed_cb(zfsvfs, atime);
nbmand_changed_cb(zfsvfs, nbmand);
return (0);
unregister:
dsl_prop_unregister_all(ds, zfsvfs);
return (error);
}
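/*
* Note on the dsl_prop_register() chain above: the pattern
* "error = error ? error : dsl_prop_register(...)" keeps the first
* failure and, because only one arm of the conditional operator is
* evaluated, skips every later registration. A short walk-through
* with hypothetical values:
*
* error = dsl_prop_register(... ATIME ...);             returns 0
* error = 0 ? 0 : dsl_prop_register(... XATTR ...);     returns EIO
* error = EIO ? EIO : dsl_prop_register(...);           call skipped
*
* The dsl_prop_unregister_all() call under the unregister label then
* drops whatever subset did get registered.
*/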
/*
* Associate this zfsvfs with the given objset, which must be owned.
* This will cache a bunch of on-disk state from the objset in the
* zfsvfs.
*/
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
uint64_t val;
zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
zfsvfs->z_os = os;
error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
if (error != 0)
return (error);
if (zfsvfs->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printf("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
return (SET_ERROR(ENOTSUP));
}
error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
if (error != 0)
return (error);
zfsvfs->z_norm = (int)val;
error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
if (error != 0)
return (error);
zfsvfs->z_utf8 = (val != 0);
error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
if (error != 0)
return (error);
zfsvfs->z_case = (uint_t)val;
error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val);
if (error != 0)
return (error);
zfsvfs->z_acl_type = (uint_t)val;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
zfsvfs->z_case == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
uint64_t sa_obj = 0;
if (zfsvfs->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
if (error != 0)
return (error);
error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
if (error == 0 && val == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
}
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
if (error != 0)
return (error);
if (zfsvfs->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
&zfsvfs->z_root);
if (error != 0)
return (error);
ASSERT3U(zfsvfs->z_root, !=, 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&zfsvfs->z_unlinkedobj);
if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
8, 1, &zfsvfs->z_userquota_obj);
if (error == ENOENT)
zfsvfs->z_userquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
8, 1, &zfsvfs->z_groupquota_obj);
if (error == ENOENT)
zfsvfs->z_groupquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
8, 1, &zfsvfs->z_projectquota_obj);
if (error == ENOENT)
zfsvfs->z_projectquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
zfsvfs->z_userobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
8, 1, &zfsvfs->z_groupobjquota_obj);
if (error == ENOENT)
zfsvfs->z_groupobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
8, 1, &zfsvfs->z_projectobjquota_obj);
if (error == ENOENT)
zfsvfs->z_projectobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
zfsvfs->z_fuid_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
&zfsvfs->z_shares_dir);
if (error == ENOENT)
zfsvfs->z_shares_dir = 0;
else if (error != 0)
return (error);
/*
* Only use the name cache if we are looking for a
* name on a file system that does not require normalization
* or case folding. We can also look there if we happen to be
* on a non-normalizing, mixed sensitivity file system IF we
* are looking for the exact name (which is always the case on
* FreeBSD).
*/
zfsvfs->z_use_namecache = !zfsvfs->z_norm ||
((zfsvfs->z_case == ZFS_CASE_MIXED) &&
!(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER));
return (0);
}
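/*
* Illustrative examples of the z_use_namecache rule above (dataset
* property values are hypothetical):
*
* normalization=none, casesensitivity=sensitive
*     z_norm == 0                          -> namecache is used
* normalization=none, casesensitivity=mixed
*     z_norm == U8_TEXTPREP_TOUPPER only   -> namecache is used
* normalization=formD (any casesensitivity)
*     other z_norm bits are set            -> namecache is not used
*/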
taskq_t *zfsvfs_taskq;
static void
zfsvfs_task_unlinked_drain(void *context, int pending __unused)
{
zfs_unlinked_drain((zfsvfs_t *)context);
}
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
objset_t *os;
zfsvfs_t *zfsvfs;
int error;
boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
/*
* XXX: Fix struct statfs so this isn't necessary!
*
* The 'osname' is used as the filesystem's special node, which means
* it must fit in statfs.f_mntfromname, or else it can't be
* enumerated, so libzfs_mnttab_find() returns NULL, which causes
* 'zfs unmount' to think it's not mounted when it is.
*/
if (strlen(osname) >= MNAMELEN)
return (SET_ERROR(ENAMETOOLONG));
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs,
&os);
if (error != 0) {
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
error = zfsvfs_create_impl(zfvp, zfsvfs, os);
return (error);
}
int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
zfsvfs->z_vfs = NULL;
zfsvfs->z_parent = zfsvfs;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
TASK_INIT(&zfsvfs->z_unlinked_drain_task, 0,
zfsvfs_task_unlinked_drain, zfsvfs);
ZFS_TEARDOWN_INIT(zfsvfs);
ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
for (int i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
dmu_objset_disown(os, B_TRUE, zfsvfs);
*zfvp = NULL;
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
*zfvp = zfsvfs;
return (0);
}
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
int error;
/*
* Check for a bad on-disk format version now since we
* lied about owning the dataset readonly before.
*/
if (!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
dmu_objset_incompatible_encryption_version(zfsvfs->z_os))
return (SET_ERROR(EROFS));
error = zfs_register_callbacks(zfsvfs->z_vfs);
if (error)
return (error);
/*
* If we are not mounting (i.e. online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
boolean_t readonly;
ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
/*
* During replay we remove the read only flag to
* allow replays to succeed.
*/
readonly = zfsvfs->z_vfs->vfs_flag & VFS_RDONLY;
if (readonly != 0) {
zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
} else {
dsl_dir_t *dd;
zap_stats_t zs;
if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
&zs) == 0) {
dataset_kstats_update_nunlinks_kstat(
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
(u_longlong_t)zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dd->dd_activity_cancelled = B_FALSE;
}
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
if (zil_replay_disable) {
zil_destroy(zfsvfs->z_log, B_FALSE);
} else {
boolean_t use_nc = zfsvfs->z_use_namecache;
zfsvfs->z_use_namecache = B_FALSE;
zfsvfs->z_replay = B_TRUE;
zil_replay(zfsvfs->z_os, zfsvfs,
zfs_replay_vector);
zfsvfs->z_replay = B_FALSE;
zfsvfs->z_use_namecache = use_nc;
}
}
/* restore readonly bit */
if (readonly != 0)
zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
} else {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, !=, NULL);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
}
/*
* Set the objset user_ptr to track its zfsvfs.
*/
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
int i;
zfs_fuid_destroy(zfsvfs);
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
ASSERT3U(zfsvfs->z_nr_znodes, ==, 0);
list_destroy(&zfsvfs->z_all_znodes);
ZFS_TEARDOWN_DESTROY(zfsvfs);
ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_destroy(&zfsvfs->z_hold_mtx[i]);
dataset_kstats_destroy(&zfsvfs->z_kstat);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
static int
zfs_domount(vfs_t *vfsp, char *osname)
{
uint64_t recordsize, fsid_guid;
int error = 0;
zfsvfs_t *zfsvfs;
ASSERT3P(vfsp, !=, NULL);
ASSERT3P(osname, !=, NULL);
error = zfsvfs_create(osname, vfsp->mnt_flag & MNT_RDONLY, &zfsvfs);
if (error)
return (error);
zfsvfs->z_vfs = vfsp;
if ((error = dsl_prop_get_integer(osname,
"recordsize", &recordsize, NULL)))
goto out;
zfsvfs->z_vfs->vfs_bsize = SPA_MINBLOCKSIZE;
zfsvfs->z_vfs->mnt_stat.f_iosize = recordsize;
vfsp->vfs_data = zfsvfs;
vfsp->mnt_flag |= MNT_LOCAL;
vfsp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
vfsp->mnt_kern_flag |= MNTK_SHARED_WRITES;
vfsp->mnt_kern_flag |= MNTK_EXTENDED_SHARED;
/*
* This can cause a loss of coherence between ARC and page cache
* on ZoF - unclear if the problem is in FreeBSD or ZoF
*/
vfsp->mnt_kern_flag |= MNTK_NO_IOPF; /* vn_io_fault can be used */
vfsp->mnt_kern_flag |= MNTK_NOMSYNC;
vfsp->mnt_kern_flag |= MNTK_VMSETSIZE_BUG;
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
vfsp->mnt_kern_flag |= MNTK_FPLOOKUP;
#endif
/*
* The fsid is 64 bits, composed of an 8-bit fs type, which
* separates our fsid from any other filesystem types, and a
* 56-bit objset unique ID. The objset unique ID is unique to
* all objsets open on this system, provided by unique_create().
* The 8-bit fs type must be put in the low bits of fsid[1]
* because that's where other Solaris filesystems put it.
*/
fsid_guid = dmu_objset_fsid_guid(zfsvfs->z_os);
ASSERT3U((fsid_guid & ~((1ULL << 56) - 1)), ==, 0);
vfsp->vfs_fsid.val[0] = fsid_guid;
vfsp->vfs_fsid.val[1] = ((fsid_guid >> 32) << 8) |
(vfsp->mnt_vfc->vfc_typenum & 0xFF);
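/*
* Illustrative layout of the resulting fsid, assuming (hypothetically)
* fsid_guid == 0x00AABBCCDDEEFF11 and vfc_typenum == 0xDE:
*
* val[0] = 0xDDEEFF11                        low 32 bits of the guid
* val[1] = (0x00AABBCC << 8) | 0xDE = 0xAABBCCDE
*
* so the 8-bit fs type lands in the low byte of val[1], as described
* in the comment above.
*/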
/*
* Set features for file system.
*/
zfs_set_fuid_feature(zfsvfs);
if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
uint64_t pval;
atime_changed_cb(zfsvfs, B_FALSE);
readonly_changed_cb(zfsvfs, B_TRUE);
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zfsvfs, pval);
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acl_type_changed_cb(zfsvfs, pval);
zfsvfs->z_issnap = B_TRUE;
zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
} else {
if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
goto out;
}
vfs_mountedfrom(vfsp, osname);
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
out:
if (error) {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
} else {
atomic_inc_32(&zfs_active_fs_count);
}
return (error);
}
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
if (!dmu_objset_is_snapshot(os))
dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
static int
getpoolname(const char *osname, char *poolname)
{
char *p;
p = strchr(osname, '/');
if (p == NULL) {
if (strlen(osname) >= MAXNAMELEN)
return (ENAMETOOLONG);
(void) strcpy(poolname, osname);
} else {
if (p - osname >= MAXNAMELEN)
return (ENAMETOOLONG);
(void) strlcpy(poolname, osname, p - osname + 1);
}
return (0);
}
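/*
* Example behavior of getpoolname() (names are hypothetical):
*
* "tank/home/user" -> p points at the first '/', p - osname == 4,
*                     strlcpy() copies at most 4 chars plus NUL,
*                     so poolname becomes "tank"
* "tank"           -> no '/', the whole name is copied as-is
*/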
static void
fetch_osname_options(char *name, bool *checkpointrewind)
{
if (name[0] == '!') {
*checkpointrewind = true;
memmove(name, name + 1, strlen(name));
} else {
*checkpointrewind = false;
}
}
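/*
* Example: mounting with from="!tank/ROOT/default" (hypothetical name)
* sets *checkpointrewind to true and rewrites the name in place to
* "tank/ROOT/default"; the memmove() of strlen(name) bytes from
* name + 1 also moves the terminating NUL. Without the leading '!'
* the flag is simply cleared.
*/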
static int
zfs_mount(vfs_t *vfsp)
{
kthread_t *td = curthread;
vnode_t *mvp = vfsp->mnt_vnodecovered;
cred_t *cr = td->td_ucred;
char *osname;
int error = 0;
int canwrite;
bool checkpointrewind, isctlsnap = false;
if (vfs_getopt(vfsp->mnt_optnew, "from", (void **)&osname, NULL))
return (SET_ERROR(EINVAL));
/*
* If full-owner-access is enabled and delegated administration is
* turned on, we must set nosuid.
*/
if (zfs_super_owner &&
dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != ECANCELED) {
secpolicy_fs_mount_clearopts(cr, vfsp);
}
fetch_osname_options(osname, &checkpointrewind);
isctlsnap = (mvp != NULL && zfsctl_is_node(mvp) &&
strchr(osname, '@') != NULL);
/*
* Check for mount privilege.
*
* If we don't have the privilege, then see if we have local
* permission to allow it.
*/
error = secpolicy_fs_mount(cr, mvp, vfsp);
if (error && isctlsnap) {
secpolicy_fs_mount_clearopts(cr, vfsp);
} else if (error) {
if (dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != 0)
goto out;
if (!(vfsp->vfs_flag & MS_REMOUNT)) {
vattr_t vattr;
/*
* Make sure user is the owner of the mount point
* or has sufficient privileges.
*/
vattr.va_mask = AT_UID;
vn_lock(mvp, LK_SHARED | LK_RETRY);
if (VOP_GETATTR(mvp, &vattr, cr)) {
VOP_UNLOCK1(mvp);
goto out;
}
if (secpolicy_vnode_owner(mvp, cr, vattr.va_uid) != 0 &&
VOP_ACCESS(mvp, VWRITE, cr, td) != 0) {
VOP_UNLOCK1(mvp);
goto out;
}
VOP_UNLOCK1(mvp);
}
secpolicy_fs_mount_clearopts(cr, vfsp);
}
/*
* Refuse to mount a filesystem if we are in a local zone and the
* dataset is not visible.
*/
if (!INGLOBALZONE(curproc) &&
(!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
boolean_t mount_snapshot = B_FALSE;
/*
* Snapshots may be mounted in .zfs for unjailed datasets
* if allowed by the jail param zfs.mount_snapshot.
*/
if (isctlsnap) {
struct prison *pr;
struct zfs_jailparam *zjp;
pr = curthread->td_ucred->cr_prison;
mtx_lock(&pr->pr_mtx);
zjp = osd_jail_get(pr, zfs_jailparam_slot);
mtx_unlock(&pr->pr_mtx);
if (zjp && zjp->mount_snapshot)
mount_snapshot = B_TRUE;
}
if (!mount_snapshot) {
error = SET_ERROR(EPERM);
goto out;
}
}
vfsp->vfs_flag |= MNT_NFS4ACLS;
/*
* When doing a remount, we simply refresh our temporary properties
* according to those options set in the current VFS options.
*/
if (vfsp->vfs_flag & MS_REMOUNT) {
zfsvfs_t *zfsvfs = vfsp->vfs_data;
/*
* Refresh mount options with z_teardown_lock blocking I/O while
* the filesystem is in an inconsistent state.
* The lock also serializes this code with filesystem
* manipulations between entry to zfs_suspend_fs() and return
* from zfs_resume_fs().
*/
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfs_unregister_callbacks(zfsvfs);
error = zfs_register_callbacks(vfsp);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
goto out;
}
/* Initial root mount: try hard to import the requested root pool. */
if ((vfsp->vfs_flag & MNT_ROOTFS) != 0 &&
(vfsp->vfs_flag & MNT_UPDATE) == 0) {
char pname[MAXNAMELEN];
error = getpoolname(osname, pname);
if (error == 0)
error = spa_import_rootpool(pname, checkpointrewind);
if (error)
goto out;
}
DROP_GIANT();
error = zfs_domount(vfsp, osname);
PICKUP_GIANT();
out:
return (error);
}
static int
zfs_statfs(vfs_t *vfsp, struct statfs *statp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
uint64_t refdbytes, availbytes, usedobjs, availobjs;
int error;
statp->f_version = STATFS_VERSION;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
dmu_objset_space(zfsvfs->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
/*
* The underlying storage pool actually uses multiple block sizes.
* We report the fragsize as the smallest block size we support,
* and we report our blocksize as the filesystem's maximum blocksize.
*/
statp->f_bsize = SPA_MINBLOCKSIZE;
statp->f_iosize = zfsvfs->z_vfs->mnt_stat.f_iosize;
/*
* The following report "total" blocks of various kinds in the
* file system, but reported in terms of f_frsize - the
* "fragment" size.
*/
statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
statp->f_bfree = availbytes / statp->f_bsize;
statp->f_bavail = statp->f_bfree; /* no root reservation */
/*
* statvfs() should really be called statufs(), because it assumes
* static metadata. ZFS doesn't preallocate files, so the best
* we can do is report the max that could possibly fit in f_files,
* and that minus the number actually used in f_ffree.
* For f_ffree, report the smaller of the number of object available
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, statp->f_bfree);
statp->f_files = statp->f_ffree + usedobjs;
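/*
* Worked example with hypothetical space numbers: if refdbytes +
* availbytes is 1 GiB (2^30 bytes), then f_blocks = 2^30 >> 9 =
* 2097152 blocks of SPA_MINBLOCKSIZE (512) bytes; if availbytes is
* 512 MiB, f_bfree = 2^29 / 512 = 1048576, and f_ffree is capped at
* f_bfree when fewer blocks than objects are available.
*/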
/*
* We're a zfs filesystem.
*/
strlcpy(statp->f_fstypename, "zfs",
sizeof (statp->f_fstypename));
strlcpy(statp->f_mntfromname, vfsp->mnt_stat.f_mntfromname,
sizeof (statp->f_mntfromname));
strlcpy(statp->f_mntonname, vfsp->mnt_stat.f_mntonname,
sizeof (statp->f_mntonname));
statp->f_namemax = MAXNAMELEN - 1;
zfs_exit(zfsvfs, FTAG);
return (0);
}
static int
zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *rootzp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0)
*vpp = ZTOV(rootzp);
zfs_exit(zfsvfs, FTAG);
if (error == 0) {
error = vn_lock(*vpp, flags);
if (error != 0) {
VN_RELE(*vpp);
*vpp = NULL;
}
}
return (error);
}
/*
* Teardown the zfsvfs::z_os.
*
* Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
znode_t *zp;
dsl_dir_t *dd;
/*
* If someone has not already unmounted this file system,
* drain the zrele_taskq to ensure that all active references to
* the zfsvfs_t have been handled; only then can it be safely
* destroyed.
*/
if (zfsvfs->z_os) {
/*
* If we're unmounting we have to wait for the list to
* drain completely.
*
* If we're not unmounting there's no guarantee the list
* will drain completely, but zreles run from the taskq
* may add the parents of dir-based xattrs to the taskq
* so we want to wait for these.
*
* We can safely read z_nr_znodes without locking because the
* VFS has already blocked operations which add to the
* z_all_znodes list and thus increment z_nr_znodes.
*/
int round = 0;
while (zfsvfs->z_nr_znodes > 0) {
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
if (++round > 1 && !unmounting)
break;
}
}
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
* We purge the parent filesystem's vfsp as the parent
* filesystem and all of its snapshots have their vnode's
* v_vfsp set to the parent's filesystem's vfsp. Note,
* 'z_parent' is self referential for non-snapshots.
*/
#ifdef FREEBSD_NAMECACHE
#if __FreeBSD_version >= 1300117
cache_purgevfs(zfsvfs->z_parent->z_vfs);
#else
cache_purgevfs(zfsvfs->z_parent->z_vfs, true);
#endif
#endif
}
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
if (zfsvfs->z_log) {
zil_close(zfsvfs->z_log);
zfsvfs->z_log = NULL;
}
ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs);
/*
* If we are not unmounting (i.e. online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed, then just bail out now.
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
/*
* At this point there are no vops active, and any new vops will
* fail with EIO since we have z_teardown_lock for writer (only
* relevant for forced unmount).
*
* Release all holds on dbufs.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
if (zp->z_sa_hdl != NULL) {
zfs_znode_dmu_fini(zp);
}
}
mutex_exit(&zfsvfs->z_znodes_lock);
/*
* If we are unmounting, set the unmounted flag and let new vops
* unblock. zfs_inactive will have the unmounted behavior, and all
* other vops will fail with EIO.
*/
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
* zfsvfs, so just return as the properties had already been
* unregistered and cached data had been evicted before.
*/
if (zfsvfs->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data
*/
if (!zfs_is_readonly(zfsvfs))
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
dmu_objset_evict_dbufs(zfsvfs->z_os);
dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dsl_dir_cancel_waiters(dd);
return (0);
}
static int
zfs_umount(vfs_t *vfsp, int fflag)
{
kthread_t *td = curthread;
zfsvfs_t *zfsvfs = vfsp->vfs_data;
objset_t *os;
cred_t *cr = td->td_ucred;
int ret;
ret = secpolicy_fs_unmount(cr, vfsp);
if (ret) {
if (dsl_deleg_access((char *)vfsp->vfs_resource,
ZFS_DELEG_PERM_MOUNT, cr))
return (ret);
}
/*
* Unmount any snapshots mounted under .zfs before unmounting the
* dataset itself.
*/
if (zfsvfs->z_ctldir != NULL) {
if ((ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
return (ret);
}
if (fflag & MS_FORCE) {
/*
* Mark file system as unmounted before calling
* vflush(FORCECLOSE). This way we ensure no future vnops
* will be called and risk operating on DOOMED vnodes.
*/
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfsvfs->z_unmounted = B_TRUE;
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* Flush all the files.
*/
ret = vflush(vfsp, 0, (fflag & MS_FORCE) ? FORCECLOSE : 0, td);
if (ret != 0)
return (ret);
while (taskqueue_cancel(zfsvfs_taskq->tq_queue,
&zfsvfs->z_unlinked_drain_task, NULL) != 0)
taskqueue_drain(zfsvfs_taskq->tq_queue,
&zfsvfs->z_unlinked_drain_task);
VERIFY0(zfsvfs_teardown(zfsvfs, B_TRUE));
os = zfsvfs->z_os;
/*
* z_os will be NULL if there was an error in
* attempting to reopen zfsvfs.
*/
if (os != NULL) {
/*
* Unset the objset user_ptr.
*/
mutex_enter(&os->os_user_ptr_lock);
dmu_objset_set_user(os, NULL);
mutex_exit(&os->os_user_ptr_lock);
/*
* Finally release the objset
*/
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
/*
* We can now safely destroy the '.zfs' directory node.
*/
if (zfsvfs->z_ctldir != NULL)
zfsctl_destroy(zfsvfs);
zfs_freevfs(vfsp);
return (0);
}
static int
zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *zp;
int err;
/*
* zfs_zget() can't operate on virtual entries like .zfs/ or
* .zfs/snapshot/ directories; that's why we return EOPNOTSUPP.
* This will make NFS switch to LOOKUP instead of using VGET.
*/
if (ino == ZFSCTL_INO_ROOT || ino == ZFSCTL_INO_SNAPDIR ||
(zfsvfs->z_shares_dir != 0 && ino == zfsvfs->z_shares_dir))
return (EOPNOTSUPP);
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
err = zfs_zget(zfsvfs, ino, &zp);
if (err == 0 && zp->z_unlinked) {
vrele(ZTOV(zp));
err = EINVAL;
}
if (err == 0)
*vpp = ZTOV(zp);
zfs_exit(zfsvfs, FTAG);
if (err == 0) {
err = vn_lock(*vpp, flags);
if (err != 0)
vrele(*vpp);
}
if (err != 0)
*vpp = NULL;
return (err);
}
static int
#if __FreeBSD_version >= 1300098
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors)
#else
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors)
#endif
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
/*
* If this is a regular file system, vfsp is the same as
* zfsvfs->z_parent->z_vfs; but if it is a snapshot,
* zfsvfs->z_parent->z_vfs represents the parent file system,
* which we have to use here because only that file system
* has mnt_export configured.
*/
return (vfs_stdcheckexp(zfsvfs->z_parent->z_vfs, nam, extflagsp,
credanonp, numsecflavors, secflavors));
}
_Static_assert(sizeof (struct fid) >= SHORT_FID_LEN,
"struct fid smaller than SHORT_FID_LEN");
_Static_assert(sizeof (struct fid) >= LONG_FID_LEN,
"struct fid smaller than LONG_FID_LEN");
static int
zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp)
{
struct componentname cn;
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *zp;
vnode_t *dvp;
uint64_t object = 0;
uint64_t fid_gen = 0;
uint64_t setgen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
int i, err;
*vpp = NULL;
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
/*
* On FreeBSD we can get the snapshot's mount point or its parent
* file system's mount point, depending on whether the snapshot is
* already mounted.
*/
if (zfsvfs->z_parent == zfsvfs && fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint64_t objsetid = 0;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
zfs_exit(zfsvfs, FTAG);
err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
if (err)
return (SET_ERROR(EINVAL));
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
}
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
zfid_short_t *zfid = (zfid_short_t *)fidp;
for (i = 0; i < sizeof (zfid->zf_object); i++)
object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
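/*
* The byte loops above reassemble the fid fields little-endian, one
* byte at a time. For example (hypothetical fid contents), zf_object
* bytes { 0x2a, 0x01, 0, 0, 0, 0 } decode to object 0x012a (298).
* After the zf_gen loop, i equals sizeof (zf_gen) (4 bytes in
* zfid_short_t), which is what makes gen_mask below a 32-bit mask.
*/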
if (fidp->fid_len == LONG_FID_LEN && setgen != 0) {
zfs_exit(zfsvfs, FTAG);
dprintf("snapdir fid: fid_gen (%llu) and setgen (%llu)\n",
(u_longlong_t)fid_gen, (u_longlong_t)setgen);
return (SET_ERROR(EINVAL));
}
/*
* A zero fid_gen means we are in .zfs or the .zfs/snapshot
* directory tree. If the object == zfsvfs->z_shares_dir, then
* we are in the .zfs/shares directory tree.
*/
if ((fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) ||
(zfsvfs->z_shares_dir != 0 && object == zfsvfs->z_shares_dir)) {
zfs_exit(zfsvfs, FTAG);
VERIFY0(zfsctl_root(zfsvfs, LK_SHARED, &dvp));
if (object == ZFSCTL_INO_SNAPDIR) {
cn.cn_nameptr = "snapshot";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = LOOKUP;
cn.cn_flags = ISLASTCN | LOCKLEAF;
cn.cn_lkflags = flags;
VERIFY0(VOP_LOOKUP(dvp, vpp, &cn));
vput(dvp);
} else if (object == zfsvfs->z_shares_dir) {
/*
* XXX This branch must not be taken;
* if it is, then the lookup below will
* explode.
*/
cn.cn_nameptr = "shares";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = LOOKUP;
cn.cn_flags = ISLASTCN;
cn.cn_lkflags = flags;
VERIFY0(VOP_LOOKUP(dvp, vpp, &cn));
vput(dvp);
} else {
*vpp = dvp;
}
return (err);
}
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%llu mask %llx]\n", (u_longlong_t)object,
(u_longlong_t)fid_gen,
(u_longlong_t)gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
zfs_exit(zfsvfs, FTAG);
return (err);
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
if (zp_gen == 0)
zp_gen = 1;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%llu) != fid gen (%llu)\n",
(u_longlong_t)zp_gen, (u_longlong_t)fid_gen);
vrele(ZTOV(zp));
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
}
*vpp = ZTOV(zp);
zfs_exit(zfsvfs, FTAG);
err = vn_lock(*vpp, flags);
if (err == 0)
vnode_create_vobject(*vpp, zp->z_size, curthread);
else
*vpp = NULL;
return (err);
}
/*
* Block out VOPs and close zfsvfs_t::z_os
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
* dataset and objset intact so that they can be atomically handed off during
* a subsequent rollback or recv operation and the resume thereafter.
*/
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
int error;
if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
return (error);
return (0);
}
/*
* Rebuild SA and release VOPs. Note that ownership of the underlying dataset
* is an invariant across any of the operations that can be performed while the
* filesystem was suspended. Whether it succeeded or failed, the preconditions
* are the same: the relevant objset and associated dataset are owned by
* zfsvfs, held, and long held on entry.
*/
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
int err;
znode_t *zp;
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
* We already own this, so just update the objset_t, as the one we
* had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
err = zfsvfs_init(zfsvfs, os);
if (err != 0)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
VERIFY0(zfsvfs_setup(zfsvfs, B_FALSE));
zfs_set_fuid_feature(zfsvfs);
/*
* Attempt to re-establish all the active znodes with
* their dbufs. If a zfs_rezget() fails, then we'll let
* any potential callers discover that via zfs_enter_verify_zp
* when they try to use their znode.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
(void) zfs_rezget(zp);
}
mutex_exit(&zfsvfs->z_znodes_lock);
bail:
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err) {
/*
* Since we couldn't set up the SA framework, try to force
* unmount this file system.
*/
if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0) {
vfs_ref(zfsvfs->z_vfs);
(void) dounmount(zfsvfs->z_vfs, MS_FORCE, curthread);
}
}
return (err);
}
static void
zfs_freevfs(vfs_t *vfsp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
zfsvfs_free(zfsvfs);
atomic_dec_32(&zfs_active_fs_count);
}
#ifdef __i386__
static int desiredvnodes_backup;
#include <sys/vmmeter.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#endif
static void
zfs_vnodes_adjust(void)
{
#ifdef __i386__
int newdesiredvnodes;
desiredvnodes_backup = desiredvnodes;
/*
* We calculate newdesiredvnodes the same way it is done in
* vntblinit(). If it is equal to desiredvnodes, it means that
* it wasn't tuned by the administrator and we can tune it down.
*/
newdesiredvnodes = min(maxproc + vm_cnt.v_page_count / 4, 2 *
vm_kmem_size / (5 * (sizeof (struct vm_object) +
sizeof (struct vnode))));
if (newdesiredvnodes == desiredvnodes)
desiredvnodes = (3 * newdesiredvnodes) / 4;
#endif
}
static void
zfs_vnodes_adjust_back(void)
{
#ifdef __i386__
desiredvnodes = desiredvnodes_backup;
#endif
}
void
zfs_init(void)
{
printf("ZFS filesystem version: " ZPL_VERSION_STRING "\n");
/*
* Initialize .zfs directory structures
*/
zfsctl_init();
/*
* Initialize znode cache, vnode ops, etc...
*/
zfs_znode_init();
/*
* Reduce the number of vnodes. Originally the number of vnodes is
* calculated with the UFS inode in mind. We reduce it here because
* it's too big for ZFS/i386.
*/
zfs_vnodes_adjust();
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
zfsvfs_taskq = taskq_create("zfsvfs", 1, minclsyspri, 0, 0, 0);
}
void
zfs_fini(void)
{
taskq_destroy(zfsvfs_taskq);
zfsctl_fini();
zfs_znode_fini();
zfs_vnodes_adjust_back();
}
int
zfs_busy(void)
{
return (zfs_active_fs_count != 0);
}
/*
* Release VOPs and unmount a suspended filesystem.
*/
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
* We already own this, so just hold and rele it to update the
* objset_t, as the one we had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
zfsvfs->z_os = os;
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.
*/
(void) zfs_umount(zfsvfs->z_vfs, 0);
zfsvfs->z_unmounted = B_TRUE;
return (0);
}
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
int error;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY0(sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
"from %ju to %ju", (uintmax_t)zfsvfs->z_version,
(uintmax_t)newvers);
dmu_tx_commit(tx);
zfsvfs->z_version = newvers;
os->os_version = newvers;
zfs_set_fuid_feature(zfsvfs);
return (0);
}
-/*
- * Read a property stored within the master node.
- */
-int
-zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
-{
- uint64_t *cached_copy = NULL;
-
- /*
- * Figure out where in the objset_t the cached copy would live, if it
- * is available for the requested property.
- */
- if (os != NULL) {
- switch (prop) {
- case ZFS_PROP_VERSION:
- cached_copy = &os->os_version;
- break;
- case ZFS_PROP_NORMALIZE:
- cached_copy = &os->os_normalization;
- break;
- case ZFS_PROP_UTF8ONLY:
- cached_copy = &os->os_utf8only;
- break;
- case ZFS_PROP_CASE:
- cached_copy = &os->os_casesensitivity;
- break;
- default:
- break;
- }
- }
- if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
- *value = *cached_copy;
- return (0);
- }
-
- /*
- * If the property wasn't cached, look up the file system's value for
- * the property. For the version property, we look up a slightly
- * different string.
- */
- const char *pname;
- int error = ENOENT;
- if (prop == ZFS_PROP_VERSION) {
- pname = ZPL_VERSION_STR;
- } else {
- pname = zfs_prop_to_name(prop);
- }
-
- if (os != NULL) {
- ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
- error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
- }
-
- if (error == ENOENT) {
- /* No value set, use the default value */
- switch (prop) {
- case ZFS_PROP_VERSION:
- *value = ZPL_VERSION;
- break;
- case ZFS_PROP_NORMALIZE:
- case ZFS_PROP_UTF8ONLY:
- *value = 0;
- break;
- case ZFS_PROP_CASE:
- *value = ZFS_CASE_SENSITIVE;
- break;
- case ZFS_PROP_ACLTYPE:
- *value = ZFS_ACLTYPE_NFSV4;
- break;
- default:
- return (error);
- }
- error = 0;
- }
-
- /*
- * If one of the methods for getting the property value above worked,
- * copy it into the objset_t's cache.
- */
- if (error == 0 && cached_copy != NULL) {
- *cached_copy = *value;
- }
-
- return (error);
-}
-
/*
* Return true if the corresponding vfs's unmounted flag is set.
* Otherwise return false.
* If this function returns true we know VFS unmount has been initiated.
*/
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
zfsvfs_t *zfvp;
boolean_t unmounted = B_FALSE;
ASSERT3U(dmu_objset_type(os), ==, DMU_OST_ZFS);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
if (zfvp != NULL && zfvp->z_vfs != NULL &&
(zfvp->z_vfs->mnt_kern_flag & MNTK_UNMOUNT))
unmounted = B_TRUE;
mutex_exit(&os->os_user_ptr_lock);
return (unmounted);
}
#ifdef _KERNEL
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
char tmpbuf[MAXPATHLEN];
struct mount *mp;
char *fromname;
size_t oldlen;
oldlen = strlen(oldname);
mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
fromname = mp->mnt_stat.f_mntfromname;
if (strcmp(fromname, oldname) == 0) {
(void) strlcpy(fromname, newname,
sizeof (mp->mnt_stat.f_mntfromname));
continue;
}
if (strncmp(fromname, oldname, oldlen) == 0 &&
(fromname[oldlen] == '/' || fromname[oldlen] == '@')) {
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s%s",
newname, fromname + oldlen);
(void) strlcpy(fromname, tmpbuf,
sizeof (mp->mnt_stat.f_mntfromname));
continue;
}
}
mtx_unlock(&mountlist_mtx);
}
#endif
/*
* Find a prison with ZFS info.
* Return the ZFS info and the (locked) prison.
*/
static struct zfs_jailparam *
zfs_jailparam_find(struct prison *spr, struct prison **prp)
{
struct prison *pr;
struct zfs_jailparam *zjp;
for (pr = spr; ; pr = pr->pr_parent) {
mtx_lock(&pr->pr_mtx);
if (pr == &prison0) {
zjp = &zfs_jailparam0;
break;
}
zjp = osd_jail_get(pr, zfs_jailparam_slot);
if (zjp != NULL)
break;
mtx_unlock(&pr->pr_mtx);
}
*prp = pr;
return (zjp);
}
/*
* Ensure a prison has its own ZFS info. If zjpp is non-null, point it to the
* ZFS info and lock the prison.
*/
static void
zfs_jailparam_alloc(struct prison *pr, struct zfs_jailparam **zjpp)
{
struct prison *ppr;
struct zfs_jailparam *zjp, *nzjp;
void **rsv;
/* If this prison already has ZFS info, return that. */
zjp = zfs_jailparam_find(pr, &ppr);
if (ppr == pr)
goto done;
/*
* Allocate a new info record. Then check again, in case something
* changed during the allocation.
*/
mtx_unlock(&ppr->pr_mtx);
nzjp = malloc(sizeof (struct zfs_jailparam), M_PRISON, M_WAITOK);
rsv = osd_reserve(zfs_jailparam_slot);
zjp = zfs_jailparam_find(pr, &ppr);
if (ppr == pr) {
free(nzjp, M_PRISON);
osd_free_reserved(rsv);
goto done;
}
/* Inherit the initial values from the ancestor. */
mtx_lock(&pr->pr_mtx);
(void) osd_jail_set_reserved(pr, zfs_jailparam_slot, rsv, nzjp);
(void) memcpy(nzjp, zjp, sizeof (*zjp));
zjp = nzjp;
mtx_unlock(&ppr->pr_mtx);
done:
if (zjpp != NULL)
*zjpp = zjp;
else
mtx_unlock(&pr->pr_mtx);
}
/*
* Jail OSD methods for ZFS VFS info.
*/
static int
zfs_jailparam_create(void *obj, void *data)
{
struct prison *pr = obj;
struct vfsoptlist *opts = data;
int jsys;
if (vfs_copyopt(opts, "zfs", &jsys, sizeof (jsys)) == 0 &&
jsys == JAIL_SYS_INHERIT)
return (0);
/*
* Inherit a prison's initial values from its parent
* (different from JAIL_SYS_INHERIT which also inherits changes).
*/
zfs_jailparam_alloc(pr, NULL);
return (0);
}
static int
zfs_jailparam_get(void *obj, void *data)
{
struct prison *ppr, *pr = obj;
struct vfsoptlist *opts = data;
struct zfs_jailparam *zjp;
int jsys, error;
zjp = zfs_jailparam_find(pr, &ppr);
jsys = (ppr == pr) ? JAIL_SYS_NEW : JAIL_SYS_INHERIT;
error = vfs_setopt(opts, "zfs", &jsys, sizeof (jsys));
if (error != 0 && error != ENOENT)
goto done;
if (jsys == JAIL_SYS_NEW) {
error = vfs_setopt(opts, "zfs.mount_snapshot",
&zjp->mount_snapshot, sizeof (zjp->mount_snapshot));
if (error != 0 && error != ENOENT)
goto done;
} else {
/*
* If this prison is inheriting its ZFS info, report
* empty/zero parameters.
*/
static int mount_snapshot = 0;
error = vfs_setopt(opts, "zfs.mount_snapshot",
&mount_snapshot, sizeof (mount_snapshot));
if (error != 0 && error != ENOENT)
goto done;
}
error = 0;
done:
mtx_unlock(&ppr->pr_mtx);
return (error);
}
static int
zfs_jailparam_set(void *obj, void *data)
{
struct prison *pr = obj;
struct prison *ppr;
struct vfsoptlist *opts = data;
int error, jsys, mount_snapshot;
/* Set the parameters, which should be correct. */
error = vfs_copyopt(opts, "zfs", &jsys, sizeof (jsys));
if (error == ENOENT)
jsys = -1;
error = vfs_copyopt(opts, "zfs.mount_snapshot", &mount_snapshot,
sizeof (mount_snapshot));
if (error == ENOENT)
mount_snapshot = -1;
else
jsys = JAIL_SYS_NEW;
switch (jsys) {
case JAIL_SYS_NEW:
{
/* "zfs=new" or "zfs.*": the prison gets its own ZFS info. */
struct zfs_jailparam *zjp;
/*
* A child jail cannot have more permissions than its parent.
*/
if (pr->pr_parent != &prison0) {
zjp = zfs_jailparam_find(pr->pr_parent, &ppr);
mtx_unlock(&ppr->pr_mtx);
if (zjp->mount_snapshot < mount_snapshot) {
return (EPERM);
}
}
zfs_jailparam_alloc(pr, &zjp);
if (mount_snapshot != -1)
zjp->mount_snapshot = mount_snapshot;
mtx_unlock(&pr->pr_mtx);
break;
}
case JAIL_SYS_INHERIT:
/* "zfs=inherit": inherit the parent's ZFS info. */
mtx_lock(&pr->pr_mtx);
osd_jail_del(pr, zfs_jailparam_slot);
mtx_unlock(&pr->pr_mtx);
break;
case -1:
/*
* If the setting being changed is not ZFS related
* then do nothing.
*/
break;
}
return (0);
}
static int
zfs_jailparam_check(void *obj __unused, void *data)
{
struct vfsoptlist *opts = data;
int error, jsys, mount_snapshot;
/* Check that the parameters are correct. */
error = vfs_copyopt(opts, "zfs", &jsys, sizeof (jsys));
if (error != ENOENT) {
if (error != 0)
return (error);
if (jsys != JAIL_SYS_NEW && jsys != JAIL_SYS_INHERIT)
return (EINVAL);
}
error = vfs_copyopt(opts, "zfs.mount_snapshot", &mount_snapshot,
sizeof (mount_snapshot));
if (error != ENOENT) {
if (error != 0)
return (error);
if (mount_snapshot != 0 && mount_snapshot != 1)
return (EINVAL);
}
return (0);
}
static void
zfs_jailparam_destroy(void *data)
{
free(data, M_PRISON);
}
static void
zfs_jailparam_sysinit(void *arg __unused)
{
struct prison *pr;
osd_method_t methods[PR_MAXMETHOD] = {
[PR_METHOD_CREATE] = zfs_jailparam_create,
[PR_METHOD_GET] = zfs_jailparam_get,
[PR_METHOD_SET] = zfs_jailparam_set,
[PR_METHOD_CHECK] = zfs_jailparam_check,
};
zfs_jailparam_slot = osd_jail_register(zfs_jailparam_destroy, methods);
/* Copy the defaults to any existing prisons. */
sx_slock(&allprison_lock);
TAILQ_FOREACH(pr, &allprison, pr_list)
zfs_jailparam_alloc(pr, NULL);
sx_sunlock(&allprison_lock);
}
static void
zfs_jailparam_sysuninit(void *arg __unused)
{
osd_jail_deregister(zfs_jailparam_slot);
}
SYSINIT(zfs_jailparam_sysinit, SI_SUB_DRIVERS, SI_ORDER_ANY,
zfs_jailparam_sysinit, NULL);
SYSUNINIT(zfs_jailparam_sysuninit, SI_SUB_DRIVERS, SI_ORDER_ANY,
zfs_jailparam_sysuninit, NULL);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
index d26d89544e7c..c4f2b722ef4e 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c
@@ -1,2141 +1,2228 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>
#include "zfs_prop.h"
#include "zfs_comutil.h"
/* Used by fstat(1). */
SYSCTL_INT(_debug_sizeof, OID_AUTO, znode, CTLFLAG_RD,
SYSCTL_NULL_INT_PTR, sizeof (znode_t), "sizeof(znode_t)");
/*
* Define ZNODE_STATS to turn on statistic gathering. By default, it is only
* turned on when ZFS_DEBUG is also defined.
*/
#ifdef ZFS_DEBUG
#define ZNODE_STATS
#endif /* ZFS_DEBUG */
#ifdef ZNODE_STATS
#define ZNODE_STAT_ADD(stat) ((stat)++)
#else
#define ZNODE_STAT_ADD(stat) /* nothing */
#endif /* ZNODE_STATS */
/*
* Functions needed for userland (i.e. libzpool) are not put under
* #ifdef _KERNEL; the rest of the functions have dependencies
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
#if !defined(KMEM_DEBUG) && __FreeBSD_version >= 1300102
#define _ZFS_USE_SMR
static uma_zone_t znode_uma_zone;
#else
static kmem_cache_t *znode_cache = NULL;
#endif
extern struct vop_vector zfs_vnodeops;
extern struct vop_vector zfs_fifoops;
extern struct vop_vector zfs_shareops;
/*
* This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
* z_rangelock. It will modify the offset and length of the lock to reflect
* znode-specific information, and convert RL_APPEND to RL_WRITER. This is
* called with the rangelock_t's rl_lock held, which avoids races.
*/
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
znode_t *zp = arg;
/*
* If in append mode, convert to writer and lock starting at the
* current end of file.
*/
if (new->lr_type == RL_APPEND) {
new->lr_offset = zp->z_size;
new->lr_type = RL_WRITER;
}
/*
* If we need to grow the block size then lock the whole file range.
*/
uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
new->lr_offset = 0;
new->lr_length = UINT64_MAX;
}
}
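/*
* Example of the conversion above (sizes are hypothetical): an
* O_APPEND write arrives as an RL_APPEND lock; with z_size == 4096
* it becomes an RL_WRITER lock starting at offset 4096. If the
* resulting end_size would outgrow the current block size of a file
* still below z_max_blksz, the lock is widened to the whole file
* (offset 0, length UINT64_MAX) so the block size can be grown safely.
*/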
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
znode_t *zp = buf;
POINTER_INVALIDATE(&zp->z_zfsvfs);
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
zp->z_acl_cached = NULL;
zp->z_xattr_cached = NULL;
zp->z_xattr_parent = 0;
zp->z_vnode = NULL;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
return (0);
}
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
(void) arg;
znode_t *zp = buf;
ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
ASSERT3P(zp->z_vnode, ==, NULL);
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
mutex_destroy(&zp->z_acl_lock);
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
ASSERT0(atomic_load_32(&zp->z_sync_writes_cnt));
ASSERT0(atomic_load_32(&zp->z_async_writes_cnt));
}
#ifdef _ZFS_USE_SMR
VFS_SMR_DECLARE;
static int
zfs_znode_cache_constructor_smr(void *mem, int size __unused, void *private,
int flags)
{
return (zfs_znode_cache_constructor(mem, private, flags));
}
static void
zfs_znode_cache_destructor_smr(void *mem, int size __unused, void *private)
{
zfs_znode_cache_destructor(mem, private);
}
void
zfs_znode_init(void)
{
/*
* Initialize zcache
*/
ASSERT3P(znode_uma_zone, ==, NULL);
znode_uma_zone = uma_zcreate("zfs_znode_cache",
sizeof (znode_t), zfs_znode_cache_constructor_smr,
zfs_znode_cache_destructor_smr, NULL, NULL, 0, 0);
VFS_SMR_ZONE_SET(znode_uma_zone);
}
static znode_t *
zfs_znode_alloc_kmem(int flags)
{
return (uma_zalloc_smr(znode_uma_zone, flags));
}
static void
zfs_znode_free_kmem(znode_t *zp)
{
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
uma_zfree_smr(znode_uma_zone, zp);
}
#else
void
zfs_znode_init(void)
{
/*
* Initialize zcache
*/
ASSERT3P(znode_cache, ==, NULL);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
}
static znode_t *
zfs_znode_alloc_kmem(int flags)
{
return (kmem_cache_alloc(znode_cache, flags));
}
static void
zfs_znode_free_kmem(znode_t *zp)
{
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
kmem_cache_free(znode_cache, zp);
}
#endif
void
zfs_znode_fini(void)
{
/*
* Cleanup zcache
*/
#ifdef _ZFS_USE_SMR
if (znode_uma_zone) {
uma_zdestroy(znode_uma_zone);
znode_uma_zone = NULL;
}
#else
if (znode_cache) {
kmem_cache_destroy(znode_cache);
znode_cache = NULL;
}
#endif
}
static int
zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
zfs_acl_ids_t acl_ids;
vattr_t vattr;
znode_t *sharezp;
znode_t *zp;
int error;
vattr.va_mask = AT_MODE|AT_UID|AT_GID;
vattr.va_type = VDIR;
vattr.va_mode = S_IFDIR|0555;
vattr.va_uid = crgetuid(kcred);
vattr.va_gid = crgetgid(kcred);
sharezp = zfs_znode_alloc_kmem(KM_SLEEP);
ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
sharezp->z_unlinked = 0;
sharezp->z_atime_dirty = 0;
sharezp->z_zfsvfs = zfsvfs;
sharezp->z_is_sa = zfsvfs->z_use_sa;
VERIFY0(zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
kcred, NULL, &acl_ids, NULL));
zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, sharezp);
POINTER_INVALIDATE(&sharezp->z_zfsvfs);
error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
zfsvfs->z_shares_dir = sharezp->z_id;
zfs_acl_ids_free(&acl_ids);
sa_handle_destroy(sharezp->z_sa_hdl);
zfs_znode_free_kmem(sharezp);
return (error);
}
/*
* Define a couple of values we need available
* for both 64-bit and 32-bit environments.
*/
#ifndef NBITSMINOR64
#define NBITSMINOR64 32
#endif
#ifndef MAXMAJ64
#define MAXMAJ64 0xffffffffUL
#endif
#ifndef MAXMIN64
#define MAXMIN64 0xffffffffUL
#endif
/*
* Create special expldev for ZFS private use.
* Can't use standard expldev since it doesn't do
* what we want. The standard expldev() takes a
* dev32_t in LP64 and expands it to a long dev_t.
* We need an interface that takes a dev32_t in ILP32
* and expands it to a long dev_t.
*/
static uint64_t
zfs_expldev(dev_t dev)
{
return (((uint64_t)major(dev) << NBITSMINOR64) | minor(dev));
}
/*
* Special cmpldev for ZFS private use.
* Can't use standard cmpldev since it takes
* a long dev_t and compresses it to dev32_t in
* LP64. We need to do a compaction of a long dev_t
* to a dev32_t in ILP32.
*/
dev_t
zfs_cmpldev(uint64_t dev)
{
return (makedev((dev >> NBITSMINOR64), (dev & MAXMIN64)));
}
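/*
* Round-trip example with a hypothetical device number: for a dev_t
* with major 8 and minor 1, zfs_expldev() yields
* ((uint64_t)8 << 32) | 1 == 0x0000000800000001, and zfs_cmpldev()
* of that value gives back makedev(8, 1).
*/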
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
ASSERT3P(zp->z_sa_hdl, ==, NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
if (sa_hdl == NULL) {
VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
sa_set_userp(sa_hdl, zp);
}
zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
/*
* Slap on VROOT if we are the root znode unless we are the root
* node of a snapshot mounted under .zfs.
*/
if (zp->z_id == zfsvfs->z_root && zfsvfs->z_parent == zfsvfs)
ZTOV(zp)->v_flag |= VROOT;
vn_exists(ZTOV(zp));
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zp->z_zfsvfs));
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
}
static void
zfs_vnode_forget(vnode_t *vp)
{
/* copied from insmntque_stddtr */
vp->v_data = NULL;
vp->v_op = &dead_vnodeops;
vgone(vp);
vput(vp);
}
/*
* Construct a new znode/vnode and initialize it.
*
* This does not call dmu_set_user(); that is up to the caller
* to do, in case you don't want to return the znode.
*/
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
dmu_object_type_t obj_type, sa_handle_t *hdl)
{
znode_t *zp;
vnode_t *vp;
uint64_t mode;
uint64_t parent;
#ifdef notyet
uint64_t mtime[2], ctime[2];
#endif
uint64_t projid = ZFS_DEFAULT_PROJID;
sa_bulk_attr_t bulk[9];
int count = 0;
int error;
zp = zfs_znode_alloc_kmem(KM_SLEEP);
#ifndef _ZFS_USE_SMR
KASSERT((zfsvfs->z_parent->z_vfs->mnt_kern_flag & MNTK_FPLOOKUP) == 0,
("%s: fast path lookup enabled without smr", __func__));
#endif
#if __FreeBSD_version >= 1300076
KASSERT(curthread->td_vp_reserved != NULL,
("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
#else
KASSERT(curthread->td_vp_reserv > 0,
("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
#endif
error = getnewvnode("zfs", zfsvfs->z_parent->z_vfs, &zfs_vnodeops, &vp);
if (error != 0) {
zfs_znode_free_kmem(zp);
return (NULL);
}
zp->z_vnode = vp;
vp->v_data = zp;
/*
* Acquire the vnode lock before any possible interaction with the
* outside world. Specifically, there is an error path that calls
* zfs_vnode_forget() and the vnode should be exclusively locked.
*/
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
zp->z_sa_hdl = NULL;
zp->z_unlinked = 0;
zp->z_atime_dirty = 0;
zp->z_mapcnt = 0;
zp->z_id = db->db_object;
zp->z_blksz = blksz;
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
#if __FreeBSD_version >= 1300139
atomic_store_ptr(&zp->z_cached_symlink, NULL);
#endif
zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &zp->z_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&zp->z_links, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&zp->z_atime, 16);
#ifdef notyet
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
#endif
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&zp->z_uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&zp->z_gid, 8);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0 ||
(dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
(zp->z_pflags & ZFS_PROJID) &&
sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
zfs_vnode_forget(vp);
zp->z_vnode = NULL;
zfs_znode_free_kmem(zp);
return (NULL);
}
zp->z_projid = projid;
zp->z_mode = mode;
/* Cache the xattr parent id */
if (zp->z_pflags & ZFS_XATTR)
zp->z_xattr_parent = parent;
vp->v_type = IFTOVT((mode_t)mode);
switch (vp->v_type) {
case VDIR:
zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
break;
case VFIFO:
vp->v_op = &zfs_fifoops;
break;
case VREG:
if (parent == zfsvfs->z_shares_dir) {
ASSERT0(zp->z_uid);
ASSERT0(zp->z_gid);
vp->v_op = &zfs_shareops;
}
break;
default:
break;
}
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
zp->z_zfsvfs = zfsvfs;
mutex_exit(&zfsvfs->z_znodes_lock);
#if __FreeBSD_version >= 1400077
vn_set_state(vp, VSTATE_CONSTRUCTED);
#endif
VN_LOCK_AREC(vp);
if (vp->v_type != VFIFO)
VN_LOCK_ASHARE(vp);
return (zp);
}
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
* Create a new DMU object to hold a zfs znode.
*
* IN: dzp - parent directory for new znode
* vap - file attributes for new znode
* tx - dmu transaction id for zap operations
* cr - credentials of caller
* flag - flags:
* IS_ROOT_NODE - new object will be root
* IS_XATTR - new object is an attribute
* bonuslen - length of bonus buffer
* setaclp - File/Dir initial ACL
* fuidp - Tracks fuid allocation.
*
* OUT: zpp - allocated znode
*
*/
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
uint64_t crtime[2], atime[2], mtime[2], ctime[2];
uint64_t mode, size, links, parent, pflags;
uint64_t dzp_pflags = 0;
uint64_t rdev = 0;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
dmu_buf_t *db;
timestruc_t now;
uint64_t gen, obj;
int bonuslen;
int dnodesize;
sa_handle_t *sa_hdl;
dmu_object_type_t obj_type;
sa_bulk_attr_t *sa_attrs;
int cnt = 0;
zfs_acl_locator_cb_t locate = { 0 };
ASSERT3P(vap, !=, NULL);
ASSERT3U((vap->va_mask & AT_MODE), ==, AT_MODE);
if (zfsvfs->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
dnodesize = vap->va_fsid; /* ditto */
} else {
obj = 0;
vfs_timestamp(&now);
gen = dmu_tx_get_txg(tx);
dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
}
if (dnodesize == 0)
dnodesize = DNODE_MIN_SIZE;
obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;
/*
* Create a new DMU object.
*/
/*
* There's currently no mechanism for pre-reading the blocks that will
* be needed to allocate a new object, so we accept the small chance
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
if (vap->va_type == VDIR) {
if (zfsvfs->z_replay) {
VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = zap_create_norm_dnsize(zfsvfs->z_os,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx);
}
} else {
if (zfsvfs->z_replay) {
VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx);
}
}
ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
* to reference the just-allocated physical data area.
*/
if (flag & IS_ROOT_NODE) {
dzp->z_id = obj;
} else {
dzp_pflags = dzp->z_pflags;
}
/*
* If parent is an xattr, so am I.
*/
if (dzp_pflags & ZFS_XATTR) {
flag |= IS_XATTR;
}
if (zfsvfs->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
if (vap->va_type == VDIR) {
size = 2; /* contents ("." and "..") */
links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
} else {
size = links = 0;
}
if (vap->va_type == VBLK || vap->va_type == VCHR) {
rdev = zfs_expldev(vap->va_rdev);
}
parent = dzp->z_id;
mode = acl_ids->z_mode;
if (flag & IS_XATTR)
pflags |= ZFS_XATTR;
/*
* No execs denied will be determined when zfs_mode_compute() is called.
*/
pflags |= acl_ids->z_aclp->z_hints &
(ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
ZFS_TIME_ENCODE(&now, crtime);
ZFS_TIME_ENCODE(&now, ctime);
if (vap->va_mask & AT_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, atime);
} else {
ZFS_TIME_ENCODE(&now, atime);
}
if (vap->va_mask & AT_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
ZFS_TIME_ENCODE(&now, mtime);
}
/* Now add in all of the "SA" attributes */
VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
* Set up the array of attributes to be replaced/set on the new file.
*
* The order for DMU_OT_ZNODE is critical since it needs to be constructed
* in the old znode_phys_t format. Don't change this ordering.
*/
sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
} else {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
NULL, &acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
NULL, &acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
}
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
&empty_xattr, 8);
}
if (obj_type == DMU_OT_ZNODE ||
(vap->va_type == VBLK || vap->va_type == VCHR)) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
NULL, &rdev, 8);
}
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
&acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
&acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
sizeof (uint64_t) * 4);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
acl_ids->z_fuid, acl_ids->z_fgid);
}
VERIFY0(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx));
if (!(flag & IS_ROOT_NODE)) {
*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
ASSERT3P(*zpp, !=, NULL);
} else {
/*
* If we are creating the root node, the "parent" we
* passed in is the znode for the root.
*/
*zpp = dzp;
(*zpp)->z_sa_hdl = sa_hdl;
}
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = mode;
(*zpp)->z_dnodesize = dnodesize;
if (vap->va_mask & AT_XVATTR)
zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
}
if (!(flag & IS_ROOT_NODE)) {
vnode_t *vp = ZTOV(*zpp);
vp->v_vflag |= VV_FORCEINSMQ;
int err = insmntque(vp, zfsvfs->z_vfs);
vp->v_vflag &= ~VV_FORCEINSMQ;
(void) err;
KASSERT(err == 0, ("insmntque() failed: error %d", err));
}
kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
}
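/*
 * Illustrative call sequence (a hedged sketch, not the authoritative create
 * path): callers such as zfs_create() typically build the ACL ids, assign a
 * transaction and then call zfs_mknode() inside it, roughly:
 *
 *	zfs_acl_ids_create(dzp, 0, vap, cr, vsecp, &acl_ids, NULL);
 *	tx = dmu_tx_create(zfsvfs->z_os);
 *	... dmu_tx_hold_*() ...; dmu_tx_assign(tx, TXG_WAIT);
 *	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
 *	zfs_log_create(...); dmu_tx_commit(tx); zfs_acl_ids_free(&acl_ids);
 */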
/*
* Update in-core attributes. It is assumed the caller will be doing an
* sa_bulk_update to push the changes out.
*/
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
xoptattr_t *xoap;
xoap = xva_getxoptattr(xvap);
ASSERT3P(xoap, !=, NULL);
if (zp->z_zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_IN_SEQC(ZTOV(zp));
}
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
uint64_t times[2];
ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
&times, sizeof (times), tx);
XVA_SET_RTN(xvap, XAT_CREATETIME);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
xoap->xoa_av_quarantined, zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
zfs_sa_set_scanstamp(zp, xvap, tx);
XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
}
int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
znode_t *zp;
vnode_t *vp;
sa_handle_t *hdl;
int locked;
int err;
getnewvnode_reserve_();
again:
*zpp = NULL;
ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
getnewvnode_drop_reserve();
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
getnewvnode_drop_reserve();
return (SET_ERROR(EINVAL));
}
hdl = dmu_buf_get_user(db);
if (hdl != NULL) {
zp = sa_get_userdata(hdl);
/*
* Since "SA" does immediate eviction we
* should never find an SA handle that doesn't
* know about the znode.
*/
ASSERT3P(zp, !=, NULL);
ASSERT3U(zp->z_id, ==, obj_num);
if (zp->z_unlinked) {
err = SET_ERROR(ENOENT);
} else {
vp = ZTOV(zp);
/*
* Don't let the vnode disappear after
* ZFS_OBJ_HOLD_EXIT.
*/
VN_HOLD(vp);
*zpp = zp;
err = 0;
}
sa_buf_rele(db, NULL);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
if (err) {
getnewvnode_drop_reserve();
return (err);
}
locked = VOP_ISLOCKED(vp);
VI_LOCK(vp);
if (VN_IS_DOOMED(vp) && locked != LK_EXCLUSIVE) {
/*
* The vnode is doomed and this thread doesn't
* hold the exclusive lock on it, so the vnode
* must be being reclaimed by another thread.
* Otherwise the doomed vnode is being reclaimed
* by this thread and zfs_zget is called from
* ZIL internals.
*/
VI_UNLOCK(vp);
/*
* XXX vrele() locks the vnode when the last reference
* is dropped. Although in this case the vnode is
* doomed / dead and so no inactivation is required,
* the vnode lock is still acquired. That could result
* in a LOR with z_teardown_lock if another thread holds
* the vnode's lock and tries to take z_teardown_lock.
* But that is only possible if the other thread performs
* a ZFS vnode operation on the vnode. That either
* should not happen if the vnode is dead or the thread
* should also have a reference to the vnode and thus
* our reference is not last.
*/
VN_RELE(vp);
goto again;
}
VI_UNLOCK(vp);
getnewvnode_drop_reserve();
return (err);
}
/*
* Not found; create a new znode/vnode,
* but only if the file exists.
*
* There is a small window where zfs_vget() could
* find this object while a file create is still in
* progress. This is checked for in zfs_znode_alloc()
*
* if zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
doi.doi_bonus_type, NULL);
if (zp == NULL) {
err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
if (err == 0) {
vnode_t *vp = ZTOV(zp);
err = insmntque(vp, zfsvfs->z_vfs);
if (err == 0) {
vp->v_hash = obj_num;
VOP_UNLOCK1(vp);
} else {
zp->z_vnode = NULL;
zfs_znode_dmu_fini(zp);
zfs_znode_free(zp);
*zpp = NULL;
}
}
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
getnewvnode_drop_reserve();
return (err);
}
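/*
 * Usage sketch (hedged): on success zfs_zget() returns a znode whose vnode
 * is referenced but not locked, so a caller is expected to release it when
 * done, e.g.:
 *
 *	znode_t *zp;
 *	if (zfs_zget(zfsvfs, obj_num, &zp) == 0) {
 *		... lock/use ZTOV(zp) as needed ...
 *		VN_RELE(ZTOV(zp));
 *	}
 */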
int
zfs_rezget(znode_t *zp)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
dmu_object_info_t doi;
dmu_buf_t *db;
vnode_t *vp;
uint64_t obj_num = zp->z_id;
uint64_t mode, size;
sa_bulk_attr_t bulk[8];
int err;
int count = 0;
uint64_t gen;
/*
* Remove cached pages before reloading the znode, so that they are not
* lingering after we run into any error. Ideally, we should vgone()
* the vnode in case of error, but currently we cannot do that
* because of the LOR between the vnode lock and z_teardown_lock.
* So, instead, we have to "doom" the znode in the illumos style.
*
* Ignore invalid pages during the scan. This is to avoid deadlocks
* between page busying and the teardown lock, as pages are busied prior
* to a VOP_GETPAGES operation, which acquires the teardown read lock.
* Such pages will be invalid and can safely be skipped here.
*/
vp = ZTOV(zp);
#if __FreeBSD_version >= 1400042
vn_pages_remove_valid(vp, 0, 0);
#else
vn_pages_remove(vp, 0, 0);
#endif
ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
mutex_exit(&zp->z_acl_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
rw_exit(&zp->z_xattr_lock);
ASSERT3P(zp->z_sa_hdl, ==, NULL);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (SET_ERROR(EINVAL));
}
zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
size = zp->z_size;
/* reload cached values */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
&gen, sizeof (gen));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, sizeof (zp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&zp->z_links, sizeof (zp->z_links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&zp->z_atime, sizeof (zp->z_atime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&zp->z_uid, sizeof (zp->z_uid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&zp->z_gid, sizeof (zp->z_gid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (SET_ERROR(EIO));
}
zp->z_mode = mode;
if (gen != zp->z_gen) {
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (SET_ERROR(EIO));
}
/*
* It is highly improbable but still quite possible that two
* objects in different datasets are created with the same
* object numbers and in transaction groups with the same
* numbers. znodes corresponding to those objects would
* have the same z_id and z_gen, but their other attributes
* may be different.
* zfs recv -F may replace one of such objects with the other.
* As a result file properties recorded in the replaced
* object's vnode may no longer match the received object's
* properties. At present the only cached property is the
* file's type recorded in v_type.
* So, handle this case by leaving the old vnode and znode
* disassociated from the actual object. A new vnode and a
* znode will be created if the object is accessed
* (e.g. via a look-up). The old vnode and znode will be
* recycled when the last vnode reference is dropped.
*/
if (vp->v_type != IFTOVT((mode_t)zp->z_mode)) {
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (SET_ERROR(EIO));
}
/*
* If the file has zero links, then it has been unlinked on the send
* side and it must be in the received unlinked set.
* We call zfs_znode_dmu_fini() now to prevent any accesses to the
* stale data and to prevent automatic removal of the file in
* zfs_zinactive(). The file will be removed either when it is removed
* on the send side and the next incremental stream is received or
* when the unlinked set gets processed.
*/
zp->z_unlinked = (zp->z_links == 0);
if (zp->z_unlinked) {
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (0);
}
zp->z_blksz = doi.doi_data_block_size;
if (zp->z_size != size)
vnode_pager_setsize(vp, zp->z_size);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os = zfsvfs->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY0(dmu_object_free(os, acl_obj, tx));
}
VERIFY0(dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
zfs_znode_free(zp);
}
void
zfs_zinactive(znode_t *zp)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
uint64_t z_id = zp->z_id;
ASSERT3P(zp->z_sa_hdl, !=, NULL);
/*
* Don't allow a zfs_zget() while we're trying to release this znode.
*/
ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
/*
* If this was the last reference to a file with no links, remove
* the file from the file system unless the file system is mounted
* read-only. That can happen, for example, if the file system was
* originally read-write, the file was opened, then unlinked and
* the file system was made read-only before the file was finally
* closed. The file will remain in the unlinked set.
*/
if (zp->z_unlinked) {
ASSERT(!zfsvfs->z_issnap);
if ((zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) == 0) {
ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
zfs_rmnode(zp);
return;
}
}
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
zfs_znode_free(zp);
}
void
zfs_znode_free(znode_t *zp)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
#if __FreeBSD_version >= 1300139
char *symlink;
#endif
ASSERT3P(zp->z_sa_hdl, ==, NULL);
zp->z_vnode = NULL;
mutex_enter(&zfsvfs->z_znodes_lock);
POINTER_INVALIDATE(&zp->z_zfsvfs);
list_remove(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes--;
mutex_exit(&zfsvfs->z_znodes_lock);
#if __FreeBSD_version >= 1300139
symlink = atomic_load_ptr(&zp->z_cached_symlink);
if (symlink != NULL) {
atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)NULL);
cache_symlink_free(symlink, strlen(symlink) + 1);
}
#endif
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
zfs_znode_free_kmem(zp);
}
void
zfs_tstamp_update_setup_ext(znode_t *zp, uint_t flag, uint64_t mtime[2],
uint64_t ctime[2], boolean_t have_tx)
{
timestruc_t now;
vfs_timestamp(&now);
if (have_tx) { /* will sa_bulk_update happen really soon? */
zp->z_atime_dirty = 0;
zp->z_seq++;
} else {
zp->z_atime_dirty = 1;
}
if (flag & AT_ATIME) {
ZFS_TIME_ENCODE(&now, zp->z_atime);
}
if (flag & AT_MTIME) {
ZFS_TIME_ENCODE(&now, mtime);
if (zp->z_zfsvfs->z_use_fuids) {
zp->z_pflags |= (ZFS_ARCHIVE |
ZFS_AV_MODIFIED);
}
}
if (flag & AT_CTIME) {
ZFS_TIME_ENCODE(&now, ctime);
if (zp->z_zfsvfs->z_use_fuids)
zp->z_pflags |= ZFS_ARCHIVE;
}
}
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
uint64_t ctime[2])
{
zfs_tstamp_update_setup_ext(zp, flag, mtime, ctime, B_TRUE);
}
/*
* Grow the block size for a file.
*
* IN: zp - znode of file to free data in.
* size - requested block size
* tx - open transaction.
*
* NOTE: this function assumes that the znode is write locked.
*/
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
int error;
u_longlong_t dummy;
if (size <= zp->z_blksz)
return;
/*
* If the file size is already greater than the current blocksize,
* we will not grow. If there is more than one block in a file,
* the blocksize cannot change.
*/
if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
size, 0, tx);
if (error == ENOTSUP)
return;
ASSERT0(error);
/* What blocksize did we actually get? */
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
/*
* Increase the file length
*
* IN: zp - znode of file to free data in.
* end - new end-of-file
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_extend(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
dmu_tx_t *tx;
zfs_locked_range_t *lr;
uint64_t newblksz;
int error;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end <= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
(!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
} else {
newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
newblksz = 0;
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
if (newblksz)
zfs_grow_blocksize(zp, newblksz, tx);
zp->z_size = end;
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
&zp->z_size, sizeof (zp->z_size), tx));
vnode_pager_setsize(ZTOV(zp), end);
zfs_rangelock_exit(lr);
dmu_tx_commit(tx);
return (0);
}
/*
* Free space in a file.
*
* IN: zp - znode of file to free data in.
* off - start of section to free.
* len - length of section to free.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_locked_range_t *lr;
int error;
/*
* Lock the range being freed.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (off >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
if (off + len > zp->z_size)
len = zp->z_size - off;
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
if (error == 0) {
#if __FreeBSD_version >= 1400032
vnode_pager_purge_range(ZTOV(zp), off, off + len);
#else
/*
* Before __FreeBSD_version 1400032 we cannot free blocks in the
* middle of a file, but only at the end of a file, so this code
* path should never happen.
*/
vnode_pager_setsize(ZTOV(zp), off);
#endif
}
zfs_rangelock_exit(lr);
return (error);
}
/*
* Truncate a file
*
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
vnode_t *vp = ZTOV(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
int error;
sa_bulk_attr_t bulk[2];
int count = 0;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
DMU_OBJECT_END);
if (error) {
zfs_rangelock_exit(lr);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
zp->z_size = end;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
}
VERIFY0(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
dmu_tx_commit(tx);
/*
* Clear any mapped pages in the truncated region. This has to
* happen outside of the transaction to avoid the possibility of
* a deadlock with someone trying to push a page that we are
* about to invalidate.
*/
vnode_pager_setsize(vp, end);
zfs_rangelock_exit(lr);
return (0);
}
/*
* Free space in a file
*
* IN: zp - znode of file to free data in.
* off - start of range
* len - end of range (0 => EOF)
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
* RETURN: 0 on success, error code on failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zilog_t *zilog = zfsvfs->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
sizeof (mode))) != 0)
return (error);
if (off > zp->z_size) {
error = zfs_extend(zp, off+len);
if (error == 0 && log)
goto log;
else
return (error);
}
if (len == 0) {
error = zfs_trunc(zp, off);
} else {
if ((error = zfs_free_range(zp, off, len)) == 0 &&
off + len > zp->z_size)
error = zfs_extend(zp, off+len);
}
if (error || !log)
return (error);
log:
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT0(error);
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
dmu_tx_commit(tx);
return (0);
}
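/*
 * Worked example of the three cases above (illustrative, not taken from a
 * caller): with a 1000-byte file,
 *	zfs_freesp(zp, 4096, 0, flag, TRUE)  extends the file to 4096 bytes,
 *	zfs_freesp(zp, 0, 0, flag, TRUE)     truncates it to zero length, and
 *	zfs_freesp(zp, 100, 200, flag, TRUE) frees the byte range [100, 300).
 */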
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
uint64_t moid, obj, sa_obj, version;
uint64_t sense = ZFS_CASE_SENSITIVE;
uint64_t norm = 0;
nvpair_t *elem;
int error;
int i;
znode_t *rootzp = NULL;
zfsvfs_t *zfsvfs;
vattr_t vattr;
znode_t *zp;
zfs_acl_ids_t acl_ids;
/*
* First attempt to create master node.
*/
/*
* In an empty objset, there are no blocks to read and thus
* there can be no i/o errors (which we assert below).
*/
moid = MASTER_NODE_OBJ;
error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
DMU_OT_NONE, 0, tx);
ASSERT0(error);
/*
* Set starting attributes.
*/
version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
elem = NULL;
while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
/* For the moment we expect all zpl props to be uint64_ts */
uint64_t val;
const char *name;
ASSERT3S(nvpair_type(elem), ==, DATA_TYPE_UINT64);
val = fnvpair_value_uint64(elem);
name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
if (val < version)
version = val;
} else {
error = zap_update(os, moid, name, 8, 1, &val, tx);
}
ASSERT0(error);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val;
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
sense = val;
}
ASSERT3U(version, !=, 0);
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
ASSERT0(error);
/*
* Create zap object used for SA attribute registration
*/
if (version >= ZPL_VERSION_SA) {
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
} else {
sa_obj = 0;
}
/*
* Create a delete queue.
*/
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
ASSERT0(error);
/*
* Create root znode. Create minimal znode/vnode/zfsvfs
* to allow zfs_mknode to work.
*/
VATTR_NULL(&vattr);
vattr.va_mask = AT_MODE|AT_UID|AT_GID;
vattr.va_type = VDIR;
vattr.va_mode = S_IFDIR|0755;
vattr.va_uid = crgetuid(cr);
vattr.va_gid = crgetgid(cr);
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
rootzp = zfs_znode_alloc_kmem(KM_SLEEP);
ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
rootzp->z_unlinked = 0;
rootzp->z_atime_dirty = 0;
rootzp->z_is_sa = USE_SA(version, os);
zfsvfs->z_os = os;
zfsvfs->z_parent = zfsvfs;
zfsvfs->z_version = version;
zfsvfs->z_use_fuids = USE_FUIDS(version, os);
zfsvfs->z_use_sa = USE_SA(version, os);
zfsvfs->z_norm = norm;
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
ASSERT0(error);
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
rootzp->z_zfsvfs = zfsvfs;
VERIFY0(zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids, NULL));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
ASSERT0(error);
zfs_acl_ids_free(&acl_ids);
POINTER_INVALIDATE(&rootzp->z_zfsvfs);
sa_handle_destroy(rootzp->z_sa_hdl);
zfs_znode_free_kmem(rootzp);
/*
* Create shares directory
*/
error = zfs_create_share_dir(zfsvfs, tx);
ASSERT0(error);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_destroy(&zfsvfs->z_hold_mtx[i]);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */
static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
uint64_t sa_obj = 0;
int error;
error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
if (error != 0 && error != ENOENT)
return (error);
error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
return (error);
}
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
dmu_buf_t **db, const void *tag)
{
dmu_object_info_t doi;
int error;
if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
return (error);
dmu_object_info_from_db(*db, &doi);
if ((doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t))) {
sa_buf_rele(*db, tag);
return (SET_ERROR(ENOTSUP));
}
error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
if (error != 0) {
sa_buf_rele(*db, tag);
return (error);
}
return (0);
}
static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, const void *tag)
{
sa_handle_destroy(hdl);
sa_buf_rele(db, tag);
}
/*
* Given an object number, return its parent object number and whether
* or not the object is an extended attribute directory.
*/
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
uint64_t *pobjp, int *is_xattrdir)
{
uint64_t parent;
uint64_t pflags;
uint64_t mode;
uint64_t parent_mode;
sa_bulk_attr_t bulk[3];
sa_handle_t *sa_hdl;
dmu_buf_t *sa_db;
int count = 0;
int error;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
&parent, sizeof (parent));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
&pflags, sizeof (pflags));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&mode, sizeof (mode));
if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
return (error);
/*
* When a link is removed its parent pointer is not changed and will
* be invalid. There are two cases where a link is removed but the
* file stays around: when it goes to the delete queue and when there
* are additional links.
*/
error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
if (error != 0)
return (error);
error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
if (error != 0)
return (error);
*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
/*
* Extended attributes can be applied to files, directories, etc.
* Otherwise the parent must be a directory.
*/
if (!*is_xattrdir && !S_ISDIR(parent_mode))
return (SET_ERROR(EINVAL));
*pobjp = parent;
return (0);
}
/*
* Given an object number, return some zpl level statistics
*/
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
zfs_stat_t *sb)
{
sa_bulk_attr_t bulk[4];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&sb->zs_mode, sizeof (sb->zs_mode));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
&sb->zs_gen, sizeof (sb->zs_gen));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
&sb->zs_links, sizeof (sb->zs_links));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
&sb->zs_ctime, sizeof (sb->zs_ctime));
return (sa_bulk_lookup(hdl, bulk, count));
}
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
sa_attr_type_t *sa_table, char *buf, int len)
{
sa_handle_t *sa_hdl;
sa_handle_t *prevhdl = NULL;
dmu_buf_t *prevdb = NULL;
dmu_buf_t *sa_db = NULL;
char *path = buf + len - 1;
int error;
*path = '\0';
sa_hdl = hdl;
uint64_t deleteq_obj;
VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
error = zap_lookup_int(osp, deleteq_obj, obj);
if (error == 0) {
return (ESTALE);
} else if (error != ENOENT) {
return (error);
}
for (;;) {
uint64_t pobj;
char component[MAXNAMELEN + 2];
size_t complen;
int is_xattrdir;
if (prevdb) {
ASSERT3P(prevhdl, !=, NULL);
zfs_release_sa_handle(prevhdl, prevdb, FTAG);
}
if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
&is_xattrdir)) != 0)
break;
if (pobj == obj) {
if (path[0] != '/')
*--path = '/';
break;
}
component[0] = '/';
if (is_xattrdir) {
(void) sprintf(component + 1, "<xattrdir>");
} else {
error = zap_value_search(osp, pobj, obj,
ZFS_DIRENT_OBJ(-1ULL), component + 1);
if (error != 0)
break;
}
complen = strlen(component);
path -= complen;
ASSERT3P(path, >=, buf);
memcpy(path, component, complen);
obj = pobj;
if (sa_hdl != hdl) {
prevhdl = sa_hdl;
prevdb = sa_db;
}
error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
if (error != 0) {
sa_hdl = prevhdl;
sa_db = prevdb;
break;
}
}
if (sa_hdl != NULL && sa_hdl != hdl) {
ASSERT3P(sa_db, !=, NULL);
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
}
if (error == 0)
(void) memmove(buf, path, buf + len - path);
return (error);
}
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
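/*
 * Usage sketch (hedged): resolve an object number to a path relative to the
 * dataset root, e.g. for diagnostic output:
 *
 *	char buf[MAXPATHLEN];
 *	if (zfs_obj_to_path(osp, obj, buf, sizeof (buf)) == 0)
 *		printf("obj %llu: %s\n", (u_longlong_t)obj, buf);
 *
 * The path is assembled right-to-left at the tail of the buffer and moved
 * to the front on success, so the buffer must fit the complete path.
 */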
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
char *buf, int len)
{
char *path = buf + len - 1;
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
*path = '\0';
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
if (error != 0) {
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
+/*
+ * Read a property stored within the master node.
+ */
+int
+zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
+{
+ uint64_t *cached_copy = NULL;
+
+ /*
+ * Figure out where in the objset_t the cached copy would live, if it
+ * is available for the requested property.
+ */
+ if (os != NULL) {
+ switch (prop) {
+ case ZFS_PROP_VERSION:
+ cached_copy = &os->os_version;
+ break;
+ case ZFS_PROP_NORMALIZE:
+ cached_copy = &os->os_normalization;
+ break;
+ case ZFS_PROP_UTF8ONLY:
+ cached_copy = &os->os_utf8only;
+ break;
+ case ZFS_PROP_CASE:
+ cached_copy = &os->os_casesensitivity;
+ break;
+ default:
+ break;
+ }
+ }
+ if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
+ *value = *cached_copy;
+ return (0);
+ }
+
+ /*
+ * If the property wasn't cached, look up the file system's value for
+ * the property. For the version property, we look up a slightly
+ * different string.
+ */
+ const char *pname;
+ int error = ENOENT;
+ if (prop == ZFS_PROP_VERSION) {
+ pname = ZPL_VERSION_STR;
+ } else {
+ pname = zfs_prop_to_name(prop);
+ }
+
+ if (os != NULL) {
+ ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
+ error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
+ }
+
+ if (error == ENOENT) {
+ /* No value set, use the default value */
+ switch (prop) {
+ case ZFS_PROP_VERSION:
+ *value = ZPL_VERSION;
+ break;
+ case ZFS_PROP_NORMALIZE:
+ case ZFS_PROP_UTF8ONLY:
+ *value = 0;
+ break;
+ case ZFS_PROP_CASE:
+ *value = ZFS_CASE_SENSITIVE;
+ break;
+ case ZFS_PROP_ACLTYPE:
+ *value = ZFS_ACLTYPE_NFSV4;
+ break;
+ default:
+ return (error);
+ }
+ error = 0;
+ }
+
+ /*
+ * If one of the methods for getting the property value above worked,
+ * copy it into the objset_t's cache.
+ */
+ if (error == 0 && cached_copy != NULL) {
+ *cached_copy = *value;
+ }
+
+ return (error);
+}
+
+
void
zfs_znode_update_vfs(znode_t *zp)
{
vm_object_t object;
if ((object = ZTOV(zp)->v_object) == NULL ||
zp->z_size == object->un_pager.vnp.vnp_size)
return;
vnode_pager_setsize(ZTOV(zp), zp->z_size);
}
#ifdef _KERNEL
int
zfs_znode_parent_and_name(znode_t *zp, znode_t **dzpp, char *buf)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
uint64_t parent;
int is_xattrdir;
int err;
/* Extended attributes should not be visible as regular files. */
if ((zp->z_pflags & ZFS_XATTR) != 0)
return (SET_ERROR(EINVAL));
err = zfs_obj_to_pobj(zfsvfs->z_os, zp->z_sa_hdl, zfsvfs->z_attr_table,
&parent, &is_xattrdir);
if (err != 0)
return (err);
ASSERT0(is_xattrdir);
/* No name as this is a root object. */
if (parent == zp->z_id)
return (SET_ERROR(EINVAL));
err = zap_value_search(zfsvfs->z_os, parent, zp->z_id,
ZFS_DIRENT_OBJ(-1ULL), buf);
if (err != 0)
return (err);
err = zfs_zget(zfsvfs, parent, dzpp);
return (err);
}
#endif /* _KERNEL */
#ifdef _KERNEL
int
zfs_rlimit_fsize(off_t fsize)
{
struct thread *td = curthread;
off_t lim;
if (td == NULL)
return (0);
lim = lim_cur(td, RLIMIT_FSIZE);
if (__predict_true((uoff_t)fsize <= lim))
return (0);
/*
* The limit is reached.
*/
PROC_LOCK(td->td_proc);
kern_psignal(td->td_proc, SIGXFSZ);
PROC_UNLOCK(td->td_proc);
return (EFBIG);
}
#endif /* _KERNEL */
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
index 963e7a1ec96a..3c30dfc577b4 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
@@ -1,1461 +1,1474 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
/*
* Within the scope of this file the kmem_cache_* definitions
* are removed to allow access to the real Linux slab allocator.
*/
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free
/*
* Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
* with smp_mb__{before,after}_atomic() because they were redundant. This is
* only used inside our SLAB allocator, so we implement an internal wrapper
* here to give us smp_mb__{before,after}_atomic() on older kernels.
*/
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif
/* BEGIN CSTYLED */
/*
* Cache magazines are an optimization designed to minimize the cost of
* allocating memory. They do this by keeping a per-cpu cache of recently
* freed objects, which can then be reallocated without taking a lock. This
* can improve performance on highly contended caches. However, because
* objects in magazines will prevent otherwise empty slabs from being
* immediately released, this may not be ideal for low-memory machines.
*
* For this reason spl_kmem_cache_magazine_size can be used to set a maximum
* magazine size. When this value is set to 0 the magazine size will be
* automatically determined based on the object size. Otherwise magazines
* will be limited to 2-256 objects per magazine (i.e., per CPU). Magazines
* may never be entirely disabled in this implementation.
*/
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
"Default magazine size (2-256), set automatically (0)");
/*
* The default behavior is to report the number of objects remaining in the
* cache. This allows the Linux VM to repeatedly reclaim objects from the
* cache when memory is low to satisfy other memory allocations. Alternately,
* setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
* is reclaimed. This may increase the likelihood of out of memory events.
*/
static unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
/*
* For small objects the Linux slab allocator should be used to make the most
* efficient use of the memory. However, large objects are not supported by
* the Linux slab and therefore the SPL implementation is preferred. A cutoff
* of 16K was determined to be optimal for architectures using 4K pages and
* to also work well on architectures using larger 64K page sizes.
*/
static unsigned int spl_kmem_cache_slab_limit = 16384;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
"Objects less than N bytes use the Linux slab");
/*
* The number of threads available to allocate new slabs for caches. This
* should not need to be tuned but it is available for performance analysis.
*/
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
"Number of spl_kmem_cache threads");
/* END CSTYLED */
/*
* Slab allocation interfaces
*
* While the Linux slab implementation was inspired by the Solaris
* implementation I cannot use it to emulate the Solaris APIs. I
* require two features which are not provided by the Linux slab.
*
* 1) Constructors AND destructors. Recent versions of the Linux
* kernel have removed support for destructors. This is a deal
* breaker for the SPL which contains particularly expensive
* initializers for mutexes, condition variables, etc. We also
* require a minimal level of cleanup for these data types, unlike
* many Linux data types which do not need to be explicitly destroyed.
*
* 2) Virtual address space backed slab. Callers of the Solaris slab
* expect it to work well for both small and very large allocations.
* Because of memory fragmentation the Linux slab which is backed
* by kmalloc'ed memory performs very badly when confronted with
* large numbers of large allocations. Basing the slab on the
* virtual address space removes the need for contiguous pages
* and greatly improves performance for large allocations.
*
* For these reasons, the SPL has its own slab implementation with
* the needed features. It is not as highly optimized as either the
* Solaris or Linux slabs, but it should get me most of what is
* needed until it can be optimized or obsoleted by another approach.
*
* One serious concern I do have about this method is the relatively
* small virtual address space on 32bit arches. This will seriously
* constrain the size of the slab caches and their performance.
*/
struct list_head spl_kmem_cache_list; /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
static taskq_t *spl_kmem_cache_taskq; /* Task queue for aging / reclaim */
static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
/* Resulting allocated memory will be page aligned */
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
return (ptr);
}
static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
/*
* The Linux direct reclaim path uses this out of band value to
* determine if forward progress is being made. Normally this is
* incremented by kmem_freepages() which is part of the various
* Linux slab implementations. However, since we are using none
* of that infrastructure we are responsible for incrementing it.
*/
if (current->reclaim_state)
+#ifdef HAVE_RECLAIM_STATE_RECLAIMED
+ current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
+#else
current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
-
+#endif
vfree(ptr);
}
/*
* Required space for each aligned sks.
*/
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
skc->skc_obj_align, uint32_t));
}
/*
* Required space for each aligned object.
*/
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
uint32_t align = skc->skc_obj_align;
return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}
uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);
uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);
/*
* Look up the spl_kmem_obj_t for an object given that object.
*/
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
skc->skc_obj_align, uint32_t));
}
/*
* It's important that we pack the spl_kmem_obj_t structure and the
* actual objects into one large address space to minimize the number
* of calls to the allocator. It is far better to do a few large
* allocations and then subdivide it ourselves. Now which allocator
* we use requires balancing a few trade offs.
*
* For small objects we use kmem_alloc() because as long as you are
* only requesting a small number of pages (ideally just one) it's cheap.
* However, when you start requesting multiple pages with kmem_alloc()
* it gets increasingly expensive since it requires contiguous pages.
* For this reason we shift to vmem_alloc() for slabs of large objects
* which removes the need for contiguous pages. We do not use
* vmem_alloc() in all cases because there is significant locking
* overhead in __get_vm_area_node(). This function takes a single
* global lock when acquiring an available virtual address range which
* serializes all vmem_alloc()'s for all slab caches. Using slightly
* different allocation functions for small and large objects should
* give us the best of both worlds.
*
* +------------------------+
* | spl_kmem_slab_t --+-+  |
* | skc_obj_size    <-+ |  |
* | spl_kmem_obj_t      |  |
* | skc_obj_size    <---+  |
* | spl_kmem_obj_t      |  |
* | ...                 v  |
* +------------------------+
*/
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
void *base;
uint32_t obj_size;
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
return (NULL);
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
sks->sks_objs = skc->skc_slab_objs;
sks->sks_age = jiffies;
sks->sks_cache = skc;
INIT_LIST_HEAD(&sks->sks_list);
INIT_LIST_HEAD(&sks->sks_free_list);
sks->sks_ref = 0;
obj_size = spl_obj_size(skc);
for (int i = 0; i < sks->sks_objs; i++) {
void *obj = base + spl_sks_size(skc) + (i * obj_size);
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
sko->sko_addr = obj;
sko->sko_magic = SKO_MAGIC;
sko->sko_slab = sks;
INIT_LIST_HEAD(&sko->sko_list);
list_add_tail(&sko->sko_list, &sks->sks_free_list);
}
return (sks);
}
/*
* Remove a slab from the complete or partial list; it must be called with
* the 'skc->skc_lock' held but the actual free must be performed
* outside the lock to prevent deadlocking on vmem addresses.
*/
static void
spl_slab_free(spl_kmem_slab_t *sks,
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref == 0);
skc = sks->sks_cache;
ASSERT(skc->skc_magic == SKC_MAGIC);
/*
* Update slab/objects counters in the cache, then remove the
* slab from the skc->skc_partial_list. Finally add the slab
* and all its objects into the private work lists where the
* destructors will be called and the memory freed to the system.
*/
skc->skc_obj_total -= sks->sks_objs;
skc->skc_slab_total--;
list_del(&sks->sks_list);
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
}
/*
* Reclaim empty slabs at the end of the partial list.
*/
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
spl_kmem_slab_t *sks = NULL, *m = NULL;
spl_kmem_obj_t *sko = NULL, *n = NULL;
LIST_HEAD(sks_list);
LIST_HEAD(sko_list);
/*
* Empty slabs and objects must be moved to a private list so they
* can be safely freed outside the spin lock. All empty slabs are
* at the end of skc->skc_partial_list, therefore once a non-empty
* slab is found we can stop scanning.
*/
spin_lock(&skc->skc_lock);
list_for_each_entry_safe_reverse(sks, m,
&skc->skc_partial_list, sks_list) {
if (sks->sks_ref > 0)
break;
spl_slab_free(sks, &sks_list, &sko_list);
}
spin_unlock(&skc->skc_lock);
/*
* The following two loops ensure all the object destructors are run,
* and the slabs themselves are freed. This is all done outside the
* skc->skc_lock since this allows the destructor to sleep, and
* allows us to perform a conditional reschedule when freeing a
* large number of objects and slabs back to the system.
*/
list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
ASSERT(sko->sko_magic == SKO_MAGIC);
}
list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
ASSERT(sks->sks_magic == SKS_MAGIC);
kv_free(skc, sks, skc->skc_slab_size);
}
}
static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
struct rb_node *node = root->rb_node;
spl_kmem_emergency_t *ske;
unsigned long address = (unsigned long)obj;
while (node) {
ske = container_of(node, spl_kmem_emergency_t, ske_node);
if (address < ske->ske_obj)
node = node->rb_left;
else if (address > ske->ske_obj)
node = node->rb_right;
else
return (ske);
}
return (NULL);
}
static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
spl_kmem_emergency_t *ske_tmp;
unsigned long address = ske->ske_obj;
while (*new) {
ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
parent = *new;
if (address < ske_tmp->ske_obj)
new = &((*new)->rb_left);
else if (address > ske_tmp->ske_obj)
new = &((*new)->rb_right);
else
return (0);
}
rb_link_node(&ske->ske_node, parent, new);
rb_insert_color(&ske->ske_node, root);
return (1);
}
/*
* Allocate a single emergency object and track it in a red black tree.
*/
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
gfp_t lflags = kmem_flags_convert(flags);
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
int empty;
/* Last chance: use a partial slab if one now exists */
spin_lock(&skc->skc_lock);
empty = list_empty(&skc->skc_partial_list);
spin_unlock(&skc->skc_lock);
if (!empty)
return (-EEXIST);
ske = kmalloc(sizeof (*ske), lflags);
if (ske == NULL)
return (-ENOMEM);
ske->ske_obj = __get_free_pages(lflags, order);
if (ske->ske_obj == 0) {
kfree(ske);
return (-ENOMEM);
}
spin_lock(&skc->skc_lock);
empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
if (likely(empty)) {
skc->skc_obj_total++;
skc->skc_obj_emergency++;
if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
skc->skc_obj_emergency_max = skc->skc_obj_emergency;
}
spin_unlock(&skc->skc_lock);
if (unlikely(!empty)) {
free_pages(ske->ske_obj, order);
kfree(ske);
return (-EINVAL);
}
*obj = (void *)ske->ske_obj;
return (0);
}
/*
* Locate the passed object in the red black tree and free it.
*/
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
int order = get_order(skc->skc_obj_size);
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
if (ske) {
rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
skc->skc_obj_emergency--;
skc->skc_obj_total--;
}
spin_unlock(&skc->skc_lock);
if (ske == NULL)
return (-ENOENT);
free_pages(ske->ske_obj, order);
kfree(ske);
return (0);
}
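Emergency objects bypass the normal slab layout, so the only way spl_emergency_free() can recognize them is by looking the raw address up in the red-black tree keyed on ske_obj. A rough user-space analogue of that address-keyed tracking, using POSIX tsearch()/tfind()/tdelete() (the names and structure here are illustrative, not part of the patch):

#include <search.h>
#include <stdint.h>
#include <stdlib.h>

static void *emergency_root;           /* tree of tracked object addresses */

static int addr_cmp(const void *a, const void *b)
{
    uintptr_t x = (uintptr_t)a, y = (uintptr_t)b;
    return (x < y) ? -1 : (x > y);     /* order by address, like ske_obj */
}

static void *emergency_alloc(size_t size)
{
    void *obj = malloc(size);
    if (obj != NULL)
        tsearch(obj, &emergency_root, addr_cmp);
    return obj;
}

/* Returns 1 if obj was an emergency allocation and has been freed. */
static int emergency_free(void *obj)
{
    if (tfind(obj, &emergency_root, addr_cmp) == NULL)
        return 0;
    tdelete(obj, &emergency_root, addr_cmp);
    free(obj);
    return 1;
}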
/*
* Release objects from the per-cpu magazine back to their slab. The flush
* argument contains the max number of entries to remove from the magazine.
*/
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
spin_lock(&skc->skc_lock);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
int count = MIN(flush, skm->skm_avail);
for (int i = 0; i < count; i++)
spl_cache_shrink(skc, skm->skm_objs[i]);
skm->skm_avail -= count;
memmove(skm->skm_objs, &(skm->skm_objs[count]),
sizeof (void *) * skm->skm_avail);
spin_unlock(&skc->skc_lock);
}
/*
* Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
* When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
* for very small objects we may end up with more than this so as not
* to waste space in the minimal allocation of a single page.
*/
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;
sks_size = spl_sks_size(skc);
obj_size = spl_obj_size(skc);
max_size = (spl_kmem_cache_max_size * 1024 * 1024);
tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);
if (tgt_size <= max_size) {
tgt_objs = (tgt_size - sks_size) / obj_size;
} else {
tgt_objs = (max_size - sks_size) / obj_size;
tgt_size = (tgt_objs * obj_size) + sks_size;
}
if (tgt_objs == 0)
return (-ENOSPC);
*objs = tgt_objs;
*size = tgt_size;
return (0);
}
/*
* Make a guess at a reasonable per-cpu magazine size based on the size of
* each object and the cost of caching N of them in each magazine. Long
* term this should really adapt based on an observed usage heuristic.
*/
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
uint32_t obj_size = spl_obj_size(skc);
int size;
if (spl_kmem_cache_magazine_size > 0)
return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));
/* Per-magazine sizes below assume a 4 KiB page size */
if (obj_size > (PAGE_SIZE * 256))
size = 4; /* Minimum 4 MiB per-magazine */
else if (obj_size > (PAGE_SIZE * 32))
size = 16; /* Minimum 2 MiB per-magazine */
else if (obj_size > (PAGE_SIZE))
size = 64; /* Minimum 256 KiB per-magazine */
else if (obj_size > (PAGE_SIZE / 4))
size = 128; /* Minimum 128 KiB per-magazine */
else
size = 256;
return (size);
}
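For a concrete feel of the heuristic above, assuming the 4 KiB page size the comments reference: a 16 KiB object (> PAGE_SIZE) gets a 64-entry magazine (1 MiB cached per CPU for that size), while a 2 KiB object (> PAGE_SIZE/4) gets 128 entries (256 KiB per CPU). A standalone sketch that mirrors the branch logic, ignoring the tunable override (the fixed 4096-byte page and all names are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

#define ASSUMED_PAGE_SIZE 4096u        /* matches the 4 KiB assumption above */

/* Mirror of the spl_magazine_size() branches, without the module tunable. */
static unsigned magazine_entries(uint32_t obj_size)
{
    if (obj_size > ASSUMED_PAGE_SIZE * 256)
        return 4;
    else if (obj_size > ASSUMED_PAGE_SIZE * 32)
        return 16;
    else if (obj_size > ASSUMED_PAGE_SIZE)
        return 64;
    else if (obj_size > ASSUMED_PAGE_SIZE / 4)
        return 128;
    return 256;
}

int main(void)
{
    uint32_t sizes[] = { 512, 2048, 16384, 256 * 1024, 2 * 1024 * 1024 };
    for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
        unsigned n = magazine_entries(sizes[i]);
        printf("obj %7u bytes -> %3u entries (%u KiB per magazine)\n",
            (unsigned)sizes[i], n, (unsigned)(n * sizes[i] / 1024));
    }
    return 0;
}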
/*
* Allocate a per-cpu magazine to associate with a specific core.
*/
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
spl_kmem_magazine_t *skm;
int size = sizeof (spl_kmem_magazine_t) +
sizeof (void *) * skc->skc_mag_size;
skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
if (skm) {
skm->skm_magic = SKM_MAGIC;
skm->skm_avail = 0;
skm->skm_size = skc->skc_mag_size;
skm->skm_refill = skc->skc_mag_refill;
skm->skm_cache = skc;
skm->skm_cpu = cpu;
}
return (skm);
}
/*
* Free a per-cpu magazine associated with a specific core.
*/
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(skm->skm_avail == 0);
kfree(skm);
}
/*
* Create all per-cpu magazines of reasonable sizes.
*/
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
int i = 0;
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
skc->skc_mag_size = spl_magazine_size(skc);
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
for_each_possible_cpu(i) {
skc->skc_mag[i] = spl_magazine_alloc(skc, i);
if (!skc->skc_mag[i]) {
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
kfree(skc->skc_mag);
return (-ENOMEM);
}
}
return (0);
}
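Note the error path above: it walks back over the CPUs already populated and frees their magazines before failing, so a partial allocation never leaks. The same unwind-on-failure idiom in a self-contained form (purely illustrative names, not part of the patch):

#include <stdlib.h>

/* Allocate one buffer per slot; on failure free the ones already allocated. */
static void **alloc_all(int slots, size_t size)
{
    void **v = calloc(slots, sizeof (void *));
    if (v == NULL)
        return NULL;

    for (int i = 0; i < slots; i++) {
        v[i] = malloc(size);
        if (v[i] == NULL) {
            for (i--; i >= 0; i--)     /* unwind, mirroring spl_magazine_create() */
                free(v[i]);
            free(v);
            return NULL;
        }
    }
    return v;
}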
/*
* Destroy all per-cpu magazines.
*/
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
spl_kmem_magazine_t *skm;
int i = 0;
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
for_each_possible_cpu(i) {
skm = skc->skc_mag[i];
spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
kfree(skc->skc_mag);
}
/*
* Create an object cache based on the following arguments:
* name cache name
* size cache object size
* align cache object alignment
* ctor cache object constructor
* dtor cache object destructor
* reclaim cache object reclaim
* priv cache private data for ctor/dtor/reclaim
* vmp unused must be NULL
* flags
* KMC_KVMEM Force kvmem backed SPL cache
* KMC_SLAB Force Linux slab backed cache
* KMC_NODEBUG Disable debugging (unsupported)
*/
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
void *priv, void *vmp, int flags)
{
gfp_t lflags = kmem_flags_convert(KM_SLEEP);
spl_kmem_cache_t *skc;
int rc;
/*
* Unsupported flags
*/
ASSERT(vmp == NULL);
ASSERT(reclaim == NULL);
might_sleep();
skc = kzalloc(sizeof (*skc), lflags);
if (skc == NULL)
return (NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
skc->skc_name = kmalloc(skc->skc_name_size, lflags);
if (skc->skc_name == NULL) {
kfree(skc);
return (NULL);
}
strlcpy(skc->skc_name, name, skc->skc_name_size);
skc->skc_ctor = ctor;
skc->skc_dtor = dtor;
skc->skc_private = priv;
skc->skc_vmp = vmp;
skc->skc_linux_cache = NULL;
skc->skc_flags = flags;
skc->skc_obj_size = size;
skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
atomic_set(&skc->skc_ref, 0);
INIT_LIST_HEAD(&skc->skc_list);
INIT_LIST_HEAD(&skc->skc_complete_list);
INIT_LIST_HEAD(&skc->skc_partial_list);
skc->skc_emergency_tree = RB_ROOT;
spin_lock_init(&skc->skc_lock);
init_waitqueue_head(&skc->skc_waitq);
skc->skc_slab_fail = 0;
skc->skc_slab_create = 0;
skc->skc_slab_destroy = 0;
skc->skc_slab_total = 0;
skc->skc_slab_alloc = 0;
skc->skc_slab_max = 0;
skc->skc_obj_total = 0;
skc->skc_obj_alloc = 0;
skc->skc_obj_max = 0;
skc->skc_obj_deadlock = 0;
skc->skc_obj_emergency = 0;
skc->skc_obj_emergency_max = 0;
rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
GFP_KERNEL);
if (rc != 0) {
kfree(skc);
return (NULL);
}
/*
* Verify the requested alignment restriction is sane.
*/
if (align) {
VERIFY(ISP2(align));
VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
VERIFY3U(align, <=, PAGE_SIZE);
skc->skc_obj_align = align;
}
/*
* When no specific type of slab is requested (kmem, vmem, or
* linuxslab), select a cache type based on the object size
* and default tunables.
*/
if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
if (spl_kmem_cache_slab_limit &&
size <= (size_t)spl_kmem_cache_slab_limit) {
/*
* Objects smaller than spl_kmem_cache_slab_limit can
* use the Linux slab for better space-efficiency.
*/
skc->skc_flags |= KMC_SLAB;
} else {
/*
* All other objects are considered large and are
* placed on kvmem backed slabs.
*/
skc->skc_flags |= KMC_KVMEM;
}
}
/*
* Given the type of slab allocate the required resources.
*/
if (skc->skc_flags & KMC_KVMEM) {
rc = spl_slab_size(skc,
&skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
goto out;
rc = spl_magazine_create(skc);
if (rc)
goto out;
} else {
unsigned long slabflags = 0;
if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE))
goto out;
#if defined(SLAB_USERCOPY)
/*
* Required for PAX-enabled kernels if the slab is to be
* used for copying between user and kernel space.
*/
slabflags |= SLAB_USERCOPY;
#endif
#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
/*
* Newer grsec patchset uses kmem_cache_create_usercopy()
* instead of SLAB_USERCOPY flag
*/
skc->skc_linux_cache = kmem_cache_create_usercopy(
skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
skc->skc_linux_cache = kmem_cache_create(
skc->skc_name, size, align, slabflags, NULL);
#endif
if (skc->skc_linux_cache == NULL)
goto out;
}
down_write(&spl_kmem_cache_sem);
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
up_write(&spl_kmem_cache_sem);
return (skc);
out:
kfree(skc->skc_name);
percpu_counter_destroy(&skc->skc_linux_alloc);
kfree(skc);
return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
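For orientation, here is a hedged sketch of how a consumer typically uses the interface documented above: a hypothetical cache of fixed-size records. The ctor/dtor signatures are inferred from how skc_ctor and skc_dtor are invoked in this file, and the reclaim/vmp arguments are passed as NULL as the ASSERTs require; it assumes the usual SPL headers and is not part of this patch.

/* Hypothetical consumer of the SPL cache API shown above. */
typedef struct my_record {
    uint64_t mr_id;
    char mr_buf[512];
} my_record_t;

static int
my_record_ctor(void *obj, void *priv, int flags)
{
    (void) priv, (void) flags;
    memset(obj, 0, sizeof (my_record_t));
    return (0);
}

static void
my_record_dtor(void *obj, void *priv)
{
    (void) obj, (void) priv;
}

static spl_kmem_cache_t *my_record_cache;

static int
my_record_cache_init(void)
{
    /* flags == 0 lets the code above pick KMC_SLAB or KMC_KVMEM by size. */
    my_record_cache = spl_kmem_cache_create("my_record_cache",
        sizeof (my_record_t), 0, my_record_ctor, my_record_dtor,
        NULL /* reclaim */, NULL /* priv */, NULL /* vmp */, 0);
    return (my_record_cache == NULL ? -ENOMEM : 0);
}

static void
my_record_cache_fini(void)
{
    spl_kmem_cache_destroy(my_record_cache);
}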
/*
* Register a move callback for cache defragmentation.
* XXX: Unimplemented but harmless to stub out for now.
*/
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);
/*
* Destroy a cache and all objects associated with the cache.
*/
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
taskqid_t id;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));
down_write(&spl_kmem_cache_sem);
list_del_init(&skc->skc_list);
up_write(&spl_kmem_cache_sem);
/* Cancel and wait for any pending delayed tasks */
VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
spin_lock(&skc->skc_lock);
id = skc->skc_taskqid;
spin_unlock(&skc->skc_lock);
taskq_cancel_id(spl_kmem_cache_taskq, id);
/*
* Wait until all current callers complete; this is mainly
* to catch the case where a low memory situation triggers a
* cache reaping action which races with this destroy.
*/
wait_event(wq, atomic_read(&skc->skc_ref) == 0);
if (skc->skc_flags & KMC_KVMEM) {
spl_magazine_destroy(skc);
spl_slab_reclaim(skc);
} else {
ASSERT(skc->skc_flags & KMC_SLAB);
kmem_cache_destroy(skc->skc_linux_cache);
}
spin_lock(&skc->skc_lock);
/*
* Validate there are no objects in use and free all the
* spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
*/
ASSERT3U(skc->skc_slab_alloc, ==, 0);
ASSERT3U(skc->skc_obj_alloc, ==, 0);
ASSERT3U(skc->skc_slab_total, ==, 0);
ASSERT3U(skc->skc_obj_total, ==, 0);
ASSERT3U(skc->skc_obj_emergency, ==, 0);
ASSERT(list_empty(&skc->skc_complete_list));
ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
percpu_counter_destroy(&skc->skc_linux_alloc);
spin_unlock(&skc->skc_lock);
kfree(skc->skc_name);
kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/*
* Allocate an object from a slab attached to the cache. This is used to
* repopulate the per-cpu magazine caches in batches when they run low.
*/
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
spl_kmem_obj_t *sko;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(sks->sks_magic == SKS_MAGIC);
sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
ASSERT(sko->sko_magic == SKO_MAGIC);
ASSERT(sko->sko_addr != NULL);
/* Remove from sks_free_list */
list_del_init(&sko->sko_list);
sks->sks_age = jiffies;
sks->sks_ref++;
skc->skc_obj_alloc++;
/* Track max obj usage statistics */
if (skc->skc_obj_alloc > skc->skc_obj_max)
skc->skc_obj_max = skc->skc_obj_alloc;
/* Track max slab usage statistics */
if (sks->sks_ref == 1) {
skc->skc_slab_alloc++;
if (skc->skc_slab_alloc > skc->skc_slab_max)
skc->skc_slab_max = skc->skc_slab_alloc;
}
return (sko->sko_addr);
}
/*
* Generic slab allocation function to run by the global work queues.
* It is responsible for allocating a new slab, linking it in to the list
* of partial slabs, and then waking any waiters.
*/
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_slab_t *sks;
fstrans_cookie_t cookie = spl_fstrans_mark();
sks = spl_slab_alloc(skc, flags);
spl_fstrans_unmark(cookie);
spin_lock(&skc->skc_lock);
if (sks) {
skc->skc_slab_total++;
skc->skc_obj_total += sks->sks_objs;
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
smp_mb__before_atomic();
clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
smp_mb__after_atomic();
}
spin_unlock(&skc->skc_lock);
return (sks == NULL ? -ENOMEM : 0);
}
static void
spl_cache_grow_work(void *data)
{
spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
spl_kmem_cache_t *skc = ska->ska_cache;
int error = __spl_cache_grow(skc, ska->ska_flags);
atomic_dec(&skc->skc_ref);
smp_mb__before_atomic();
clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
smp_mb__after_atomic();
if (error == 0)
wake_up_all(&skc->skc_waitq);
kfree(ska);
}
/*
* Returns non-zero when a new slab should be available.
*/
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}
/*
* No available objects on any slabs, create a new slab. Note that this
* functionality is disabled for KMC_SLAB caches which are backed by the
* Linux slab.
*/
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
int remaining, rc = 0;
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
- might_sleep();
+
*obj = NULL;
+ /*
+ * Since we can't sleep, attempt an emergency allocation to satisfy
+ * the request. The only alternative is to fail the allocation, but
+ * it's preferable to try. The use of KM_NOSLEEP is expected to be rare.
+ */
+ if (flags & KM_NOSLEEP)
+ return (spl_emergency_alloc(skc, flags, obj));
+
+ might_sleep();
+
/*
* Before allocating a new slab wait for any reaping to complete and
* then return so the local magazine can be rechecked for new objects.
*/
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
TASK_UNINTERRUPTIBLE);
return (rc ? rc : -EAGAIN);
}
/*
* Note: It would be nice to reduce the overhead of context switches
* and improve NUMA locality by trying to allocate a new slab in the
* current process context with the KM_NOSLEEP flag.
*
* However, this can't be applied to vmem/kvmem due to a bug where
* spl_vmalloc() doesn't honor gfp flags in page table allocation.
*/
/*
* This is handled by dispatching a work request to the global work
* queue. This allows us to asynchronously allocate a new slab while
* retaining the ability to safely fall back to smaller synchronous
* allocations to ensure forward progress is always maintained.
*/
if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
spl_kmem_alloc_t *ska;
ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
if (ska == NULL) {
clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
smp_mb__after_atomic();
wake_up_all(&skc->skc_waitq);
return (-ENOMEM);
}
atomic_inc(&skc->skc_ref);
ska->ska_cache = skc;
ska->ska_flags = flags;
taskq_init_ent(&ska->ska_tqe);
taskq_dispatch_ent(spl_kmem_cache_taskq,
spl_cache_grow_work, ska, 0, &ska->ska_tqe);
}
/*
* The goal here is to only detect the rare case where a virtual slab
* allocation has deadlocked. We must be careful to minimize the use
* of emergency objects which are more expensive to track. Therefore,
* we set a very long timeout for the asynchronous allocation and if
* the timeout is reached the cache is flagged as deadlocked. From
* this point only new emergency objects will be allocated until the
* asynchronous allocation completes and clears the deadlocked flag.
*/
if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
rc = spl_emergency_alloc(skc, flags, obj);
} else {
remaining = wait_event_timeout(skc->skc_waitq,
spl_cache_grow_wait(skc), HZ / 10);
if (!remaining) {
spin_lock(&skc->skc_lock);
if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
skc->skc_obj_deadlock++;
}
spin_unlock(&skc->skc_lock);
}
rc = -ENOMEM;
}
return (rc);
}
/*
* Refill a per-cpu magazine with objects from the slabs for this cache.
* Ideally the magazine can be repopulated using existing objects which have
* been released; however, if we are unable to locate enough free objects, new
* slabs of objects will be created. On success NULL is returned, otherwise
* the address of a single emergency object is returned for use by the caller.
*/
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
spl_kmem_slab_t *sks;
int count = 0, rc, refill;
void *obj = NULL;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
spin_lock(&skc->skc_lock);
while (refill > 0) {
/* No slabs available we may need to grow the cache */
if (list_empty(&skc->skc_partial_list)) {
spin_unlock(&skc->skc_lock);
local_irq_enable();
rc = spl_cache_grow(skc, flags, &obj);
local_irq_disable();
/* Emergency object for immediate use by caller */
if (rc == 0 && obj != NULL)
return (obj);
if (rc)
goto out;
/* Rescheduled to a different CPU, skm is not local */
if (skm != skc->skc_mag[smp_processor_id()])
goto out;
/*
* Potentially rescheduled to the same CPU but
* allocations may have occurred from this CPU while
* we were sleeping so recalculate max refill.
*/
refill = MIN(refill, skm->skm_size - skm->skm_avail);
spin_lock(&skc->skc_lock);
continue;
}
/* Grab the next available slab */
sks = list_entry((&skc->skc_partial_list)->next,
spl_kmem_slab_t, sks_list);
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref < sks->sks_objs);
ASSERT(!list_empty(&sks->sks_free_list));
/*
* Consume as many objects as needed to refill the requested
* cache. We must also be careful not to overfill it.
*/
while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
++count) {
ASSERT(skm->skm_avail < skm->skm_size);
ASSERT(count < skm->skm_size);
skm->skm_objs[skm->skm_avail++] =
spl_cache_obj(skc, sks);
}
/* Move slab to skc_complete_list when full */
if (sks->sks_ref == sks->sks_objs) {
list_del(&sks->sks_list);
list_add(&sks->sks_list, &skc->skc_complete_list);
}
}
spin_unlock(&skc->skc_lock);
out:
return (NULL);
}
/*
* Release an object back to the slab from which it came.
*/
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_slab_t *sks = NULL;
spl_kmem_obj_t *sko = NULL;
ASSERT(skc->skc_magic == SKC_MAGIC);
sko = spl_sko_from_obj(skc, obj);
ASSERT(sko->sko_magic == SKO_MAGIC);
sks = sko->sko_slab;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_cache == skc);
list_add(&sko->sko_list, &sks->sks_free_list);
sks->sks_age = jiffies;
sks->sks_ref--;
skc->skc_obj_alloc--;
/*
* Move slab to skc_partial_list when no longer full. Slabs
* are added to the head to keep the partial list in quasi-full
* sorted order. Fuller at the head, emptier at the tail.
*/
if (sks->sks_ref == (sks->sks_objs - 1)) {
list_del(&sks->sks_list);
list_add(&sks->sks_list, &skc->skc_partial_list);
}
/*
* Move empty slabs to the end of the partial list so
* they can be easily found and freed during reclamation.
*/
if (sks->sks_ref == 0) {
list_del(&sks->sks_list);
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
skc->skc_slab_alloc--;
}
}
/*
* Allocate an object from the per-cpu magazine, or if the magazine
* is empty directly allocate from a slab and repopulate the magazine.
*/
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
spl_kmem_magazine_t *skm;
void *obj = NULL;
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/*
* Allocate directly from a Linux slab. All optimizations are left
* to the underlying cache; we only need to guarantee that KM_SLEEP
* callers will never fail.
*/
if (skc->skc_flags & KMC_SLAB) {
struct kmem_cache *slc = skc->skc_linux_cache;
do {
obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
} while ((obj == NULL) && !(flags & KM_NOSLEEP));
if (obj != NULL) {
/*
* Even though we leave everything up to the
* underlying cache we still keep track of
* how many objects we've allocated in it for
* better debuggability.
*/
percpu_counter_inc(&skc->skc_linux_alloc);
}
goto ret;
}
local_irq_disable();
restart:
/*
* Safe to update per-cpu structure without lock, but
* in the restart case we must be careful to reacquire
* the local magazine since this may have changed
* when we need to grow the cache.
*/
skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
if (likely(skm->skm_avail)) {
/* Object available in CPU cache, use it */
obj = skm->skm_objs[--skm->skm_avail];
} else {
obj = spl_cache_refill(skc, skm, flags);
if ((obj == NULL) && !(flags & KM_NOSLEEP))
goto restart;
local_irq_enable();
goto ret;
}
local_irq_enable();
ASSERT(obj);
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
ret:
/* Pre-emptively migrate object to CPU L1 cache */
if (obj) {
if (skc->skc_ctor)
skc->skc_ctor(obj, skc->skc_private, flags);
else
prefetchw(obj);
}
return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
* Free an object back to the local per-cpu magazine; there is no
* guarantee that this is the same magazine the object was originally
* allocated from. We may need to flush entries from the magazine
* back to the slabs to make space.
*/
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_magazine_t *skm;
unsigned long flags;
int do_reclaim = 0;
int do_emergency = 0;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
/*
* Run the destructor
*/
if (skc->skc_dtor)
skc->skc_dtor(obj, skc->skc_private);
/*
* Free the object back to the underlying Linux slab.
*/
if (skc->skc_flags & KMC_SLAB) {
kmem_cache_free(skc->skc_linux_cache, obj);
percpu_counter_dec(&skc->skc_linux_alloc);
return;
}
/*
* While a cache has outstanding emergency objects, all freed objects
* must be checked. However, since emergency objects will never use
* a virtual address, these objects can be safely excluded as an
* optimization.
*/
if (!is_vmalloc_addr(obj)) {
spin_lock(&skc->skc_lock);
do_emergency = (skc->skc_obj_emergency > 0);
spin_unlock(&skc->skc_lock);
if (do_emergency && (spl_emergency_free(skc, obj) == 0))
return;
}
local_irq_save(flags);
/*
* Safe to update the per-cpu structure without a lock, but since
* no remote memory allocation tracking is being performed it is
* entirely possible to allocate an object from one CPU cache and
* return it to another.
*/
skm = skc->skc_mag[smp_processor_id()];
ASSERT(skm->skm_magic == SKM_MAGIC);
/*
* Per-CPU cache full, flush it to make space for this object,
* this may result in an empty slab which can be reclaimed once
* interrupts are re-enabled.
*/
if (unlikely(skm->skm_avail >= skm->skm_size)) {
spl_cache_flush(skc, skm, skm->skm_refill);
do_reclaim = 1;
}
/* Available space in cache, use it */
skm->skm_objs[skm->skm_avail++] = obj;
local_irq_restore(flags);
if (do_reclaim)
spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
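Continuing the hypothetical example introduced after spl_kmem_cache_create() above: per the comments in spl_kmem_cache_alloc(), a KM_SLEEP allocation may block but is not expected to return NULL, while KM_NOSLEEP may fail and the caller must handle it; frees always funnel back through the local magazine or the emergency tree. A sketch of a caller (illustrative only, not part of the patch):

static int
my_record_insert(uint64_t id)
{
    /* KM_SLEEP: may block, but is expected not to fail. */
    my_record_t *mr = spl_kmem_cache_alloc(my_record_cache, KM_SLEEP);

    mr->mr_id = id;
    /* ... use the record ... */

    spl_kmem_cache_free(my_record_cache, mr);
    return (0);
}

static int
my_record_try_insert(uint64_t id)
{
    /* KM_NOSLEEP: cannot block, so the caller must handle NULL. */
    my_record_t *mr = spl_kmem_cache_alloc(my_record_cache, KM_NOSLEEP);
    if (mr == NULL)
        return (-ENOMEM);

    mr->mr_id = id;
    spl_kmem_cache_free(my_record_cache, mr);
    return (0);
}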
/*
* Depending on how many and which objects are released, a reap may simply
* repopulate the local magazine, which will then need to age out. Objects
* which cannot fit in the magazine will be released back to their slabs,
* which will also need to age out before being released. This is all just
* best effort and we do not want to thrash creating and destroying slabs.
*/
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
if (skc->skc_flags & KMC_SLAB)
return;
atomic_inc(&skc->skc_ref);
/*
* Prevent concurrent cache reaping when contended.
*/
if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
goto out;
/* Reclaim from the magazine and free all now empty slabs. */
unsigned long irq_flags;
local_irq_save(irq_flags);
spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
spl_cache_flush(skc, skm, skm->skm_avail);
local_irq_restore(irq_flags);
spl_slab_reclaim(skc);
clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
smp_mb__after_atomic();
wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
/*
* This is stubbed out for code consistency with other platforms. There
* is existing logic to prevent concurrent reaping, so while this is ugly
* it should do no harm.
*/
int
spl_kmem_cache_reap_active(void)
{
return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);
/*
* Reap all free slabs from all registered caches.
*/
void
spl_kmem_reap(void)
{
spl_kmem_cache_t *skc = NULL;
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
spl_kmem_cache_reap_now(skc);
}
up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);
int
spl_kmem_cache_init(void)
{
init_rwsem(&spl_kmem_cache_sem);
INIT_LIST_HEAD(&spl_kmem_cache_list);
spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
spl_kmem_cache_kmem_threads, maxclsyspri,
spl_kmem_cache_kmem_threads * 8, INT_MAX,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
if (spl_kmem_cache_taskq == NULL)
return (-ENOMEM);
return (0);
}
void
spl_kmem_cache_fini(void)
{
taskq_destroy(spl_kmem_cache_taskq);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c b/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
index b7d6053529b4..29a8802b8367 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
@@ -1,540 +1,544 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#ifdef _KERNEL
#include <sys/shrinker.h>
#include <sys/vmsystm.h>
#include <sys/zpl.h>
#include <linux/page_compat.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
/*
* This is a limit on how many pages the ARC shrinker makes available for
* eviction in response to one page allocation attempt. Note that in
* practice, the kernel's shrinker can ask us to evict up to about 4x this
* for one allocation attempt.
*
* The default limit of 10,000 (in practice, 160MB per allocation attempt
* with 4K pages) limits the amount of time spent attempting to reclaim ARC
* memory to less than 100ms per allocation attempt, even with a small
* average compressed block size of ~8KB.
*
* See also the comment in arc_shrinker_count().
* Set to 0 to disable limit.
*/
int zfs_arc_shrinker_limit = 10000;
#ifdef CONFIG_MEMORY_HOTPLUG
static struct notifier_block arc_hotplug_callback_mem_nb;
#endif
/*
* Return a default max arc size based on the amount of physical memory.
*/
uint64_t
arc_default_max(uint64_t min, uint64_t allmem)
{
/* Default to 1/2 of all memory. */
return (MAX(allmem / 2, min));
}
#ifdef _KERNEL
/*
* Return the maximum amount of memory that we could possibly use. Reduced
* to half of all memory in user space, which is primarily used for testing.
*/
uint64_t
arc_all_memory(void)
{
#ifdef CONFIG_HIGHMEM
return (ptob(zfs_totalram_pages - zfs_totalhigh_pages));
#else
return (ptob(zfs_totalram_pages));
#endif /* CONFIG_HIGHMEM */
}
/*
* Return the amount of memory that is considered free. In user space,
* which is primarily used for testing, we pretend that free memory ranges
* from 0-20% of all memory.
*/
uint64_t
arc_free_memory(void)
{
#ifdef CONFIG_HIGHMEM
struct sysinfo si;
si_meminfo(&si);
return (ptob(si.freeram - si.freehigh));
#else
return (ptob(nr_free_pages() +
nr_inactive_file_pages()));
#endif /* CONFIG_HIGHMEM */
}
/*
* Return the amount of memory that can be consumed before reclaim will be
* needed. Positive if there is sufficient free memory, negative indicates
* the amount of memory that needs to be freed up.
*/
int64_t
arc_available_memory(void)
{
return (arc_free_memory() - arc_sys_free);
}
static uint64_t
arc_evictable_memory(void)
{
int64_t asize = aggsum_value(&arc_sums.arcstat_size);
uint64_t arc_clean =
zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0);
/*
* Scale reported evictable memory in proportion to page cache, cap
* at specified min/max.
*/
uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
min = MAX(arc_c_min, MIN(arc_c_max, min));
if (arc_dirty >= min)
return (arc_clean);
return (MAX((int64_t)asize - (int64_t)min, 0));
}
/*
* The _count() function returns the number of free-able objects.
* The _scan() function returns the number of objects that were freed.
*/
static unsigned long
arc_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
/*
* __GFP_FS won't be set if we are called from ZFS code (see
* kmem_flags_convert(), which removes it). To avoid a deadlock, we
* don't allow evicting in this case. We return 0 rather than
* SHRINK_STOP so that the shrinker logic doesn't accumulate a
* deficit against us.
*/
if (!(sc->gfp_mask & __GFP_FS)) {
return (0);
}
/*
* This code is reached in the "direct reclaim" case, where the
* kernel (outside ZFS) is trying to allocate a page, and the system
* is low on memory.
*
* The kernel's shrinker code doesn't understand how many pages the
* ARC's callback actually frees, so it may ask the ARC to shrink a
* lot for one page allocation. This is problematic because it may
* take a long time, thus delaying the page allocation, and because
* it may force the ARC to unnecessarily shrink very small.
*
* Therefore, we limit the amount of data that we say is evictable,
* which limits the amount that the shrinker will ask us to evict for
* one page allocation attempt.
*
* In practice, we may be asked to shrink 4x the limit to satisfy one
* page allocation, before the kernel's shrinker code gives up on us.
* When that happens, we rely on the kernel code to find the pages
* that we freed before invoking the OOM killer. This happens in
* __alloc_pages_slowpath(), which retries and finds the pages we
* freed when it calls get_page_from_freelist().
*
* See also the comment above zfs_arc_shrinker_limit.
*/
int64_t limit = zfs_arc_shrinker_limit != 0 ?
zfs_arc_shrinker_limit : INT64_MAX;
return (MIN(limit, btop((int64_t)arc_evictable_memory())));
}
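Putting numbers on the limit discussed above: with the default zfs_arc_shrinker_limit of 10,000 and 4 KiB pages, one shrinker callback exposes at most 10,000 x 4 KiB, about 40 MB, and since the kernel may ask roughly 4x that for a single page allocation, the practical ceiling is about 164 MB, in line with the 160MB figure quoted in the comment above zfs_arc_shrinker_limit. A tiny standalone check of that arithmetic (the 4 KiB page and the 4x retry factor are the assumptions stated in those comments):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const int64_t limit_pages = 10000;   /* default zfs_arc_shrinker_limit */
    const int64_t page_size = 4096;      /* assumed 4 KiB pages */
    const int64_t per_callback = limit_pages * page_size;
    const int64_t per_allocation = 4 * per_callback; /* kernel may retry ~4x */

    /* Print in decimal MB, matching the comment's "160MB" figure. */
    printf("per callback: %lld MB, per allocation attempt: ~%lld MB\n",
        (long long)(per_callback / 1000000),
        (long long)(per_allocation / 1000000));
    return 0;
}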
static unsigned long
arc_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
ASSERT((sc->gfp_mask & __GFP_FS) != 0);
/* The arc is considered warm once reclaim has occurred */
if (unlikely(arc_warm == B_FALSE))
arc_warm = B_TRUE;
/*
* Evict the requested number of pages by reducing arc_c and waiting
* for the requested amount of data to be evicted.
*/
arc_reduce_target_size(ptob(sc->nr_to_scan));
arc_wait_for_eviction(ptob(sc->nr_to_scan), B_FALSE);
if (current->reclaim_state != NULL)
+#ifdef HAVE_RECLAIM_STATE_RECLAIMED
+ current->reclaim_state->reclaimed += sc->nr_to_scan;
+#else
current->reclaim_state->reclaimed_slab += sc->nr_to_scan;
+#endif
/*
* We are experiencing memory pressure which the arc_evict_zthr was
* unable to keep up with. Set arc_no_grow to briefly pause arc
* growth to avoid compounding the memory pressure.
*/
arc_no_grow = B_TRUE;
/*
* When direct reclaim is observed it usually indicates a rapid
* increase in memory pressure. This occurs because the kswapd
* threads were unable to asynchronously keep enough free memory
* available.
*/
if (current_is_kswapd()) {
ARCSTAT_BUMP(arcstat_memory_indirect_count);
} else {
ARCSTAT_BUMP(arcstat_memory_direct_count);
}
return (sc->nr_to_scan);
}
SPL_SHRINKER_DECLARE(arc_shrinker,
arc_shrinker_count, arc_shrinker_scan, DEFAULT_SEEKS);
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
uint64_t free_memory = arc_free_memory();
if (free_memory > arc_all_memory() * arc_lotsfree_percent / 100)
return (0);
if (txg > spa->spa_lowmem_last_txg) {
spa->spa_lowmem_last_txg = txg;
spa->spa_lowmem_page_load = 0;
}
/*
* If we are in pageout, we know that memory is already tight,
* the arc is already going to be evicting, so we just want to
* continue to let page writes occur as quickly as possible.
*/
if (current_is_kswapd()) {
if (spa->spa_lowmem_page_load >
MAX(arc_sys_free / 4, free_memory) / 4) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
return (SET_ERROR(ERESTART));
}
/* Note: reserve is inflated, so we deflate */
atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8);
return (0);
} else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) {
/* memory is low, delay before restarting */
ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
return (SET_ERROR(EAGAIN));
}
spa->spa_lowmem_page_load = 0;
return (0);
}
static void
arc_set_sys_free(uint64_t allmem)
{
/*
* The ARC tries to keep at least this much memory available for the
* system. This gives the ARC time to shrink in response to memory
* pressure, before running completely out of memory and invoking the
* direct-reclaim ARC shrinker.
*
* This should be more than twice high_wmark_pages(), so that
* arc_wait_for_eviction() will wait until at least the
* high_wmark_pages() are free (see arc_evict_state_impl()).
*
* Note: Even when the system is very low on memory, the kernel's
* shrinker code may only ask for one "batch" of pages (512KB) to be
* evicted. If concurrent allocations consume these pages, there may
* still be insufficient free pages, and the OOM killer takes action.
*
* By setting arc_sys_free large enough, and having
* arc_wait_for_eviction() wait until there is at least arc_sys_free/2
* free memory, it is much less likely that concurrent allocations can
* consume all the memory that was evicted before checking for
* OOM.
*
* It's hard to iterate the zones from a Linux kernel module, which
* makes it difficult to determine the watermark dynamically. Instead
* we compute the maximum high watermark for this system, based
* on the amount of memory, assuming default parameters on Linux kernel
* 5.3.
*/
/*
* Base wmark_low is 4 * the square root of Kbytes of RAM.
*/
long wmark = 4 * int_sqrt(allmem/1024) * 1024;
/*
* Clamp to between 128K and 64MB.
*/
wmark = MAX(wmark, 128 * 1024);
wmark = MIN(wmark, 64 * 1024 * 1024);
/*
* watermark_boost can increase the wmark by up to 150%.
*/
wmark += wmark * 150 / 100;
/*
* arc_sys_free needs to be more than 2x the watermark, because
* arc_wait_for_eviction() waits for half of arc_sys_free. Bump this up
* to 3x to ensure we're above it.
*/
arc_sys_free = wmark * 3 + allmem / 32;
}
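To make the derivation above concrete, take a hypothetical 16 GiB machine: the base watermark is 4 x sqrt(16 GiB in KiB) x 1024 = 4 x 4096 x 1024 = 16 MiB, which survives the 128 KiB to 64 MiB clamp; the 150% boost raises it to 40 MiB; and arc_sys_free = 3 x 40 MiB + 16 GiB / 32 = 120 MiB + 512 MiB = 632 MiB. A standalone sketch of the same arithmetic (the 16 GiB figure and the isqrt helper standing in for the kernel's int_sqrt() are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

/* Simple integer square root, standing in for the kernel's int_sqrt(). */
static uint64_t isqrt(uint64_t x)
{
    uint64_t r = 0;
    while ((r + 1) * (r + 1) <= x)
        r++;
    return r;
}

int main(void)
{
    const uint64_t allmem = 16ULL << 30;              /* assume 16 GiB RAM */
    uint64_t wmark = 4 * isqrt(allmem / 1024) * 1024; /* base wmark_low */

    if (wmark < 128 * 1024)              /* clamp to [128 KiB, 64 MiB] */
        wmark = 128 * 1024;
    if (wmark > 64ULL * 1024 * 1024)
        wmark = 64ULL * 1024 * 1024;

    wmark += wmark * 150 / 100;          /* allow for watermark_boost */

    uint64_t arc_sys_free = wmark * 3 + allmem / 32;
    printf("arc_sys_free ~= %llu MiB\n",
        (unsigned long long)(arc_sys_free >> 20));
    return 0;
}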
void
arc_lowmem_init(void)
{
uint64_t allmem = arc_all_memory();
/*
* Register a shrinker to support synchronous (direct) memory
* reclaim from the arc. This is done to prevent kswapd from
* swapping out pages when it is preferable to shrink the arc.
*/
spl_register_shrinker(&arc_shrinker);
arc_set_sys_free(allmem);
}
void
arc_lowmem_fini(void)
{
spl_unregister_shrinker(&arc_shrinker);
}
int
param_set_arc_u64(const char *buf, zfs_kernel_param_t *kp)
{
int error;
error = spl_param_set_u64(buf, kp);
if (error < 0)
return (SET_ERROR(error));
arc_tuning_update(B_TRUE);
return (0);
}
int
param_set_arc_min(const char *buf, zfs_kernel_param_t *kp)
{
return (param_set_arc_u64(buf, kp));
}
int
param_set_arc_max(const char *buf, zfs_kernel_param_t *kp)
{
return (param_set_arc_u64(buf, kp));
}
int
param_set_arc_int(const char *buf, zfs_kernel_param_t *kp)
{
int error;
error = param_set_int(buf, kp);
if (error < 0)
return (SET_ERROR(error));
arc_tuning_update(B_TRUE);
return (0);
}
#ifdef CONFIG_MEMORY_HOTPLUG
static int
arc_hotplug_callback(struct notifier_block *self, unsigned long action,
void *arg)
{
(void) self, (void) arg;
uint64_t allmem = arc_all_memory();
if (action != MEM_ONLINE)
return (NOTIFY_OK);
arc_set_limits(allmem);
#ifdef __LP64__
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#else
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#endif
arc_set_sys_free(allmem);
return (NOTIFY_OK);
}
#endif
void
arc_register_hotplug(void)
{
#ifdef CONFIG_MEMORY_HOTPLUG
arc_hotplug_callback_mem_nb.notifier_call = arc_hotplug_callback;
/* There is no significance to the value 100 */
arc_hotplug_callback_mem_nb.priority = 100;
register_memory_notifier(&arc_hotplug_callback_mem_nb);
#endif
}
void
arc_unregister_hotplug(void)
{
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&arc_hotplug_callback_mem_nb);
#endif
}
#else /* _KERNEL */
int64_t
arc_available_memory(void)
{
int64_t lowest = INT64_MAX;
/* Every 100 calls, free a small amount */
if (random_in_range(100) == 0)
lowest = -1024;
return (lowest);
}
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
(void) spa, (void) reserve, (void) txg;
return (0);
}
uint64_t
arc_all_memory(void)
{
return (ptob(physmem) / 2);
}
uint64_t
arc_free_memory(void)
{
return (random_in_range(arc_all_memory() * 20 / 100));
}
void
arc_register_hotplug(void)
{
}
void
arc_unregister_hotplug(void)
{
}
#endif /* _KERNEL */
/*
* Helper function for arc_prune_async() it is responsible for safely
* handling the execution of a registered arc_prune_func_t.
*/
static void
arc_prune_task(void *ptr)
{
arc_prune_t *ap = (arc_prune_t *)ptr;
arc_prune_func_t *func = ap->p_pfunc;
if (func != NULL)
func(ap->p_adjust, ap->p_private);
zfs_refcount_remove(&ap->p_refcnt, func);
}
/*
* Notify registered consumers they must drop holds on a portion of the ARC
* buffers they reference. This provides a mechanism to ensure the ARC can
* honor the metadata limit and reclaim otherwise pinned ARC buffers. This
* is analogous to dnlc_reduce_cache() but more generic.
*
* This operation is performed asynchronously so it may be safely called
* in the context of the arc_reclaim_thread(). A reference is taken here
* for each registered arc_prune_t and the arc_prune_task() is responsible
* for releasing it once the registered arc_prune_func_t has completed.
*/
void
arc_prune_async(uint64_t adjust)
{
arc_prune_t *ap;
mutex_enter(&arc_prune_mtx);
for (ap = list_head(&arc_prune_list); ap != NULL;
ap = list_next(&arc_prune_list, ap)) {
if (zfs_refcount_count(&ap->p_refcnt) >= 2)
continue;
zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
ap->p_adjust = adjust;
if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
ap, TQ_SLEEP) == TASKQID_INVALID) {
zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
continue;
}
ARCSTAT_BUMP(arcstat_prune);
}
mutex_exit(&arc_prune_mtx);
}
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, shrinker_limit, INT, ZMOD_RW,
"Limit on number of pages that ARC shrinker can reclaim at once");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
index ff26f47f2e04..a1fd3c9856cc 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
@@ -1,3036 +1,3034 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/sid.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/fs/zfs.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/trace_acl.h>
#include <sys/zpl.h>
#define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE
#define DENY ACE_ACCESS_DENIED_ACE_TYPE
#define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE
#define MIN_ACE_TYPE ALLOW
#define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP)
#define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE)
#define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \
ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \
ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \
ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE)
#define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS)
#define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \
ACE_DELETE|ACE_DELETE_CHILD)
#define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS)
#define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \
ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE)
#define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER)
#define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\
ZFS_ACL_PROTECTED)
#define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\
ZFS_ACL_OBJ_ACE)
#define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH)
#define IDMAP_WK_CREATOR_OWNER_UID 2147483648U
static uint16_t
zfs_ace_v0_get_type(void *acep)
{
return (((zfs_oldace_t *)acep)->z_type);
}
static uint16_t
zfs_ace_v0_get_flags(void *acep)
{
return (((zfs_oldace_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_v0_get_mask(void *acep)
{
return (((zfs_oldace_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_v0_get_who(void *acep)
{
return (((zfs_oldace_t *)acep)->z_fuid);
}
static void
zfs_ace_v0_set_type(void *acep, uint16_t type)
{
((zfs_oldace_t *)acep)->z_type = type;
}
static void
zfs_ace_v0_set_flags(void *acep, uint16_t flags)
{
((zfs_oldace_t *)acep)->z_flags = flags;
}
static void
zfs_ace_v0_set_mask(void *acep, uint32_t mask)
{
((zfs_oldace_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_v0_set_who(void *acep, uint64_t who)
{
((zfs_oldace_t *)acep)->z_fuid = who;
}
static size_t
zfs_ace_v0_size(void *acep)
{
(void) acep;
return (sizeof (zfs_oldace_t));
}
static size_t
zfs_ace_v0_abstract_size(void)
{
return (sizeof (zfs_oldace_t));
}
static int
zfs_ace_v0_mask_off(void)
{
return (offsetof(zfs_oldace_t, z_access_mask));
}
static int
zfs_ace_v0_data(void *acep, void **datap)
{
(void) acep;
*datap = NULL;
return (0);
}
static const acl_ops_t zfs_acl_v0_ops = {
.ace_mask_get = zfs_ace_v0_get_mask,
.ace_mask_set = zfs_ace_v0_set_mask,
.ace_flags_get = zfs_ace_v0_get_flags,
.ace_flags_set = zfs_ace_v0_set_flags,
.ace_type_get = zfs_ace_v0_get_type,
.ace_type_set = zfs_ace_v0_set_type,
.ace_who_get = zfs_ace_v0_get_who,
.ace_who_set = zfs_ace_v0_set_who,
.ace_size = zfs_ace_v0_size,
.ace_abstract_size = zfs_ace_v0_abstract_size,
.ace_mask_off = zfs_ace_v0_mask_off,
.ace_data = zfs_ace_v0_data
};
static uint16_t
zfs_ace_fuid_get_type(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_type);
}
static uint16_t
zfs_ace_fuid_get_flags(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_fuid_get_mask(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_fuid_get_who(void *args)
{
uint16_t entry_type;
zfs_ace_t *acep = args;
entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (-1);
return (((zfs_ace_t *)acep)->z_fuid);
}
static void
zfs_ace_fuid_set_type(void *acep, uint16_t type)
{
((zfs_ace_hdr_t *)acep)->z_type = type;
}
static void
zfs_ace_fuid_set_flags(void *acep, uint16_t flags)
{
((zfs_ace_hdr_t *)acep)->z_flags = flags;
}
static void
zfs_ace_fuid_set_mask(void *acep, uint32_t mask)
{
((zfs_ace_hdr_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_fuid_set_who(void *arg, uint64_t who)
{
zfs_ace_t *acep = arg;
uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return;
acep->z_fuid = who;
}
static size_t
zfs_ace_fuid_size(void *acep)
{
zfs_ace_hdr_t *zacep = acep;
uint16_t entry_type;
switch (zacep->z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
return (sizeof (zfs_object_ace_t));
case ALLOW:
case DENY:
entry_type =
(((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS);
if (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (sizeof (zfs_ace_hdr_t));
zfs_fallthrough;
default:
return (sizeof (zfs_ace_t));
}
}
static size_t
zfs_ace_fuid_abstract_size(void)
{
return (sizeof (zfs_ace_hdr_t));
}
static int
zfs_ace_fuid_mask_off(void)
{
return (offsetof(zfs_ace_hdr_t, z_access_mask));
}
static int
zfs_ace_fuid_data(void *acep, void **datap)
{
zfs_ace_t *zacep = acep;
zfs_object_ace_t *zobjp;
switch (zacep->z_hdr.z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjp = acep;
*datap = (caddr_t)zobjp + sizeof (zfs_ace_t);
return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t));
default:
*datap = NULL;
return (0);
}
}
static const acl_ops_t zfs_acl_fuid_ops = {
.ace_mask_get = zfs_ace_fuid_get_mask,
.ace_mask_set = zfs_ace_fuid_set_mask,
.ace_flags_get = zfs_ace_fuid_get_flags,
.ace_flags_set = zfs_ace_fuid_set_flags,
.ace_type_get = zfs_ace_fuid_get_type,
.ace_type_set = zfs_ace_fuid_set_type,
.ace_who_get = zfs_ace_fuid_get_who,
.ace_who_set = zfs_ace_fuid_set_who,
.ace_size = zfs_ace_fuid_size,
.ace_abstract_size = zfs_ace_fuid_abstract_size,
.ace_mask_off = zfs_ace_fuid_mask_off,
.ace_data = zfs_ace_fuid_data
};
/*
* The following three functions are provided for compatibility with
* older ZPL versions in order to determine if the file used to have
* an external ACL and what version of ACL previously existed on the
* file. Would really be nice to not need this, sigh.
*/
uint64_t
zfs_external_acl(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
int error;
if (zp->z_is_sa)
return (0);
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_extern_obj);
else {
/*
* after upgrade the SA_ZPL_ZNODE_ACL should have been
* removed
*/
VERIFY(zp->z_is_sa && error == ENOENT);
return (0);
}
}
/*
* Determine size of ACL in bytes
*
* This is more complicated than it should be since we have to deal
* with old external ACLs.
*/
static int
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
&size)) != 0)
return (error);
*aclsize = size;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
aclphys, sizeof (*aclphys))) != 0)
return (error);
if (aclphys->z_acl_version == ZFS_ACL_VERSION_INITIAL) {
*aclsize = ZFS_ACL_SIZE(aclphys->z_acl_size);
*aclcount = aclphys->z_acl_size;
} else {
*aclsize = aclphys->z_acl_size;
*aclcount = aclphys->z_acl_count;
}
}
return (0);
}
int
zfs_znode_acl_version(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
if (zp->z_is_sa)
return (ZFS_ACL_VERSION_FUID);
else {
int error;
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_version);
else {
/*
* After upgrade SA_ZPL_ZNODE_ACL should have
* been removed.
*/
VERIFY(zp->z_is_sa && error == ENOENT);
return (ZFS_ACL_VERSION_FUID);
}
}
}
static int
zfs_acl_version(int version)
{
if (version < ZPL_VERSION_FUID)
return (ZFS_ACL_VERSION_INITIAL);
else
return (ZFS_ACL_VERSION_FUID);
}
static int
zfs_acl_version_zp(znode_t *zp)
{
return (zfs_acl_version(ZTOZSB(zp)->z_version));
}
zfs_acl_t *
zfs_acl_alloc(int vers)
{
zfs_acl_t *aclp;
aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
offsetof(zfs_acl_node_t, z_next));
aclp->z_version = vers;
if (vers == ZFS_ACL_VERSION_FUID)
aclp->z_ops = &zfs_acl_fuid_ops;
else
aclp->z_ops = &zfs_acl_v0_ops;
return (aclp);
}
zfs_acl_node_t *
zfs_acl_node_alloc(size_t bytes)
{
zfs_acl_node_t *aclnode;
aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
if (bytes) {
aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
aclnode->z_allocdata = aclnode->z_acldata;
aclnode->z_allocsize = bytes;
aclnode->z_size = bytes;
}
return (aclnode);
}
static void
zfs_acl_node_free(zfs_acl_node_t *aclnode)
{
if (aclnode->z_allocsize)
kmem_free(aclnode->z_allocdata, aclnode->z_allocsize);
kmem_free(aclnode, sizeof (zfs_acl_node_t));
}
static void
zfs_acl_release_nodes(zfs_acl_t *aclp)
{
zfs_acl_node_t *aclnode;
- while ((aclnode = list_head(&aclp->z_acl))) {
- list_remove(&aclp->z_acl, aclnode);
+ while ((aclnode = list_remove_head(&aclp->z_acl)))
zfs_acl_node_free(aclnode);
- }
aclp->z_acl_count = 0;
aclp->z_acl_bytes = 0;
}
void
zfs_acl_free(zfs_acl_t *aclp)
{
zfs_acl_release_nodes(aclp);
list_destroy(&aclp->z_acl);
kmem_free(aclp, sizeof (zfs_acl_t));
}
static boolean_t
zfs_acl_valid_ace_type(uint_t type, uint_t flags)
{
uint16_t entry_type;
switch (type) {
case ALLOW:
case DENY:
case ACE_SYSTEM_AUDIT_ACE_TYPE:
case ACE_SYSTEM_ALARM_ACE_TYPE:
entry_type = flags & ACE_TYPE_FLAGS;
return (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE || entry_type == 0 ||
entry_type == ACE_IDENTIFIER_GROUP);
default:
if (type <= MAX_ACE_TYPE)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
zfs_ace_valid(umode_t obj_mode, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
/*
* first check type of entry
*/
if (!zfs_acl_valid_ace_type(type, iflags))
return (B_FALSE);
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (aclp->z_version < ZFS_ACL_VERSION_FUID)
return (B_FALSE);
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
}
/*
* next check inheritance level flags
*/
if (S_ISDIR(obj_mode) &&
(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) {
if ((iflags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE)) == 0) {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void *
zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who,
uint32_t *access_mask, uint16_t *iflags, uint16_t *type)
{
zfs_acl_node_t *aclnode;
ASSERT(aclp);
if (start == NULL) {
aclnode = list_head(&aclp->z_acl);
if (aclnode == NULL)
return (NULL);
aclp->z_next_ace = aclnode->z_acldata;
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
}
aclnode = aclp->z_curr_node;
if (aclnode == NULL)
return (NULL);
if (aclnode->z_ace_idx >= aclnode->z_ace_count) {
aclnode = list_next(&aclp->z_acl, aclnode);
if (aclnode == NULL)
return (NULL);
else {
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
aclp->z_next_ace = aclnode->z_acldata;
}
}
if (aclnode->z_ace_idx < aclnode->z_ace_count) {
void *acep = aclp->z_next_ace;
size_t ace_size;
/*
* Make sure we don't overstep our bounds
*/
ace_size = aclp->z_ops->ace_size(acep);
if (((caddr_t)acep + ace_size) >
((caddr_t)aclnode->z_acldata + aclnode->z_size)) {
return (NULL);
}
*iflags = aclp->z_ops->ace_flags_get(acep);
*type = aclp->z_ops->ace_type_get(acep);
*access_mask = aclp->z_ops->ace_mask_get(acep);
*who = aclp->z_ops->ace_who_get(acep);
aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size;
aclnode->z_ace_idx++;
return ((void *)acep);
}
return (NULL);
}
static uintptr_t
zfs_ace_walk(void *datap, uintptr_t cookie, int aclcnt,
uint16_t *flags, uint16_t *type, uint32_t *mask)
{
(void) aclcnt;
zfs_acl_t *aclp = datap;
zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)cookie;
uint64_t who;
acep = zfs_acl_next_ace(aclp, acep, &who, mask,
flags, type);
return ((uintptr_t)acep);
}
/*
* Copy ACE to internal ZFS format.
* While processing the ACL each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
static int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
int i;
uint16_t entry_type;
zfs_ace_t *aceptr = z_acl;
ace_t *acep = datap;
zfs_object_ace_t *zobjacep;
ace_object_t *aceobjp;
for (i = 0; i != aclcnt; i++) {
aceptr->z_hdr.z_access_mask = acep->a_access_mask;
aceptr->z_hdr.z_flags = acep->a_flags;
aceptr->z_hdr.z_type = acep->a_type;
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
memcpy(zobjacep->z_object_type, aceobjp->a_obj_type,
sizeof (aceobjp->a_obj_type));
memcpy(zobjacep->z_inherit_type,
aceobjp->a_inherit_obj_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
default:
acep = (ace_t *)((caddr_t)acep + sizeof (ace_t));
}
aceptr = (zfs_ace_t *)((caddr_t)aceptr +
aclp->z_ops->ace_size(aceptr));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
uint32_t access_mask;
uint16_t iflags, type;
zfs_ace_hdr_t *zacep = NULL;
ace_t *acep = datap;
ace_object_t *objacep;
zfs_object_ace_t *zobjacep;
size_t ace_size;
uint16_t entry_type;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (filter) {
continue;
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
memcpy(objacep->a_obj_type,
zobjacep->z_object_type,
sizeof (zobjacep->z_object_type));
memcpy(objacep->a_inherit_obj_type,
zobjacep->z_inherit_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
default:
ace_size = sizeof (ace_t);
break;
}
entry_type = (iflags & ACE_TYPE_FLAGS);
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
acep->a_who = zfs_fuid_map_id(zfsvfs, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
acep->a_who = (uid_t)(int64_t)who;
}
acep->a_access_mask = access_mask;
acep->a_flags = iflags;
acep->a_type = type;
acep = (ace_t *)((caddr_t)acep + ace_size);
}
}
static int
zfs_copy_ace_2_oldace(umode_t obj_mode, zfs_acl_t *aclp, ace_t *acep,
zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
int i;
zfs_oldace_t *aceptr = z_acl;
for (i = 0; i != aclcnt; i++, aceptr++) {
aceptr->z_access_mask = acep[i].a_access_mask;
aceptr->z_type = acep[i].a_type;
aceptr->z_flags = acep[i].a_flags;
aceptr->z_fuid = acep[i].a_who;
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* convert old ACL format to new
*/
void
zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
{
zfs_oldace_t *oldaclp;
int i;
uint16_t type, iflags;
uint32_t access_mask;
uint64_t who;
void *cookie = NULL;
zfs_acl_node_t *newaclnode;
ASSERT(aclp->z_version == ZFS_ACL_VERSION_INITIAL);
/*
* First create the ACE in a contiguous piece of memory
* for zfs_copy_ace_2_fuid().
*
* We only convert an ACL once, so this won't happen
* every time.
*/
oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
KM_SLEEP);
i = 0;
while ((cookie = zfs_acl_next_ace(aclp, cookie, &who,
&access_mask, &iflags, &type))) {
oldaclp[i].z_flags = iflags;
oldaclp[i].z_type = type;
oldaclp[i].z_fuid = who;
oldaclp[i++].z_access_mask = access_mask;
}
newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
sizeof (zfs_object_ace_t));
aclp->z_ops = &zfs_acl_fuid_ops;
VERIFY(zfs_copy_ace_2_fuid(ZTOZSB(zp), ZTOI(zp)->i_mode,
aclp, oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
&newaclnode->z_size, NULL, cr) == 0);
newaclnode->z_ace_count = aclp->z_acl_count;
aclp->z_version = ZFS_ACL_VERSION;
kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t));
/*
* Release all previous ACL nodes
*/
zfs_acl_release_nodes(aclp);
list_insert_head(&aclp->z_acl, newaclnode);
aclp->z_acl_bytes = newaclnode->z_size;
aclp->z_acl_count = newaclnode->z_ace_count;
}
/*
* Convert unix access mask to v4 access mask
*/
static uint32_t
zfs_unix_to_v4(uint32_t access_mask)
{
uint32_t new_mask = 0;
if (access_mask & S_IXOTH)
new_mask |= ACE_EXECUTE;
if (access_mask & S_IWOTH)
new_mask |= ACE_WRITE_DATA;
if (access_mask & S_IROTH)
new_mask |= ACE_READ_DATA;
return (new_mask);
}
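/*
* Convert v4 access mask to unix rwx bits.  Permissions with no unix
* equivalent (write_owner, write_acl, delete) are returned in
* *unmapped so the caller can fall back to privilege checks.
*/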
static int
zfs_v4_to_unix(uint32_t access_mask, int *unmapped)
{
int new_mask = 0;
*unmapped = access_mask &
(ACE_WRITE_OWNER | ACE_WRITE_ACL | ACE_DELETE);
if (access_mask & WRITE_MASK)
new_mask |= S_IWOTH;
if (access_mask & ACE_READ_DATA)
new_mask |= S_IROTH;
if (access_mask & ACE_EXECUTE)
new_mask |= S_IXOTH;
return (new_mask);
}
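/*
* Fill in a single ACE through the ACL's version-specific ops vector.
* Abstract entries (owner@, group@, everyone@) carry no explicit who,
* so the fuid is only set for user and group entries.
*/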
static void
zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask,
uint16_t access_type, uint64_t fuid, uint16_t entry_type)
{
uint16_t type = entry_type & ACE_TYPE_FLAGS;
aclp->z_ops->ace_mask_set(acep, access_mask);
aclp->z_ops->ace_type_set(acep, access_type);
aclp->z_ops->ace_flags_set(acep, entry_type);
if ((type != ACE_OWNER && type != OWNING_GROUP &&
type != ACE_EVERYONE))
aclp->z_ops->ace_who_set(acep, fuid);
}
/*
* Determine mode of file based on ACL.
*/
uint64_t
zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp,
uint64_t *pflags, uint64_t fuid, uint64_t fgid)
{
int entry_type;
mode_t mode;
mode_t seen = 0;
zfs_ace_hdr_t *acep = NULL;
uint64_t who;
uint16_t iflags, type;
uint32_t access_mask;
boolean_t an_exec_denied = B_FALSE;
mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));
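/*
* Walk the ACL in order; the 'seen' bits ensure that only the first
* ACE mentioning a given permission for a class determines the
* corresponding mode bit, so later ACEs cannot override it.
*/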
while ((acep = zfs_acl_next_ace(aclp, acep, &who,
&access_mask, &iflags, &type))) {
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* Skip over any inherit_only ACEs
*/
if (iflags & ACE_INHERIT_ONLY_ACE)
continue;
if (entry_type == ACE_OWNER || (entry_type == 0 &&
who == fuid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRUSR))) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWUSR))) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXUSR))) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
} else if (entry_type == OWNING_GROUP ||
(entry_type == ACE_IDENTIFIER_GROUP && who == fgid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRGRP))) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWGRP))) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXGRP))) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
} else if (entry_type == ACE_EVERYONE) {
if ((access_mask & ACE_READ_DATA)) {
if (!(seen & S_IRUSR)) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if (!(seen & S_IRGRP)) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if (!(seen & S_IROTH)) {
seen |= S_IROTH;
if (type == ALLOW) {
mode |= S_IROTH;
}
}
}
if ((access_mask & ACE_WRITE_DATA)) {
if (!(seen & S_IWUSR)) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if (!(seen & S_IWGRP)) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if (!(seen & S_IWOTH)) {
seen |= S_IWOTH;
if (type == ALLOW) {
mode |= S_IWOTH;
}
}
}
if ((access_mask & ACE_EXECUTE)) {
if (!(seen & S_IXUSR)) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
if (!(seen & S_IXGRP)) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
if (!(seen & S_IXOTH)) {
seen |= S_IXOTH;
if (type == ALLOW) {
mode |= S_IXOTH;
}
}
}
} else {
/*
* Only care if this IDENTIFIER_GROUP or
* USER ACE denies execute access to someone;
* the mode is not affected.
*/
if ((access_mask & ACE_EXECUTE) && type == DENY)
an_exec_denied = B_TRUE;
}
}
/*
* Failure to allow is effectively a deny, so execute permission
* is denied if it was never mentioned or if we explicitly
* weren't allowed it.
*/
if (!an_exec_denied &&
((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS ||
(mode & ALL_MODE_EXECS) != ALL_MODE_EXECS))
an_exec_denied = B_TRUE;
if (an_exec_denied)
*pflags &= ~ZFS_NO_EXECS_DENIED;
else
*pflags |= ZFS_NO_EXECS_DENIED;
return (mode);
}
/*
* Read an external acl object. If the intent is to modify, always
* create a new acl and leave any cached acl in place.
*/
int
zfs_acl_node_read(struct znode *zp, boolean_t have_lock, zfs_acl_t **aclpp,
boolean_t will_modify)
{
zfs_acl_t *aclp;
int aclsize = 0;
int acl_count = 0;
zfs_acl_node_t *aclnode;
zfs_acl_phys_t znode_acl;
int version;
int error;
boolean_t drop_lock = B_FALSE;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_acl_cached && !will_modify) {
*aclpp = zp->z_acl_cached;
return (0);
}
/*
* Close the race where the znode could be upgraded while we are
* trying to read the znode attributes.
*
* This could only happen if the file isn't already an SA
* znode.
*/
if (!zp->z_is_sa && !have_lock) {
mutex_enter(&zp->z_lock);
drop_lock = B_TRUE;
}
version = zfs_znode_acl_version(zp);
if ((error = zfs_acl_znode_info(zp, &aclsize,
&acl_count, &znode_acl)) != 0) {
goto done;
}
aclp = zfs_acl_alloc(version);
aclp->z_acl_count = acl_count;
aclp->z_acl_bytes = aclsize;
aclnode = zfs_acl_node_alloc(aclsize);
aclnode->z_ace_count = aclp->z_acl_count;
aclnode->z_size = aclsize;
if (!zp->z_is_sa) {
if (znode_acl.z_acl_extern_obj) {
error = dmu_read(ZTOZSB(zp)->z_os,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
memcpy(aclnode->z_acldata, znode_acl.z_ace_data,
aclnode->z_size);
}
} else {
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(ZTOZSB(zp)),
aclnode->z_acldata, aclnode->z_size);
}
if (error != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
goto done;
}
list_insert_head(&aclp->z_acl, aclnode);
*aclpp = aclp;
if (!will_modify)
zp->z_acl_cached = aclp;
done:
if (drop_lock)
mutex_exit(&zp->z_lock);
return (error);
}
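/*
* SA data locator callback: on each call hand back the next ACL
* node's buffer, so the DACL_ACES attribute can be written from the
* node list without first flattening the ACL into one buffer.
*/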
void
zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
boolean_t start, void *userdata)
{
(void) buflen;
zfs_acl_locator_cb_t *cb = (zfs_acl_locator_cb_t *)userdata;
if (start) {
cb->cb_acl_node = list_head(&cb->cb_aclp->z_acl);
} else {
cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
cb->cb_acl_node);
}
ASSERT3P(cb->cb_acl_node, !=, NULL);
*dataptr = cb->cb_acl_node->z_acldata;
*length = cb->cb_acl_node->z_size;
}
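/*
* After a chown/chgrp, recompute the cached mode and pflags so that
* owner@/group@ ACEs are re-evaluated against the new owner and group.
*/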
int
zfs_acl_chown_setattr(znode_t *zp)
{
int error;
zfs_acl_t *aclp;
if (ZTOZSB(zp)->z_acl_type == ZFS_ACLTYPE_POSIX)
return (0);
ASSERT(MUTEX_HELD(&zp->z_lock));
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error == 0 && aclp->z_acl_count > 0)
zp->z_mode = ZTOI(zp)->i_mode =
zfs_mode_compute(zp->z_mode, aclp,
&zp->z_pflags, KUID_TO_SUID(ZTOI(zp)->i_uid),
KGID_TO_SGID(ZTOI(zp)->i_gid));
/*
* Some ZFS implementations (ZEVO) create neither a ZNODE_ACL
* nor a DACL_ACES SA in which case ENOENT is returned from
* zfs_acl_node_read() when the SA can't be located.
* Allow chown/chgrp to succeed in these cases rather than
* returning an error that makes no sense in the context of
* the caller.
*/
if (error == ENOENT)
return (0);
return (error);
}
typedef struct trivial_acl {
uint32_t allow0; /* allow mask for bits only in owner */
uint32_t deny1; /* deny mask for bits not in owner */
uint32_t deny2; /* deny mask for bits not in group */
uint32_t owner; /* allow mask matching mode */
uint32_t group; /* allow mask matching mode */
uint32_t everyone; /* allow mask matching mode */
} trivial_acl_t;
static void
acl_trivial_access_masks(mode_t mode, boolean_t isdir, trivial_acl_t *masks)
{
uint32_t read_mask = ACE_READ_DATA;
uint32_t write_mask = ACE_WRITE_DATA|ACE_APPEND_DATA;
uint32_t execute_mask = ACE_EXECUTE;
if (isdir)
write_mask |= ACE_DELETE_CHILD;
masks->deny1 = 0;
if (!(mode & S_IRUSR) && (mode & (S_IRGRP|S_IROTH)))
masks->deny1 |= read_mask;
if (!(mode & S_IWUSR) && (mode & (S_IWGRP|S_IWOTH)))
masks->deny1 |= write_mask;
if (!(mode & S_IXUSR) && (mode & (S_IXGRP|S_IXOTH)))
masks->deny1 |= execute_mask;
masks->deny2 = 0;
if (!(mode & S_IRGRP) && (mode & S_IROTH))
masks->deny2 |= read_mask;
if (!(mode & S_IWGRP) && (mode & S_IWOTH))
masks->deny2 |= write_mask;
if (!(mode & S_IXGRP) && (mode & S_IXOTH))
masks->deny2 |= execute_mask;
masks->allow0 = 0;
if ((mode & S_IRUSR) && (!(mode & S_IRGRP) && (mode & S_IROTH)))
masks->allow0 |= read_mask;
if ((mode & S_IWUSR) && (!(mode & S_IWGRP) && (mode & S_IWOTH)))
masks->allow0 |= write_mask;
if ((mode & S_IXUSR) && (!(mode & S_IXGRP) && (mode & S_IXOTH)))
masks->allow0 |= execute_mask;
masks->owner = ACE_WRITE_ATTRIBUTES|ACE_WRITE_OWNER|ACE_WRITE_ACL|
ACE_WRITE_NAMED_ATTRS|ACE_READ_ACL|ACE_READ_ATTRIBUTES|
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE;
if (mode & S_IRUSR)
masks->owner |= read_mask;
if (mode & S_IWUSR)
masks->owner |= write_mask;
if (mode & S_IXUSR)
masks->owner |= execute_mask;
masks->group = ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_NAMED_ATTRS|
ACE_SYNCHRONIZE;
if (mode & S_IRGRP)
masks->group |= read_mask;
if (mode & S_IWGRP)
masks->group |= write_mask;
if (mode & S_IXGRP)
masks->group |= execute_mask;
masks->everyone = ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_NAMED_ATTRS|
ACE_SYNCHRONIZE;
if (mode & S_IROTH)
masks->everyone |= read_mask;
if (mode & S_IWOTH)
masks->everyone |= write_mask;
if (mode & S_IXOTH)
masks->everyone |= execute_mask;
}
/*
* ace_trivial:
* determine whether an ace_t acl is trivial
*
* Trivialness implies that the ACL is composed of only
* owner@, group@, and everyone@ entries. The ACL can't
* have read_acl denied, and write_owner/write_acl/write_attributes
* may only appear on the owner@ entry.
*/
static int
ace_trivial_common(void *acep, int aclcnt,
uintptr_t (*walk)(void *, uintptr_t, int,
uint16_t *, uint16_t *, uint32_t *))
{
uint16_t flags;
uint32_t mask;
uint16_t type;
uint64_t cookie = 0;
while ((cookie = walk(acep, cookie, aclcnt, &flags, &type, &mask))) {
switch (flags & ACE_TYPE_FLAGS) {
case ACE_OWNER:
case ACE_GROUP|ACE_IDENTIFIER_GROUP:
case ACE_EVERYONE:
break;
default:
return (1);
}
if (flags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE|ACE_NO_PROPAGATE_INHERIT_ACE|
ACE_INHERIT_ONLY_ACE))
return (1);
/*
* Special check for some special bits
*
* Don't allow anybody to deny reading basic
* attributes or a file's ACL.
*/
if ((mask & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
(type == ACE_ACCESS_DENIED_ACE_TYPE))
return (1);
/*
* Delete permission is never set by default
*/
if (mask & ACE_DELETE)
return (1);
/*
* Child delete permission should be accompanied by write
*/
if ((mask & ACE_DELETE_CHILD) && !(mask & ACE_WRITE_DATA))
return (1);
/*
* Only allow owner@ to have
* write_acl/write_owner/write_attributes/write_xattr.
*/
if (type == ACE_ACCESS_ALLOWED_ACE_TYPE &&
(!(flags & ACE_OWNER) && (mask &
(ACE_WRITE_OWNER|ACE_WRITE_ACL| ACE_WRITE_ATTRIBUTES|
ACE_WRITE_NAMED_ATTRS))))
return (1);
}
return (0);
}
/*
* Common code for setting ACLs.
*
* This function is called from zfs_mode_update, zfs_perm_init, and
* zfs_setacl.
*/
int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
sa_bulk_attr_t bulk[5];
uint64_t ctime[2];
int count = 0;
zfs_acl_phys_t acl_phys;
mode = zp->z_mode;
mode = zfs_mode_compute(mode, aclp, &zp->z_pflags,
KUID_TO_SUID(ZTOI(zp)->i_uid), KGID_TO_SGID(ZTOI(zp)->i_gid));
zp->z_mode = ZTOI(zp)->i_mode = mode;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
/*
* Upgrade needed?
*/
if (!zfsvfs->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
(zfsvfs->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
}
/*
* Arrgh, we have to handle the old on-disk format
* as well as the newer (preferred) SA format.
*/
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
uint64_t off = 0;
uint64_t aoid;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
aoid = acl_phys.z_acl_extern_obj;
if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
/*
* If ACL was previously external and we are now
* converting to new ACL format then release old
* ACL object and create a new one.
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
error = dmu_object_free(zfsvfs->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
aoid = dmu_object_alloc(zfsvfs->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_OLD_MAX_BONUSLEN : 0, tx);
} else {
(void) dmu_object_set_blocksize(zfsvfs->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
dmu_write(zfsvfs->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
} else {
void *start = acl_phys.z_ace_data;
/*
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
error = dmu_object_free(zfsvfs->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
acl_phys.z_acl_extern_obj = 0;
}
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
}
/*
* If this is the old version, swap count/bytes to match the old
* layout of znode_acl_phys_t.
*/
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
acl_phys.z_acl_size = aclp->z_acl_count;
acl_phys.z_acl_count = aclp->z_acl_bytes;
} else {
acl_phys.z_acl_size = aclp->z_acl_bytes;
acl_phys.z_acl_count = aclp->z_acl_count;
}
acl_phys.z_acl_version = aclp->z_version;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (acl_phys));
}
/*
* Replace ACL wide bits, but first clear them.
*/
zp->z_pflags &= ~ZFS_ACL_WIDE_FLAGS;
zp->z_pflags |= aclp->z_hints;
if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0)
zp->z_pflags |= ZFS_ACL_TRIVIAL;
zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime);
return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
}
static void
zfs_acl_chmod(boolean_t isdir, uint64_t mode, boolean_t split, boolean_t trim,
zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
int new_count, new_bytes;
int ace_size;
int entry_type;
uint16_t iflags, type;
uint32_t access_mask;
zfs_acl_node_t *newnode;
size_t abstract_size = aclp->z_ops->ace_abstract_size();
void *zacep;
trivial_acl_t masks;
new_count = new_bytes = 0;
acl_trivial_access_masks((mode_t)mode, isdir, &masks);
newnode = zfs_acl_node_alloc((abstract_size * 6) + aclp->z_acl_bytes);
zacep = newnode->z_acldata;
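/*
* Build the new ACL in a single node: first any allow0/deny1/deny2
* prefix entries derived from the mode, then the surviving ACEs of
* the existing ACL, and finally the three abstract allow entries for
* owner@, group@, and everyone@.
*/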
if (masks.allow0) {
zfs_set_ace(aclp, zacep, masks.allow0, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny1) {
zfs_set_ace(aclp, zacep, masks.deny1, DENY, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny2) {
zfs_set_ace(aclp, zacep, masks.deny2, DENY, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* ACEs used to represent the file mode may be divided
* into an equivalent pair of inherit-only and regular
* ACEs, if they are inheritable.
* Skip regular ACEs, which are replaced by the new mode.
*/
if (split && (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)) {
if (!isdir || !(iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
continue;
/*
* We preserve owner@, group@, or everyone@
* permissions, if they are inheritable, by
* copying them to inherit_only ACEs. This
* prevents inheritable permissions from being
* altered along with the file mode.
*/
iflags |= ACE_INHERIT_ONLY_ACE;
}
/*
* If this ACL has any inheritable ACEs, mark that in
* the hints (which are later masked into the pflags)
* so create knows to do inheritance.
*/
if (isdir && (iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if ((type != ALLOW && type != DENY) ||
(iflags & ACE_INHERIT_ONLY_ACE)) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
break;
}
} else {
/*
* Limit permissions to be no greater than
* group permissions.
* The "aclinherit" and "aclmode" properties
* affect policy for create and chmod(2),
* respectively.
*/
if ((type == ALLOW) && trim)
access_mask &= masks.group;
}
zfs_set_ace(aclp, zacep, access_mask, type, who, iflags);
ace_size = aclp->z_ops->ace_size(acep);
zacep = (void *)((uintptr_t)zacep + ace_size);
new_count++;
new_bytes += ace_size;
}
zfs_set_ace(aclp, zacep, masks.owner, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.group, ALLOW, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.everyone, ALLOW, -1, ACE_EVERYONE);
new_count += 3;
new_bytes += abstract_size * 3;
zfs_acl_release_nodes(aclp);
aclp->z_acl_count = new_count;
aclp->z_acl_bytes = new_bytes;
newnode->z_ace_count = new_count;
newnode->z_size = new_bytes;
list_insert_tail(&aclp->z_acl, newnode);
}
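/*
* Apply a chmod(2)-style mode change to a znode's ACL, honoring the
* aclmode property: with "discard" the existing ACL is thrown away,
* and with "groupmask" ALLOW entries are trimmed to the group
* permissions.
*/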
int
zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
{
int error = 0;
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_DISCARD)
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
else
error = zfs_acl_node_read(zp, B_TRUE, aclp, B_TRUE);
if (error == 0) {
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
zfs_acl_chmod(S_ISDIR(ZTOI(zp)->i_mode), mode, B_TRUE,
(ZTOZSB(zp)->z_acl_mode == ZFS_ACL_GROUPMASK), *aclp);
}
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Should ACE be inherited?
*/
static int
zfs_ace_can_use(umode_t obj_mode, uint16_t acep_flags)
{
int iflags = (acep_flags & 0xf);
if (S_ISDIR(obj_mode) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
return (1);
else if (iflags & ACE_FILE_INHERIT_ACE)
return (!(S_ISDIR(obj_mode) &&
(iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
return (0);
}
/*
* inherit inheritable ACEs from parent
*/
static zfs_acl_t *
zfs_acl_inherit(zfsvfs_t *zfsvfs, umode_t va_mode, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep = NULL;
void *acep;
zfs_acl_node_t *aclnode;
zfs_acl_t *aclp = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t iflags, newflags, type;
size_t ace_size;
void *data1, *data2;
size_t data1sz, data2sz;
uint_t aclinherit;
boolean_t isdir = S_ISDIR(va_mode);
boolean_t isreg = S_ISREG(va_mode);
*need_chmod = B_TRUE;
aclp = zfs_acl_alloc(paclp->z_version);
aclinherit = zfsvfs->z_acl_inherit;
if (aclinherit == ZFS_ACL_DISCARD || S_ISLNK(va_mode))
return (aclp);
while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
&access_mask, &iflags, &type))) {
/*
* don't inherit bogus ACEs
*/
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
/*
* Check if ACE is inheritable by this vnode
*/
if ((aclinherit == ZFS_ACL_NOALLOW && type == ALLOW) ||
!zfs_ace_can_use(va_mode, iflags))
continue;
/*
* If an owner@, group@, or everyone@ ACE is inheritable
* then zfs_acl_chmod() isn't needed.
*/
if ((aclinherit == ZFS_ACL_PASSTHROUGH ||
aclinherit == ZFS_ACL_PASSTHROUGH_X) &&
((iflags & (ACE_OWNER|ACE_EVERYONE)) ||
((iflags & OWNING_GROUP) == OWNING_GROUP)) &&
(isreg || (isdir && (iflags & ACE_DIRECTORY_INHERIT_ACE))))
*need_chmod = B_FALSE;
/*
* Strip inherited execute permission from file if
* not in mode
*/
if (aclinherit == ZFS_ACL_PASSTHROUGH_X && type == ALLOW &&
!isdir && ((mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)) {
access_mask &= ~ACE_EXECUTE;
}
/*
* Strip write_acl and write_owner from permissions
* when inheriting an ACE
*/
if (aclinherit == ZFS_ACL_RESTRICTED && type == ALLOW) {
access_mask &= ~RESTRICTED_CLEAR;
}
ace_size = aclp->z_ops->ace_size(pacep);
aclnode = zfs_acl_node_alloc(ace_size);
list_insert_tail(&aclp->z_acl, aclnode);
acep = aclnode->z_acldata;
zfs_set_ace(aclp, acep, access_mask, type,
who, iflags|ACE_INHERITED_ACE);
/*
* Copy special opaque data if any
*/
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
VERIFY((data2sz = aclp->z_ops->ace_data(acep,
&data2)) == data1sz);
memcpy(data2, data1, data2sz);
}
aclp->z_acl_count++;
aclnode->z_ace_count++;
aclp->z_acl_bytes += aclnode->z_size;
newflags = aclp->z_ops->ace_flags_get(acep);
/*
* If ACE is not to be inherited further, or if the vnode is
* not a directory, remove all inheritance flags
*/
if (!isdir || (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)) {
newflags &= ~ALL_INHERIT;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
continue;
}
/*
* This directory has an inheritable ACE
*/
aclp->z_hints |= ZFS_INHERIT_ACE;
/*
* If only FILE_INHERIT is set then turn on
* inherit_only
*/
if ((iflags & (ACE_FILE_INHERIT_ACE |
ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) {
newflags |= ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
} else {
newflags &= ~ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
}
}
if (zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
aclp->z_acl_count != 0) {
*need_chmod = B_FALSE;
}
return (aclp);
}
/*
* Create the initial permissions for a file system object,
* including inheritable ACEs.
* Also, create FUIDs for the owner and group.
*/
int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids, zidmap_t *mnt_ns)
{
int error;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zfs_acl_t *paclp;
gid_t gid = vap->va_gid;
boolean_t need_chmod = B_TRUE;
boolean_t trim = B_FALSE;
boolean_t inherited = B_FALSE;
memset(acl_ids, 0, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = vap->va_mode;
if (vsecp)
if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_mode, vsecp,
cr, &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
acl_ids->z_fuid = vap->va_uid;
acl_ids->z_fgid = vap->va_gid;
#ifdef HAVE_KSID
/*
* Determine uid and gid.
*/
if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
((flag & IS_XATTR) && (S_ISDIR(vap->va_mode)))) {
acl_ids->z_fuid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_uid,
cr, ZFS_OWNER, &acl_ids->z_fuidp);
acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
if (acl_ids->z_fgid != KGID_TO_SGID(ZTOI(dzp)->i_gid) &&
!groupmember(vap->va_gid, cr) &&
secpolicy_vnode_create_gid(cr) != 0)
acl_ids->z_fgid = 0;
}
if (acl_ids->z_fgid == 0) {
if (dzp->z_mode & S_ISGID) {
char *domain;
uint32_t rid;
acl_ids->z_fgid = KGID_TO_SGID(
ZTOI(dzp)->i_gid);
gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
cr, ZFS_GROUP);
if (zfsvfs->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain = zfs_fuid_idx_domain(
&zfsvfs->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
domain, rid,
FUID_INDEX(acl_ids->z_fgid),
acl_ids->z_fgid, ZFS_GROUP);
}
} else {
acl_ids->z_fgid = zfs_fuid_create_cred(zfsvfs,
ZFS_GROUP, cr, &acl_ids->z_fuidp);
gid = crgetgid(cr);
}
}
}
#endif /* HAVE_KSID */
/*
* If we're creating a directory, and the parent directory has the
* set-GID bit set, set it on the new directory.
* Otherwise, if the user is neither privileged nor a member of the
* file's new group, clear the file's set-GID bit.
*/
if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) &&
(S_ISDIR(vap->va_mode))) {
acl_ids->z_mode |= S_ISGID;
} else {
if ((acl_ids->z_mode & S_ISGID) &&
secpolicy_vnode_setids_setgids(cr, gid, mnt_ns,
zfs_i_user_ns(ZTOI(dzp))) != 0) {
acl_ids->z_mode &= ~S_ISGID;
}
}
if (acl_ids->z_aclp == NULL) {
mutex_enter(&dzp->z_acl_lock);
mutex_enter(&dzp->z_lock);
if (!(flag & IS_ROOT_NODE) &&
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
zfs_acl_alloc(zfs_acl_version_zp(dzp));
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
mutex_exit(&dzp->z_lock);
mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
if (S_ISDIR(vap->va_mode))
acl_ids->z_aclp->z_hints |=
ZFS_ACL_AUTO_INHERIT;
if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH_X)
trim = B_TRUE;
zfs_acl_chmod(vap->va_mode, acl_ids->z_mode, B_FALSE,
trim, acl_ids->z_aclp);
}
}
if (inherited || vsecp) {
acl_ids->z_mode = zfs_mode_compute(acl_ids->z_mode,
acl_ids->z_aclp, &acl_ids->z_aclp->z_hints,
acl_ids->z_fuid, acl_ids->z_fgid);
if (ace_trivial_common(acl_ids->z_aclp, 0, zfs_ace_walk) == 0)
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
return (0);
}
/*
* Free ACL and fuid_infop, but not the acl_ids structure
*/
void
zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
{
if (acl_ids->z_aclp)
zfs_acl_free(acl_ids->z_aclp);
if (acl_ids->z_fuidp)
zfs_fuid_info_free(acl_ids->z_fuidp);
acl_ids->z_aclp = NULL;
acl_ids->z_fuidp = NULL;
}
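/*
* Check whether creating an object owned by these IDs would put the
* user, group, or project over quota.
*/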
boolean_t
zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid)
{
return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) ||
zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) ||
(projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid)));
}
/*
* Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfs_acl_t *aclp;
ulong_t mask;
int error;
int count = 0;
int largeace = 0;
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr,
zfs_init_idmap)))
return (error);
mutex_enter(&zp->z_acl_lock);
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Scan ACL to determine number of ACEs
*/
if ((zp->z_pflags & ZFS_ACL_OBJ_ACE) && !(mask & VSA_ACE_ALLTYPES)) {
void *zacep = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t type, iflags;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
largeace++;
continue;
default:
count++;
}
}
vsecp->vsa_aclcnt = count;
} else
count = (int)aclp->z_acl_count;
if (mask & VSA_ACECNT) {
vsecp->vsa_aclcnt = count;
}
if (mask & VSA_ACE) {
size_t aclsz;
aclsz = count * sizeof (ace_t) +
sizeof (ace_object_t) * largeace;
vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP);
vsecp->vsa_aclentsz = aclsz;
if (aclp->z_version == ZFS_ACL_VERSION_FUID)
zfs_copy_fuid_2_ace(ZTOZSB(zp), aclp, cr,
vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
else {
zfs_acl_node_t *aclnode;
void *start = vsecp->vsa_aclentp;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
memcpy(start, aclnode->z_acldata,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
ASSERT((caddr_t)start - (caddr_t)vsecp->vsa_aclentp ==
aclp->z_acl_bytes);
}
}
if (mask & VSA_ACE_ACLFLAGS) {
vsecp->vsa_aclflags = 0;
if (zp->z_pflags & ZFS_ACL_DEFAULTED)
vsecp->vsa_aclflags |= ACL_DEFAULTED;
if (zp->z_pflags & ZFS_ACL_PROTECTED)
vsecp->vsa_aclflags |= ACL_PROTECTED;
if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT)
vsecp->vsa_aclflags |= ACL_AUTO_INHERIT;
}
mutex_exit(&zp->z_acl_lock);
return (0);
}
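/*
* Convert a caller-supplied vsecattr_t into an internal zfs_acl_t,
* using the old or FUID ACE layout depending on the filesystem's
* ACL version.
*/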
int
zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_mode,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
zfs_acl_node_t *aclnode;
int aclcnt = vsecp->vsa_aclcnt;
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
if ((error = zfs_copy_ace_2_oldace(obj_mode, aclp,
(ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
aclcnt, &aclnode->z_size)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
} else {
if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_mode, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
}
aclp->z_acl_bytes = aclnode->z_size;
aclnode->z_ace_count = aclcnt;
aclp->z_acl_count = aclcnt;
list_insert_head(&aclp->z_acl, aclnode);
/*
* If flags are being set then add them to z_hints
*/
if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) {
if (vsecp->vsa_aclflags & ACL_PROTECTED)
aclp->z_hints |= ZFS_ACL_PROTECTED;
if (vsecp->vsa_aclflags & ACL_DEFAULTED)
aclp->z_hints |= ZFS_ACL_DEFAULTED;
if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT)
aclp->z_hints |= ZFS_ACL_AUTO_INHERIT;
}
*zaclp = aclp;
return (0);
}
/*
* Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
zfs_acl_t *aclp;
zfs_fuid_info_t *fuidp = NULL;
boolean_t fuid_dirtied;
uint64_t acl_obj;
if (mask == 0)
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr,
zfs_init_idmap)))
return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
/*
* If ACL wide flags aren't being set then preserve any
* existing flags.
*/
if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) {
aclp->z_hints |=
(zp->z_pflags & V4_ACL_WIDE_FLAGS);
}
top:
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
/*
* If this is the old version, the ACL won't fit in the bonus buffer,
* and we aren't upgrading, then take out the necessary DMU holds.
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes);
}
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
mutex_exit(&zp->z_acl_lock);
mutex_exit(&zp->z_lock);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
zfs_acl_free(aclp);
return (error);
}
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT(error == 0);
ASSERT(zp->z_acl_cached == NULL);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
if (fuidp)
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Check accesses of interest (AoI) against attributes of the dataset
* such as read-only. Returns zero if no AoI conflicts with the dataset
* attributes; otherwise an appropriate errno is returned.
*/
static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
if ((v4_mode & WRITE_MASK) && (zfs_is_readonly(ZTOZSB(zp))) &&
(!Z_ISDEV(ZTOI(zp)->i_mode) || (v4_mode & WRITE_MASK_ATTRS))) {
return (SET_ERROR(EROFS));
}
/*
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common().
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(zp->z_pflags & ZFS_IMMUTABLE)) {
return (SET_ERROR(EPERM));
}
if ((v4_mode & (ACE_DELETE | ACE_DELETE_CHILD)) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
return (SET_ERROR(EPERM));
}
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
return (SET_ERROR(EACCES));
}
return (0);
}
/*
* The primary usage of this function is to loop through all of the
* ACEs in the znode, determining what accesses of interest (AoI) to
* the caller are allowed or denied. The AoI are expressed as bits in
* the working_mode parameter. As each ACE is processed, bits covered
* by that ACE are removed from the working_mode. This removal
* facilitates two things. The first is that when the working mode is
* empty (= 0), we know we've looked at all the AoI. The second is
* that the ACE interpretation rules don't allow a later ACE to undo
* something granted or denied by an earlier ACE. Removing the
* discovered access or denial enforces this rule. At the end of
* processing the ACEs, all AoI that were found to be denied are
* placed into the working_mode, giving the caller a mask of denied
* accesses. Returns:
* 0 if all AoI granted
* EACCES if the denied mask is non-zero
* other error if abnormal failure (e.g., IO error)
*
* A secondary usage of the function is to determine if any of the
* AoI are granted. If an ACE grants any access in
* the working_mode, we immediately short circuit out of the function.
* This mode is chosen by setting anyaccess to B_TRUE. The
* working_mode is not a denied access mask upon exit if the function
* is used in this manner.
*/
static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr, zidmap_t *mnt_ns)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
uint64_t who;
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
uint32_t deny_mask = 0;
zfs_ace_hdr_t *acep = NULL;
boolean_t checkit;
uid_t gowner;
uid_t fowner;
if (mnt_ns) {
fowner = zfs_uid_to_vfsuid(mnt_ns, zfs_i_user_ns(ZTOI(zp)),
KUID_TO_SUID(ZTOI(zp)->i_uid));
gowner = zfs_gid_to_vfsgid(mnt_ns, zfs_i_user_ns(ZTOI(zp)),
KGID_TO_SGID(ZTOI(zp)->i_gid));
} else
zfs_fuid_map_ids(zp, cr, &fowner, &gowner);
mutex_enter(&zp->z_acl_lock);
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
ASSERT(zp->z_acl_cached);
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
uint32_t mask_matched;
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
if (S_ISDIR(ZTOI(zp)->i_mode) &&
(iflags & ACE_INHERIT_ONLY_ACE))
continue;
/* Skip ACE if it does not affect any AoI */
mask_matched = (access_mask & *working_mode);
if (!mask_matched)
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
checkit = B_FALSE;
switch (entry_type) {
case ACE_OWNER:
if (uid == fowner)
checkit = B_TRUE;
break;
case OWNING_GROUP:
who = gowner;
zfs_fallthrough;
case ACE_IDENTIFIER_GROUP:
checkit = zfs_groupmember(zfsvfs, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
break;
/* USER Entry */
default:
if (entry_type == 0) {
uid_t newid;
newid = zfs_fuid_map_id(zfsvfs, who, cr,
ZFS_ACE_USER);
if (newid != IDMAP_WK_CREATOR_OWNER_UID &&
uid == newid)
checkit = B_TRUE;
break;
} else {
mutex_exit(&zp->z_acl_lock);
return (SET_ERROR(EIO));
}
}
if (checkit) {
if (type == DENY) {
DTRACE_PROBE3(zfs__ace__denies,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
deny_mask |= mask_matched;
} else {
DTRACE_PROBE3(zfs__ace__allows,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
if (anyaccess) {
mutex_exit(&zp->z_acl_lock);
return (0);
}
}
*working_mode &= ~mask_matched;
}
/* Are we done? */
if (*working_mode == 0)
break;
}
mutex_exit(&zp->z_acl_lock);
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
return (0);
}
/*
* Return true if any access whatsoever is granted; we don't actually
* care which access is granted.
*/
boolean_t
zfs_has_access(znode_t *zp, cred_t *cr)
{
uint32_t have = ACE_ALL_PERMS;
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr,
zfs_init_idmap) != 0) {
uid_t owner;
owner = zfs_fuid_map_id(ZTOZSB(zp),
KUID_TO_SUID(ZTOI(zp)->i_uid), cr, ZFS_OWNER);
return (secpolicy_vnode_any_access(cr, ZTOI(zp), owner) == 0);
}
return (B_TRUE);
}
/*
* Simplified access check for the case where the ACL is known to not
* contain information beyond what is defined in the mode. In this case,
* we can pass along to the kernel / vfs generic_permission() check,
* which evaluates the mode and the POSIX ACL.
*
* NFSv4 ACLs allow granting permissions that are usually relegated only
* to the file owner or superuser. Examples are ACE_WRITE_OWNER (chown),
* ACE_WRITE_ACL (chmod), and ACE_DELETE. ACE_DELETE requests must fail
* because, with conventional POSIX permissions, the right to delete a
* file is determined by the write bit on the parent directory.
*
* If unmappable perms are requested, then we must return EPERM
* and include those bits in the working_mode so that the caller of
* zfs_zaccess_common() can decide whether to perform additional
* policy / capability checks. EACCES is used in zfs_zaccess_aces_check()
* to indicate that an access check failed due to an explicit DENY entry,
* and so we want to avoid that here.
*/
static int
zfs_zaccess_trivial(znode_t *zp, uint32_t *working_mode, cred_t *cr,
zidmap_t *mnt_ns)
{
int err, mask;
int unmapped = 0;
ASSERT(zp->z_pflags & ZFS_ACL_TRIVIAL);
mask = zfs_v4_to_unix(*working_mode, &unmapped);
if (mask == 0 || unmapped) {
*working_mode = unmapped;
return (unmapped ? SET_ERROR(EPERM) : 0);
}
#if (defined(HAVE_IOPS_PERMISSION_USERNS) || \
defined(HAVE_IOPS_PERMISSION_IDMAP))
err = generic_permission(mnt_ns, ZTOI(zp), mask);
#else
err = generic_permission(ZTOI(zp), mask);
#endif
if (err != 0) {
return (SET_ERROR(EPERM));
}
*working_mode = unmapped;
return (0);
}
static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr, zidmap_t *mnt_ns)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int err;
*working_mode = v4_mode;
*check_privs = B_TRUE;
/*
* Short circuit empty requests
*/
if (v4_mode == 0 || zfsvfs->z_replay) {
*working_mode = 0;
return (0);
}
if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) {
*check_privs = B_FALSE;
return (err);
}
/*
* The caller requested that the ACL check be skipped. This
* would only happen if the caller checked VOP_ACCESS() with a
* 32 bit ACE mask and already had the appropriate permissions.
*/
if (skipaclchk) {
*working_mode = 0;
return (0);
}
/*
* Note: ZFS_READONLY represents the "DOS R/O" attribute.
* When that flag is set, we should behave as if write access
* were not granted by anything in the ACL. In particular:
* We _must_ allow writes after opening the file r/w, then
* setting the DOS R/O attribute, and writing some more.
* (Similar to how you can write after fchmod(fd, 0444).)
*
* Therefore ZFS_READONLY is ignored in the dataset check
* above, and checked here as if part of the ACL check.
* Also note: DOS R/O is ignored for directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & ZFS_READONLY)) {
return (SET_ERROR(EPERM));
}
if (zp->z_pflags & ZFS_ACL_TRIVIAL)
return (zfs_zaccess_trivial(zp, working_mode, cr, mnt_ns));
return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr, mnt_ns));
}
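/*
* Called when an access check failed and the caller allows append:
* if write was the only permission denied, recheck the request as an
* append-only (ACE_APPEND_DATA) request so callers holding only
* append permission can still succeed.
*/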
static int
zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr, zidmap_t *mnt_ns)
{
if (*working_mode != ACE_WRITE_DATA)
return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr, mnt_ns));
}
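/*
* Fast path for execute access during lookup: answer from the cached
* pflags and mode bits when possible, and fall back to the full
* zfs_zaccess() check otherwise.
*/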
int
zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
{
boolean_t owner = B_FALSE;
boolean_t groupmbr = B_FALSE;
boolean_t is_attr;
uid_t uid = crgetuid(cr);
int error;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(S_ISDIR(ZTOI(zdp)->i_mode)));
if (is_attr)
goto slow;
mutex_enter(&zdp->z_acl_lock);
if (zdp->z_pflags & ZFS_NO_EXECS_DENIED) {
mutex_exit(&zdp->z_acl_lock);
return (0);
}
if (KUID_TO_SUID(ZTOI(zdp)->i_uid) != 0 ||
KGID_TO_SGID(ZTOI(zdp)->i_gid) != 0) {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
if (uid == KUID_TO_SUID(ZTOI(zdp)->i_uid)) {
if (zdp->z_mode & S_IXUSR) {
mutex_exit(&zdp->z_acl_lock);
return (0);
} else {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
}
if (groupmember(KGID_TO_SGID(ZTOI(zdp)->i_gid), cr)) {
if (zdp->z_mode & S_IXGRP) {
mutex_exit(&zdp->z_acl_lock);
return (0);
} else {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
}
if (!owner && !groupmbr) {
if (zdp->z_mode & S_IXOTH) {
mutex_exit(&zdp->z_acl_lock);
return (0);
}
}
mutex_exit(&zdp->z_acl_lock);
slow:
DTRACE_PROBE(zfs__fastpath__execute__access__miss);
if ((error = zfs_enter(ZTOZSB(zdp), FTAG)) != 0)
return (error);
error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr,
zfs_init_idmap);
zfs_exit(ZTOZSB(zdp), FTAG);
return (error);
}
/*
* Determine whether access should be granted or denied.
*
* The least-privilege subsystem is always consulted, as a basic
* privilege can define any form of access.
*/
int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr,
zidmap_t *mnt_ns)
{
uint32_t working_mode;
int error;
int is_attr;
boolean_t check_privs;
znode_t *xzp;
znode_t *check_zp = zp;
mode_t needed_bits;
uid_t owner;
is_attr = ((zp->z_pflags & ZFS_XATTR) && S_ISDIR(ZTOI(zp)->i_mode));
/*
* If attribute then validate against base file
*/
if (is_attr) {
if ((error = zfs_zget(ZTOZSB(zp),
zp->z_xattr_parent, &xzp)) != 0) {
return (error);
}
check_zp = xzp;
/*
* fixup mode to map to xattr perms
*/
if (mode & (ACE_WRITE_DATA|ACE_APPEND_DATA)) {
mode &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA);
mode |= ACE_WRITE_NAMED_ATTRS;
}
if (mode & (ACE_READ_DATA|ACE_EXECUTE)) {
mode &= ~(ACE_READ_DATA|ACE_EXECUTE);
mode |= ACE_READ_NAMED_ATTRS;
}
}
owner = zfs_uid_to_vfsuid(mnt_ns, zfs_i_user_ns(ZTOI(zp)),
KUID_TO_SUID(ZTOI(zp)->i_uid));
owner = zfs_fuid_map_id(ZTOZSB(zp), owner, cr, ZFS_OWNER);
/*
* Map the bits required to the standard inode flags
* S_IRUSR|S_IWUSR|S_IXUSR in needed_bits.  Later, the bits still
* present in working_mode (i.e. those not granted by the ACL) are
* mapped into checkmode, and secpolicy_vnode_access2() is called
* with (needed_bits & ~checkmode, needed_bits).
*/
needed_bits = 0;
working_mode = mode;
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
owner == crgetuid(cr))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
needed_bits |= S_IXUSR;
if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
&check_privs, skipaclchk, cr, mnt_ns)) == 0) {
if (is_attr)
zrele(xzp);
return (secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits));
}
if (error && !check_privs) {
if (is_attr)
zrele(xzp);
return (error);
}
if (error && (flags & V_APPEND)) {
error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr,
mnt_ns);
}
if (error && check_privs) {
mode_t checkmode = 0;
/*
* First check for implicit owner permission on
* read_acl/read_attributes
*/
ASSERT(working_mode != 0);
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
owner == crgetuid(cr)))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
checkmode |= S_IXUSR;
error = secpolicy_vnode_access2(cr, ZTOI(check_zp), owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
error = secpolicy_vnode_chown(cr, owner);
if (error == 0 && (working_mode & ACE_WRITE_ACL))
error = secpolicy_vnode_setdac(cr, owner);
if (error == 0 && (working_mode &
(ACE_DELETE|ACE_DELETE_CHILD)))
error = secpolicy_vnode_remove(cr);
if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) {
error = secpolicy_vnode_chown(cr, owner);
}
if (error == 0) {
/*
* See if any bits other than those already checked
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
error = secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits);
}
if (is_attr)
zrele(xzp);
return (error);
}
/*
* Translate traditional unix S_IRUSR/S_IWUSR/S_IXUSR mode into
* NFSv4-style ZFS ACL format and call zfs_zaccess()
*/
int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr,
zidmap_t *mnt_ns)
{
return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr,
mnt_ns));
}
/*
* Access function for secpolicy_vnode_setattr
*/
int
zfs_zaccess_unix(void *zp, int mode, cred_t *cr)
{
int v4_mode = zfs_unix_to_v4(mode >> 6);
return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr, zfs_init_idmap));
}
/* See zfs_zaccess_delete() */
static const boolean_t zfs_write_implies_delete_child = B_TRUE;
/*
* Determine whether delete access should be granted.
*
* The following chart outlines how we handle delete permissions, which
* is how recent versions of Windows (Windows 2008) handle it. The
* efficiency comes from not having to check the parent ACL when the
* object itself grants delete:
*
* -------------------------------------------------------
* | Parent Dir | Target Object Permissions |
* | permissions | |
* -------------------------------------------------------
* | | ACL Allows | ACL Denies| Delete |
* | | Delete | Delete | unspecified|
* -------------------------------------------------------
* | ACL Allows | Permit | Deny * | Permit |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL Denies | Permit | Deny | Deny |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL specifies | | | |
* | only allow | Permit | Deny * | Permit |
* | write and | | | |
* | execute | | | |
* -------------------------------------------------------
* | ACL denies | | | |
* | write and | Permit | Deny | Deny |
* | execute | | | |
* -------------------------------------------------------
* ^
* |
* Re. execute permission on the directory: if that's missing,
* the vnode lookup of the target will fail before we get here.
*
* Re [*] in the table above: NFSv4 would normally Permit delete for
* these two cells of the matrix.
* See acl.h for notes on which ACE_... flags should be checked for which
* operations. Specifically, the NFSv4 committee recommendation is in
* conflict with the Windows interpretation of DENY ACEs, where DENY ACEs
* should take precedence ahead of ALLOW ACEs.
*
* This implementation always consults the target object's ACL first.
* If a DENY ACE is present on the target object that specifies ACE_DELETE,
* delete access is denied. If an ALLOW ACE with ACE_DELETE is present on
* the target object, access is allowed. If and only if no entries with
* ACE_DELETE are present in the object's ACL, check the container's ACL
* for entries with ACE_DELETE_CHILD.
*
* A summary of the logic implemented from the table above is as follows:
*
* First check for DENY ACEs that apply.
* If either target or container has a deny, EACCES.
*
* Delete access can then be summarized as follows:
* 1: The object to be deleted grants ACE_DELETE, or
* 2: The containing directory grants ACE_DELETE_CHILD.
* In a Windows system, that would be the end of the story.
* In this system, (2) has some complications...
* 2a: "sticky" bit on a directory adds restrictions, and
* 2b: existing ACEs from previous versions of ZFS may
* not carry ACE_DELETE_CHILD where they should, so we
* also allow delete when ACE_WRITE_DATA is granted.
*
* Note: 2b is technically a work-around for a prior bug,
* which hopefully can go away some day. For those who
* no longer need the work around, and for testing, this
* work-around is made conditional via the tunable:
* zfs_write_implies_delete_child
*/
int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr, zidmap_t *mnt_ns)
{
uint32_t wanted_dirperms;
uint32_t dzp_working_mode = 0;
uint32_t zp_working_mode = 0;
int dzp_error, zp_error;
boolean_t dzpcheck_privs;
boolean_t zpcheck_privs;
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
return (SET_ERROR(EPERM));
/*
* Case 1:
* If target object grants ACE_DELETE then we are done. This is
* indicated by a return value of 0. For this case we don't worry
* about the sticky bit because sticky only applies to the parent
* directory and this is the child access result.
*
* If we encounter a DENY ACE here, we're also done (EACCES).
* Note that if we hit a DENY ACE here (on the target) it should
* take precedence over a DENY ACE on the container, so that when
* we have more complete auditing support we will be able to
* report an access failure against the specific target.
* (This is part of why we're checking the target first.)
*/
zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode,
&zpcheck_privs, B_FALSE, cr, mnt_ns);
if (zp_error == EACCES) {
/* We hit a DENY ACE. */
if (!zpcheck_privs)
return (SET_ERROR(zp_error));
return (secpolicy_vnode_remove(cr));
}
if (zp_error == 0)
return (0);
/*
* Case 2:
* If the containing directory grants ACE_DELETE_CHILD,
* or we're in backward compatibility mode and the
* containing directory has ACE_WRITE_DATA, allow.
* Case 2b is handled with wanted_dirperms.
*/
wanted_dirperms = ACE_DELETE_CHILD;
if (zfs_write_implies_delete_child)
wanted_dirperms |= ACE_WRITE_DATA;
dzp_error = zfs_zaccess_common(dzp, wanted_dirperms,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr, mnt_ns);
if (dzp_error == EACCES) {
/* We hit a DENY ACE. */
if (!dzpcheck_privs)
return (SET_ERROR(dzp_error));
return (secpolicy_vnode_remove(cr));
}
/*
* Cases 2a, 2b (continued)
*
* Note: dzp_working_mode now contains any permissions
* that were NOT granted. Therefore, if any of the
* wanted_dirperms WERE granted, we will have:
* dzp_working_mode != wanted_dirperms
* We're really asking if ANY of those permissions
* were granted, and if so, grant delete access.
*/
if (dzp_working_mode != wanted_dirperms)
dzp_error = 0;
/*
* dzp_error is 0 if the container granted us permissions to "modify".
* If we do not have permission via one or more ACEs, our current
* privileges may still permit us to modify the container.
*
* dzpcheck_privs is false when, for example, the FS is read-only.
* Otherwise, do privilege checks for the container.
*/
if (dzp_error != 0 && dzpcheck_privs) {
uid_t owner;
/*
* The secpolicy call needs the requested access and
* the current access mode of the container, but it
* only knows about Unix-style modes (VEXEC, VWRITE),
* so this must condense the fine-grained ACE bits into
* Unix modes.
*
* The VEXEC flag is easy, because we know that has
* always been checked before we get here (during the
* lookup of the target vnode). The container has not
* granted us permissions to "modify", so we do not set
* the VWRITE flag in the current access mode.
*/
owner = zfs_fuid_map_id(ZTOZSB(dzp),
KUID_TO_SUID(ZTOI(dzp)->i_uid), cr, ZFS_OWNER);
dzp_error = secpolicy_vnode_access2(cr, ZTOI(dzp),
owner, S_IXUSR, S_IWUSR|S_IXUSR);
}
if (dzp_error != 0) {
/*
* Note: We may have dzp_error = -1 here (from
* zfs_zaccess_common). Don't return that.
*/
return (SET_ERROR(EACCES));
}
/*
* At this point, we know that the directory permissions allow
* us to modify, but we still need to check for the additional
* restrictions that apply when the "sticky bit" is set.
*
* Yes, zfs_sticky_remove_access() also checks this bit, but
* checking it here and skipping the call below is nice when
* you're watching all of this with dtrace.
*/
if ((dzp->z_mode & S_ISVTX) == 0)
return (0);
/*
* zfs_sticky_remove_access will succeed if:
* 1. The sticky bit is absent.
* 2. We pass the sticky bit restrictions.
* 3. We have privileges that always allow file removal.
*/
return (zfs_sticky_remove_access(dzp, zp, cr));
}
int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
znode_t *tzp, cred_t *cr, zidmap_t *mnt_ns)
{
int add_perm;
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
add_perm = S_ISDIR(ZTOI(szp)->i_mode) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;
/*
* Rename permissions are a combination of delete permission and
* add file/subdir permission.
*/
/*
* First make sure we can do the delete portion.
*
* If that succeeds then check for add_file/add_subdir permissions.
*/
if ((error = zfs_zaccess_delete(sdzp, szp, cr, mnt_ns)))
return (error);
/*
* If we have a tzp, see if we can delete it.
*/
if (tzp) {
if ((error = zfs_zaccess_delete(tdzp, tzp, cr, mnt_ns)))
return (error);
}
/*
* Now check for add permissions
*/
error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr, mnt_ns);
return (error);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
index 48945b8af8c1..6b6293b9e482 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
@@ -1,2208 +1,2123 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/objlist.h>
#include <sys/zpl.h>
#include <linux/vfs_compat.h>
#include "zfs_comutil.h"
enum {
TOKEN_RO,
TOKEN_RW,
TOKEN_SETUID,
TOKEN_NOSETUID,
TOKEN_EXEC,
TOKEN_NOEXEC,
TOKEN_DEVICES,
TOKEN_NODEVICES,
TOKEN_DIRXATTR,
TOKEN_SAXATTR,
TOKEN_XATTR,
TOKEN_NOXATTR,
TOKEN_ATIME,
TOKEN_NOATIME,
TOKEN_RELATIME,
TOKEN_NORELATIME,
TOKEN_NBMAND,
TOKEN_NONBMAND,
TOKEN_MNTPOINT,
TOKEN_LAST,
};
static const match_table_t zpl_tokens = {
{ TOKEN_RO, MNTOPT_RO },
{ TOKEN_RW, MNTOPT_RW },
{ TOKEN_SETUID, MNTOPT_SETUID },
{ TOKEN_NOSETUID, MNTOPT_NOSETUID },
{ TOKEN_EXEC, MNTOPT_EXEC },
{ TOKEN_NOEXEC, MNTOPT_NOEXEC },
{ TOKEN_DEVICES, MNTOPT_DEVICES },
{ TOKEN_NODEVICES, MNTOPT_NODEVICES },
{ TOKEN_DIRXATTR, MNTOPT_DIRXATTR },
{ TOKEN_SAXATTR, MNTOPT_SAXATTR },
{ TOKEN_XATTR, MNTOPT_XATTR },
{ TOKEN_NOXATTR, MNTOPT_NOXATTR },
{ TOKEN_ATIME, MNTOPT_ATIME },
{ TOKEN_NOATIME, MNTOPT_NOATIME },
{ TOKEN_RELATIME, MNTOPT_RELATIME },
{ TOKEN_NORELATIME, MNTOPT_NORELATIME },
{ TOKEN_NBMAND, MNTOPT_NBMAND },
{ TOKEN_NONBMAND, MNTOPT_NONBMAND },
{ TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" },
{ TOKEN_LAST, NULL },
};
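/*
 * Free a vfs_t allocated by zfsvfs_parse_options(), along with any
 * mount point string duplicated while parsing the mntpoint option.
 */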
static void
zfsvfs_vfs_free(vfs_t *vfsp)
{
if (vfsp != NULL) {
if (vfsp->vfs_mntpoint != NULL)
kmem_strfree(vfsp->vfs_mntpoint);
kmem_free(vfsp, sizeof (vfs_t));
}
}
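/*
 * Apply a single parsed mount option token to the temporary vfs_t.  Each
 * recognized token records both the requested value and a matching
 * vfs_do_* flag so the override can later be replayed by
 * zfs_register_callbacks().
 */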
static int
zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp)
{
switch (token) {
case TOKEN_RO:
vfsp->vfs_readonly = B_TRUE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_RW:
vfsp->vfs_readonly = B_FALSE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_SETUID:
vfsp->vfs_setuid = B_TRUE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_NOSETUID:
vfsp->vfs_setuid = B_FALSE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_EXEC:
vfsp->vfs_exec = B_TRUE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_NOEXEC:
vfsp->vfs_exec = B_FALSE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_DEVICES:
vfsp->vfs_devices = B_TRUE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_NODEVICES:
vfsp->vfs_devices = B_FALSE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_DIRXATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_SAXATTR:
vfsp->vfs_xattr = ZFS_XATTR_SA;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_XATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_NOXATTR:
vfsp->vfs_xattr = ZFS_XATTR_OFF;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_ATIME:
vfsp->vfs_atime = B_TRUE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_NOATIME:
vfsp->vfs_atime = B_FALSE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_RELATIME:
vfsp->vfs_relatime = B_TRUE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NORELATIME:
vfsp->vfs_relatime = B_FALSE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NBMAND:
vfsp->vfs_nbmand = B_TRUE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_NONBMAND:
vfsp->vfs_nbmand = B_FALSE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_MNTPOINT:
vfsp->vfs_mntpoint = match_strdup(&args[0]);
if (vfsp->vfs_mntpoint == NULL)
return (SET_ERROR(ENOMEM));
break;
default:
break;
}
return (0);
}
/*
* Parse the raw mntopts and return a vfs_t describing the options.
*/
static int
zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
{
vfs_t *tmp_vfsp;
int error;
tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP);
if (mntopts != NULL) {
substring_t args[MAX_OPT_ARGS];
char *tmp_mntopts, *p, *t;
int token;
tmp_mntopts = t = kmem_strdup(mntopts);
if (tmp_mntopts == NULL)
return (SET_ERROR(ENOMEM));
while ((p = strsep(&t, ",")) != NULL) {
if (!*p)
continue;
args[0].to = args[0].from = NULL;
token = match_token(p, zpl_tokens, args);
error = zfsvfs_parse_option(p, token, args, tmp_vfsp);
if (error) {
kmem_strfree(tmp_mntopts);
zfsvfs_vfs_free(tmp_vfsp);
return (error);
}
}
kmem_strfree(tmp_mntopts);
}
*vfsp = tmp_vfsp;
return (0);
}
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_sb->s_flags & SB_RDONLY));
}
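/*
 * Handle a sync request for this filesystem.  When wait is set, commit
 * the ZIL for this zfsvfs, or force all pools to sync when no zfsvfs is
 * given; asynchronous requests are a no-op because the DMU already syncs
 * out txgs on its own.
 */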
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
(void) cr;
zfsvfs_t *zfsvfs = sb->s_fs_info;
/*
* Semantically, the only requirement is that the sync be initiated.
* The DMU syncs out txgs frequently, so there's nothing to do.
*/
if (!wait)
return (0);
if (zfsvfs != NULL) {
/*
* Sync a specific filesystem.
*/
dsl_pool_t *dp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
dp = dmu_objset_pool(zfsvfs->z_os);
/*
* If the system is shutting down, then skip any
* filesystems which may exist on a suspended pool.
*/
if (spa_suspended(dp->dp_spa)) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, 0);
zfs_exit(zfsvfs, FTAG);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
* run sync(1). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
}
return (0);
}
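/*
 * Property change callbacks.  Each callback below is registered with
 * dsl_prop_register() in zfs_register_callbacks() and is invoked when the
 * corresponding dataset property changes, keeping the in-memory zfsvfs_t
 * and super block flags in sync with the property value.  They are also
 * invoked directly to apply temporary mount-option overrides.
 */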
static void
atime_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
/*
* Update SB_NOATIME bit in VFS super block. Since atime update is
* determined by atime_needs_update(), atime_needs_update() needs to
* return false if atime is turned off, and not unconditionally return
* false if atime is turned on.
*/
if (newval)
sb->s_flags &= ~SB_NOATIME;
else
sb->s_flags |= SB_NOATIME;
}
static void
relatime_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_relatime = newval;
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
else
zfsvfs->z_xattr_sa = B_FALSE;
}
}
static void
acltype_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
switch (newval) {
case ZFS_ACLTYPE_NFSV4:
case ZFS_ACLTYPE_OFF:
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
break;
case ZFS_ACLTYPE_POSIX:
#ifdef CONFIG_FS_POSIX_ACL
zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIX;
zfsvfs->z_sb->s_flags |= SB_POSIXACL;
#else
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
break;
default:
break;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zfsvfs->z_max_blksz = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval)
sb->s_flags |= SB_RDONLY;
else
sb->s_flags &= ~SB_RDONLY;
}
static void
devices_changed_cb(void *arg, uint64_t newval)
{
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
}
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval == TRUE)
sb->s_flags |= SB_MANDLOCK;
else
sb->s_flags &= ~SB_MANDLOCK;
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_show_ctldir = newval;
}
static void
acl_mode_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_mode = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_acl_inherit = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
struct dsl_dataset *ds = NULL;
objset_t *os = NULL;
zfsvfs_t *zfsvfs = NULL;
int error = 0;
ASSERT(vfsp);
zfsvfs = vfsp->vfs_data;
ASSERT(zfsvfs);
os = zfsvfs->z_os;
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) {
vfsp->vfs_do_readonly = B_TRUE;
vfsp->vfs_readonly = B_TRUE;
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (vfsp->vfs_do_readonly)
readonly_changed_cb(zfsvfs, vfsp->vfs_readonly);
if (vfsp->vfs_do_setuid)
setuid_changed_cb(zfsvfs, vfsp->vfs_setuid);
if (vfsp->vfs_do_exec)
exec_changed_cb(zfsvfs, vfsp->vfs_exec);
if (vfsp->vfs_do_devices)
devices_changed_cb(zfsvfs, vfsp->vfs_devices);
if (vfsp->vfs_do_xattr)
xattr_changed_cb(zfsvfs, vfsp->vfs_xattr);
if (vfsp->vfs_do_atime)
atime_changed_cb(zfsvfs, vfsp->vfs_atime);
if (vfsp->vfs_do_relatime)
relatime_changed_cb(zfsvfs, vfsp->vfs_relatime);
if (vfsp->vfs_do_nbmand)
nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand);
return (0);
unregister:
dsl_prop_unregister_all(ds, zfsvfs);
return (error);
}
/*
* Takes a dataset, a property, a value and that value's setpoint as
* found in the ZAP. Checks if the property has been changed in the vfs.
* If so, val and setpoint will be overwritten with updated content.
* Otherwise, they are left unchanged.
*/
int
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
char *setpoint)
{
int error;
zfsvfs_t *zfvp;
vfs_t *vfsp;
objset_t *os;
uint64_t tmp = *val;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
return (error);
if (dmu_objset_type(os) != DMU_OST_ZFS)
return (EINVAL);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
mutex_exit(&os->os_user_ptr_lock);
if (zfvp == NULL)
return (ESRCH);
vfsp = zfvp->z_vfs;
switch (zfs_prop) {
case ZFS_PROP_ATIME:
if (vfsp->vfs_do_atime)
tmp = vfsp->vfs_atime;
break;
case ZFS_PROP_RELATIME:
if (vfsp->vfs_do_relatime)
tmp = vfsp->vfs_relatime;
break;
case ZFS_PROP_DEVICES:
if (vfsp->vfs_do_devices)
tmp = vfsp->vfs_devices;
break;
case ZFS_PROP_EXEC:
if (vfsp->vfs_do_exec)
tmp = vfsp->vfs_exec;
break;
case ZFS_PROP_SETUID:
if (vfsp->vfs_do_setuid)
tmp = vfsp->vfs_setuid;
break;
case ZFS_PROP_READONLY:
if (vfsp->vfs_do_readonly)
tmp = vfsp->vfs_readonly;
break;
case ZFS_PROP_XATTR:
if (vfsp->vfs_do_xattr)
tmp = vfsp->vfs_xattr;
break;
case ZFS_PROP_NBMAND:
if (vfsp->vfs_do_nbmand)
tmp = vfsp->vfs_nbmand;
break;
default:
return (ENOENT);
}
if (tmp != *val) {
if (setpoint)
(void) strcpy(setpoint, "temporary");
*val = tmp;
}
return (0);
}
/*
* Associate this zfsvfs with the given objset, which must be owned.
* This will cache a bunch of on-disk state from the objset in the
* zfsvfs.
*/
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
uint64_t val;
zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
zfsvfs->z_os = os;
error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
if (error != 0)
return (error);
if (zfsvfs->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printk("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.\n", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
return (SET_ERROR(ENOTSUP));
}
error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
if (error != 0)
return (error);
zfsvfs->z_norm = (int)val;
error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
if (error != 0)
return (error);
zfsvfs->z_utf8 = (val != 0);
error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
if (error != 0)
return (error);
zfsvfs->z_case = (uint_t)val;
if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val)) != 0)
return (error);
zfsvfs->z_acl_type = (uint_t)val;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
zfsvfs->z_case == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
uint64_t sa_obj = 0;
if (zfsvfs->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
if (error != 0)
return (error);
error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
if ((error == 0) && (val == ZFS_XATTR_SA))
zfsvfs->z_xattr_sa = B_TRUE;
}
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
&zfsvfs->z_root);
if (error != 0)
return (error);
ASSERT(zfsvfs->z_root != 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&zfsvfs->z_unlinkedobj);
if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
8, 1, &zfsvfs->z_userquota_obj);
if (error == ENOENT)
zfsvfs->z_userquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
8, 1, &zfsvfs->z_groupquota_obj);
if (error == ENOENT)
zfsvfs->z_groupquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
8, 1, &zfsvfs->z_projectquota_obj);
if (error == ENOENT)
zfsvfs->z_projectquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
zfsvfs->z_userobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
8, 1, &zfsvfs->z_groupobjquota_obj);
if (error == ENOENT)
zfsvfs->z_groupobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
8, 1, &zfsvfs->z_projectobjquota_obj);
if (error == ENOENT)
zfsvfs->z_projectobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
zfsvfs->z_fuid_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
&zfsvfs->z_shares_dir);
if (error == ENOENT)
zfsvfs->z_shares_dir = 0;
else if (error != 0)
return (error);
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
if (error != 0)
return (error);
if (zfsvfs->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
return (0);
}
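/*
 * Allocate a zfsvfs_t and take ownership of the named objset.  Snapshots
 * (names containing '@') are always owned read-only.  On failure the
 * partially constructed zfsvfs_t is freed and the objset disowned.
 */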
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
objset_t *os;
zfsvfs_t *zfsvfs;
int error;
boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs, &os);
if (error != 0) {
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
error = zfsvfs_create_impl(zfvp, zfsvfs, os);
return (error);
}
/*
* Note: zfsvfs is assumed to be malloc'd, and will be freed by this function
* on a failure. Do not pass in a statically allocated zfsvfs.
*/
int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
zfsvfs->z_vfs = NULL;
zfsvfs->z_sb = NULL;
zfsvfs->z_parent = zfsvfs;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
ZFS_TEARDOWN_INIT(zfsvfs);
rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
int size = MIN(1 << (highbit64(zfs_object_mutex_size) - 1),
ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (int i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
dmu_objset_disown(os, B_TRUE, zfsvfs);
*zfvp = NULL;
zfsvfs_free(zfsvfs);
return (error);
}
zfsvfs->z_drain_task = TASKQID_INVALID;
zfsvfs->z_draining = B_FALSE;
zfsvfs->z_drain_cancel = B_TRUE;
*zfvp = zfsvfs;
return (0);
}
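/*
 * Finish setting up a zfsvfs_t: register property callbacks, open the
 * ZIL, and, when mounting, drain the unlinked set and replay any
 * outstanding intent log records before publishing the zfsvfs as the
 * objset's user pointer.
 */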
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
int error;
boolean_t readonly = zfs_is_readonly(zfsvfs);
error = zfs_register_callbacks(zfsvfs->z_vfs);
if (error)
return (error);
/*
 * If we are not mounting (i.e., online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
/*
* During replay we remove the read only flag to
* allow replays to succeed.
*/
if (readonly != 0) {
readonly_changed_cb(zfsvfs, B_FALSE);
} else {
zap_stats_t zs;
if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
&zs) == 0) {
dataset_kstats_update_nunlinks_kstat(
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
dsl_dir_t *dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dd->dd_activity_cancelled = B_FALSE;
}
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
if (zil_replay_disable) {
zil_destroy(zfsvfs->z_log, B_FALSE);
} else {
zfsvfs->z_replay = B_TRUE;
zil_replay(zfsvfs->z_os, zfsvfs,
zfs_replay_vector);
zfsvfs->z_replay = B_FALSE;
}
}
/* restore readonly bit */
if (readonly != 0)
readonly_changed_cb(zfsvfs, B_TRUE);
} else {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, !=, NULL);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data,
&zfsvfs->z_kstat.dk_zil_sums);
}
/*
* Set the objset user_ptr to track its zfsvfs.
*/
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
int i, size = zfsvfs->z_hold_size;
zfs_fuid_destroy(zfsvfs);
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
list_destroy(&zfsvfs->z_all_znodes);
ZFS_TEARDOWN_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_teardown_inactive_lock);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
zfsvfs_vfs_free(zfsvfs->z_vfs);
dataset_kstats_destroy(&zfsvfs->z_kstat);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
if (!dmu_objset_is_snapshot(os))
dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
#ifdef HAVE_MLSLABEL
/*
* Check that the hex label string is appropriate for the dataset being
* mounted into the global_zone proper.
*
* Return an error if the hex label string is not default or
* admin_low/admin_high. For admin_low labels, the corresponding
* dataset must be readonly.
*/
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
/* must be readonly */
uint64_t rdonly;
if (dsl_prop_get_integer(dsname,
zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
return (SET_ERROR(EACCES));
return (rdonly ? 0 : SET_ERROR(EACCES));
}
return (SET_ERROR(EACCES));
}
#endif /* HAVE_MLSLABEL */
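/*
 * Override the statfs block and inode counts with the project quota
 * limits when project accounting applies to this znode.  The block quota
 * ZAP is keyed by the project fuid string, while object counts are
 * tracked under the DMU_OBJACCT_PREFIX-prefixed key.
 */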
static int
zfs_statfs_project(zfsvfs_t *zfsvfs, znode_t *zp, struct kstatfs *statp,
uint32_t bshift)
{
char buf[20 + DMU_OBJACCT_PREFIX_LEN];
uint64_t offset = DMU_OBJACCT_PREFIX_LEN;
uint64_t quota;
uint64_t used;
int err;
strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
err = zfs_id_to_fuidstr(zfsvfs, NULL, zp->z_projid, buf + offset,
sizeof (buf) - offset, B_FALSE);
if (err)
return (err);
if (zfsvfs->z_projectquota_obj == 0)
goto objs;
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
goto objs;
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf + offset, 8, 1, &used);
if (unlikely(err == ENOENT)) {
uint32_t blksize;
u_longlong_t nblocks;
/*
 * Quota accounting is async, so a race is possible here.
* There is at least one object with the given project ID.
*/
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
if (unlikely(zp->z_blksz == 0))
blksize = zfsvfs->z_max_blksz;
used = blksize * nblocks;
} else if (err) {
return (err);
}
statp->f_blocks = quota >> bshift;
statp->f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0;
statp->f_bavail = statp->f_bfree;
objs:
if (zfsvfs->z_projectobjquota_obj == 0)
return (0);
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectobjquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
return (0);
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf, 8, 1, &used);
if (unlikely(err == ENOENT)) {
/*
 * Quota accounting is async, so a race is possible here.
* There is at least one object with the given project ID.
*/
used = 1;
} else if (err) {
return (err);
}
statp->f_files = quota;
statp->f_ffree = (quota > used) ? (quota - used) : 0;
return (0);
}
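/*
 * Fill in kstatfs for statfs(2).  Space is reported in units of the
 * filesystem's maximum block size, inode counts are estimated from the
 * number of objects that could still fit, and project quotas (when
 * enabled and inherited) further constrain the reported values.
 */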
int
zfs_statvfs(struct inode *ip, struct kstatfs *statp)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t refdbytes, availbytes, usedobjs, availobjs;
int err = 0;
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
dmu_objset_space(zfsvfs->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
uint64_t fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
/*
* The underlying storage pool actually uses multiple block
* size. Under Solaris frsize (fragment size) is reported as
* the smallest block size we support, and bsize (block size)
* as the filesystem's maximum block size. Unfortunately,
* under Linux the fragment size and block size are often used
* interchangeably. Thus we are forced to report both of them
* as the filesystem's maximum block size.
*/
statp->f_frsize = zfsvfs->z_max_blksz;
statp->f_bsize = zfsvfs->z_max_blksz;
uint32_t bshift = fls(statp->f_bsize) - 1;
/*
* The following report "total" blocks of various kinds in
* the file system, but reported in terms of f_bsize - the
* "preferred" size.
*/
/* Round up so we never have a filesystem using 0 blocks. */
refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
statp->f_blocks = (refdbytes + availbytes) >> bshift;
statp->f_bfree = availbytes >> bshift;
statp->f_bavail = statp->f_bfree; /* no root reservation */
/*
* statvfs() should really be called statufs(), because it assumes
* static metadata. ZFS doesn't preallocate files, so the best
* we can do is report the max that could possibly fit in f_files,
* and that minus the number actually used in f_ffree.
* For f_ffree, report the smaller of the number of objects available
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
statp->f_files = statp->f_ffree + usedobjs;
statp->f_fsid.val[0] = (uint32_t)fsid;
statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
statp->f_type = ZFS_SUPER_MAGIC;
statp->f_namelen = MAXNAMELEN - 1;
/*
* We have all of 40 characters to stuff a string here.
* Is there anything useful we could/should provide?
*/
memset(statp->f_spare, 0, sizeof (statp->f_spare));
if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
dmu_objset_projectquota_present(zfsvfs->z_os)) {
znode_t *zp = ITOZ(ip);
if (zp->z_pflags & ZFS_PROJINHERIT && zp->z_projid &&
zpl_is_valid_projid(zp->z_projid))
err = zfs_statfs_project(zfsvfs, zp, statp, bshift);
}
zfs_exit(zfsvfs, FTAG);
return (err);
}
static int
zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
{
znode_t *rootzp;
int error;
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0)
*ipp = ZTOI(rootzp);
zfs_exit(zfsvfs, FTAG);
return (error);
}
/*
* Linux kernels older than 3.1 do not support a per-filesystem shrinker.
* To accommodate this we must improvise and manually walk the list of znodes
* attempting to prune dentries in order to be able to drop the inodes.
*
* To avoid scanning the same znodes multiple times they are always rotated
* to the end of the z_all_znodes list. New znodes are inserted at the
* end of the list so we're always scanning the oldest znodes first.
*/
static int
zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
{
znode_t **zp_array, *zp;
int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
int objects = 0;
int i = 0, j = 0;
zp_array = vmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);
mutex_enter(&zfsvfs->z_znodes_lock);
while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {
if ((i++ > nr_to_scan) || (j >= max_array))
break;
ASSERT(list_link_active(&zp->z_link_node));
list_remove(&zfsvfs->z_all_znodes, zp);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
/* Skip active znodes and .zfs entries */
if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
continue;
if (igrab(ZTOI(zp)) == NULL)
continue;
zp_array[j] = zp;
j++;
}
mutex_exit(&zfsvfs->z_znodes_lock);
for (i = 0; i < j; i++) {
zp = zp_array[i];
ASSERT3P(zp, !=, NULL);
d_prune_aliases(ZTOI(zp));
if (atomic_read(&ZTOI(zp)->i_count) == 1)
objects++;
zrele(zp);
}
vmem_free(zp_array, max_array * sizeof (znode_t *));
return (objects);
}
/*
* The ARC has requested that the filesystem drop entries from the dentry
* and inode caches. This can occur when the ARC needs to free meta data
* blocks but can't because they are all pinned by entries in these caches.
*/
int
zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
int error = 0;
struct shrinker *shrinker = &sb->s_shrink;
struct shrink_control sc = {
.nr_to_scan = nr_to_scan,
.gfp_mask = GFP_KERNEL,
};
if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
return (error);
#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
defined(SHRINK_CONTROL_HAS_NID) && \
defined(SHRINKER_NUMA_AWARE)
if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
*objects = 0;
for_each_online_node(sc.nid) {
*objects += (*shrinker->scan_objects)(shrinker, &sc);
/*
* reset sc.nr_to_scan, modified by
* scan_objects == super_cache_scan
*/
sc.nr_to_scan = nr_to_scan;
}
} else {
*objects = (*shrinker->scan_objects)(shrinker, &sc);
}
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SINGLE_SHRINKER_CALLBACK)
*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define D_PRUNE_ALIASES_IS_DEFAULT
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif
#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef D_PRUNE_ALIASES_IS_DEFAULT
/*
* Fall back to zfs_prune_aliases if the kernel's per-superblock
* shrinker couldn't free anything, possibly due to the inodes being
* allocated in a different memcg.
*/
if (*objects == 0)
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#endif
zfs_exit(zfsvfs, FTAG);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"pruning, nr_to_scan=%lu objects=%d error=%d\n",
nr_to_scan, *objects, error);
return (error);
}
/*
* Teardown the zfsvfs_t.
*
* Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
znode_t *zp;
zfs_unlinked_drain_stop_wait(zfsvfs);
/*
* If someone has not already unmounted this file system,
* drain the zrele_taskq to ensure all active references to the
 * zfsvfs_t have been handled; only then can it be safely destroyed.
*/
if (zfsvfs->z_os) {
/*
* If we're unmounting we have to wait for the list to
* drain completely.
*
* If we're not unmounting there's no guarantee the list
* will drain completely, but iputs run from the taskq
* may add the parents of dir-based xattrs to the taskq
* so we want to wait for these.
*
* We can safely read z_nr_znodes without locking because the
* VFS has already blocked operations which add to the
* z_all_znodes list and thus increment z_nr_znodes.
*/
int round = 0;
while (zfsvfs->z_nr_znodes > 0) {
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
if (++round > 1 && !unmounting)
break;
}
}
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
* We purge the parent filesystem's super block as the
* parent filesystem and all of its snapshots have their
* inode's super block set to the parent's filesystem's
* super block. Note, 'z_parent' is self referential
* for non-snapshots.
*/
shrink_dcache_sb(zfsvfs->z_parent->z_sb);
}
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
if (zfsvfs->z_log) {
zil_close(zfsvfs->z_log);
zfsvfs->z_log = NULL;
}
rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);
/*
 * If we are not unmounting (i.e., online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed then just bail out now.
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
/*
* At this point there are no VFS ops active, and any new VFS ops
* will fail with EIO since we have z_teardown_lock for writer (only
* relevant for forced unmount).
*
* Release all holds on dbufs. We also grab an extra reference to all
* the remaining inodes so that the kernel does not attempt to free
* any inodes of a suspended fs. This can cause deadlocks since the
* zfs_resume_fs() process may involve starting threads, which might
* attempt to free unreferenced inodes to free up memory for the new
* thread.
*/
if (!unmounting) {
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
if (zp->z_sa_hdl)
zfs_znode_dmu_fini(zp);
if (igrab(ZTOI(zp)) != NULL)
zp->z_suspended = B_TRUE;
}
mutex_exit(&zfsvfs->z_znodes_lock);
}
/*
* If we are unmounting, set the unmounted flag and let new VFS ops
* unblock. zfs_inactive will have the unmounted behavior, and all
* other VFS ops will fail with EIO.
*/
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
 * zfsvfs, so just return as the properties had already been
 * unregistered and cached data had been evicted before.
*/
if (zfsvfs->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
objset_t *os = zfsvfs->z_os;
boolean_t os_dirty = B_FALSE;
for (int t = 0; t < TXG_SIZE; t++) {
if (dmu_objset_is_dirty(os, t)) {
os_dirty = B_TRUE;
break;
}
}
if (!zfs_is_readonly(zfsvfs) && os_dirty) {
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
}
dmu_objset_evict_dbufs(zfsvfs->z_os);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
dsl_dir_cancel_waiters(dd);
return (0);
}
#if defined(HAVE_SUPER_SETUP_BDI_NAME)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif
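/*
 * Mount a ZFS filesystem on the given super block: parse the raw mount
 * options, create and wire up the zfsvfs_t, configure the super block
 * (block size, operations, xattr handlers), and allocate the root inode
 * and dentry.  Snapshots are mounted read-only with sync disabled.
 */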
int
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
{
const char *osname = zm->mnt_osname;
struct inode *root_inode = NULL;
uint64_t recordsize;
int error = 0;
zfsvfs_t *zfsvfs = NULL;
vfs_t *vfs = NULL;
int canwrite;
int dataset_visible_zone;
ASSERT(zm);
ASSERT(osname);
dataset_visible_zone = zone_dataset_visible(osname, &canwrite);
/*
* Refuse to mount a filesystem if we are in a namespace and the
* dataset is not visible or writable in that namespace.
*/
if (!INGLOBALZONE(curproc) &&
(!dataset_visible_zone || !canwrite)) {
return (SET_ERROR(EPERM));
}
error = zfsvfs_parse_options(zm->mnt_data, &vfs);
if (error)
return (error);
/*
* If a non-writable filesystem is being mounted without the
* read-only flag, pretend it was set, as done for snapshots.
*/
if (!canwrite)
vfs->vfs_readonly = true;
error = zfsvfs_create(osname, vfs->vfs_readonly, &zfsvfs);
if (error) {
zfsvfs_vfs_free(vfs);
goto out;
}
if ((error = dsl_prop_get_integer(osname, "recordsize",
&recordsize, NULL))) {
zfsvfs_vfs_free(vfs);
goto out;
}
vfs->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfs;
zfsvfs->z_sb = sb;
sb->s_fs_info = zfsvfs;
sb->s_magic = ZFS_SUPER_MAGIC;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_time_gran = 1;
sb->s_blocksize = recordsize;
sb->s_blocksize_bits = ilog2(recordsize);
error = -zpl_bdi_setup(sb, "zfs");
if (error)
goto out;
sb->s_bdi->ra_pages = 0;
/* Set callback operations for the file system. */
sb->s_op = &zpl_super_operations;
sb->s_xattr = zpl_xattr_handlers;
sb->s_export_op = &zpl_export_operations;
/* Set features for file system. */
zfs_set_fuid_feature(zfsvfs);
if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
uint64_t pval;
atime_changed_cb(zfsvfs, B_FALSE);
readonly_changed_cb(zfsvfs, B_TRUE);
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zfsvfs, pval);
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acltype_changed_cb(zfsvfs, pval);
zfsvfs->z_issnap = B_TRUE;
zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
zfsvfs->z_snap_defer_time = jiffies;
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
} else {
if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
goto out;
}
/* Allocate a root inode for the filesystem. */
error = zfs_root(zfsvfs, &root_inode);
if (error) {
(void) zfs_umount(sb);
zfsvfs = NULL; /* avoid double-free; first in zfs_umount */
goto out;
}
/* Allocate a root dentry for the filesystem */
sb->s_root = d_make_root(root_inode);
if (sb->s_root == NULL) {
(void) zfs_umount(sb);
zfsvfs = NULL; /* avoid double-free; first in zfs_umount */
error = SET_ERROR(ENOMEM);
goto out;
}
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
if (error) {
if (zfsvfs != NULL) {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
}
/*
* make sure we don't have dangling sb->s_fs_info which
* zfs_preumount will use.
*/
sb->s_fs_info = NULL;
}
return (error);
}
/*
* Called when an unmount is requested and certain sanity checks have
* already passed. At this point no dentries or inodes have been reclaimed
* from their respective caches. We drop the extra reference on the .zfs
* control directory to allow everything to be reclaimed. All snapshots
* must already have been unmounted to reach this point.
*/
void
zfs_preumount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
/* zfsvfs is NULL when zfs_domount fails during mount */
if (zfsvfs) {
zfs_unlinked_drain_stop_wait(zfsvfs);
zfsctl_destroy(sb->s_fs_info);
/*
* Wait for zrele_async before entering evict_inodes in
 * generic_shutdown_super. The reason we must finish before
 * evict_inodes is that, when lazytime is on or when zfs_purgedir
 * calls zfs_zget, zrele would bump i_count from 0 to 1. This
 * would race with the i_count check in evict_inodes, which could
 * then destroy the inode while we are still using it.
*
* We wait for two passes. xattr directories in the first pass
* may add xattr entries in zfs_purgedir, so in the second pass
* we wait for them. We don't use taskq_wait here because it is
* a pool wide taskq. Other mounted filesystems can constantly
* do zrele_async and there's no guarantee when taskq will be
* empty.
*/
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
}
}
/*
 * Called once all other unmount-related teardown has occurred.
* It is our responsibility to release any remaining infrastructure.
*/
int
zfs_umount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
objset_t *os;
if (zfsvfs->z_arc_prune != NULL)
arc_remove_prune_callback(zfsvfs->z_arc_prune);
VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
os = zfsvfs->z_os;
zpl_bdi_destroy(sb);
/*
* z_os will be NULL if there was an error in
* attempting to reopen zfsvfs.
*/
if (os != NULL) {
/*
* Unset the objset user_ptr.
*/
mutex_enter(&os->os_user_ptr_lock);
dmu_objset_set_user(os, NULL);
mutex_exit(&os->os_user_ptr_lock);
/*
* Finally release the objset
*/
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
zfsvfs_free(zfsvfs);
return (0);
}
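/*
 * Handle a remount request.  Snapshots and datasets on non-writable
 * pools may not be remounted read-write.  Mount options are re-parsed
 * and property callbacks re-registered so that temporary overrides take
 * effect immediately.
 */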
int
zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
vfs_t *vfsp;
boolean_t issnap = dmu_objset_is_snapshot(zfsvfs->z_os);
int error;
if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
!(*flags & SB_RDONLY)) {
*flags |= SB_RDONLY;
return (EROFS);
}
error = zfsvfs_parse_options(zm->mnt_data, &vfsp);
if (error)
return (error);
if (!zfs_is_readonly(zfsvfs) && (*flags & SB_RDONLY))
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
zfs_unregister_callbacks(zfsvfs);
zfsvfs_vfs_free(zfsvfs->z_vfs);
vfsp->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfsp;
if (!issnap)
(void) zfs_register_callbacks(vfsp);
return (error);
}
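/*
 * Reconstitute an inode from an NFS file handle.  Short fids encode the
 * object number and generation; long fids additionally carry the objset
 * id and generation and are used for entries under the .zfs snapshot
 * directory.
 */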
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
znode_t *zp;
uint64_t object = 0;
uint64_t fid_gen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
int i, err;
*ipp = NULL;
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
zfid_short_t *zfid = (zfid_short_t *)fidp;
for (i = 0; i < sizeof (zfid->zf_object); i++)
object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
return (SET_ERROR(EINVAL));
}
/* LONG_FID_LEN means snapdirs */
if (fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint64_t objsetid = 0;
uint64_t setgen = 0;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
if (objsetid != ZFSCTL_INO_SNAPDIRS - object) {
dprintf("snapdir fid: objsetid (%llu) != "
"ZFSCTL_INO_SNAPDIRS (%llu) - object (%llu)\n",
objsetid, ZFSCTL_INO_SNAPDIRS, object);
return (SET_ERROR(EINVAL));
}
if (fid_gen > 1 || setgen != 0) {
dprintf("snapdir fid: fid_gen (%llu) and setgen "
"(%llu)\n", fid_gen, setgen);
return (SET_ERROR(EINVAL));
}
return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp));
}
if ((err = zfs_enter(zfsvfs, FTAG)) != 0)
return (err);
/* A zero fid_gen means we are in the .zfs control directories */
if (fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
*ipp = zfsvfs->z_ctldir;
ASSERT(*ipp != NULL);
if (object == ZFSCTL_INO_SNAPDIR) {
VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
0, kcred, NULL, NULL) == 0);
} else {
/*
* Must have an existing ref, so igrab()
* cannot return NULL
*/
VERIFY3P(igrab(*ipp), !=, NULL);
}
zfs_exit(zfsvfs, FTAG);
return (0);
}
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
zfs_exit(zfsvfs, FTAG);
return (err);
}
/* Don't export xattr stuff */
if (zp->z_pflags & ZFS_XATTR) {
zrele(zp);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOENT));
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
if (zp_gen == 0)
zp_gen = 1;
if ((fid_gen == 0) && (zfsvfs->z_root == object))
fid_gen = zp_gen;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
fid_gen);
zrele(zp);
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(ENOENT));
}
*ipp = ZTOI(zp);
if (*ipp)
zfs_znode_update_vfs(ITOZ(*ipp));
zfs_exit(zfsvfs, FTAG);
return (0);
}
/*
* Block out VFS ops and close zfsvfs_t
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
* dataset and objset intact so that they can be atomically handed off during
* a subsequent rollback or recv operation and the resume thereafter.
*/
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
int error;
if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
return (error);
return (0);
}
/*
* Rebuild SA and release VOPs. Note that ownership of the underlying dataset
* is an invariant across any of the operations that can be performed while the
* filesystem was suspended. Whether it succeeded or failed, the preconditions
* are the same: the relevant objset and associated dataset are owned by
* zfsvfs, held, and long held on entry.
*/
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
int err, err2;
znode_t *zp;
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just update the objset_t, as the one we
* had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
err = zfsvfs_init(zfsvfs, os);
if (err != 0)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
zfs_set_fuid_feature(zfsvfs);
zfsvfs->z_rollback_time = jiffies;
/*
* Attempt to re-establish all the active inodes with their
* dbufs. If a zfs_rezget() fails, then we unhash the inode
* and mark it stale. This prevents a collision if a new
* inode/object is created which must use the same inode
 * number. The stale inode will be released when the
* VFS prunes the dentry holding the remaining references
* on the stale inode.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
err2 = zfs_rezget(zp);
if (err2) {
zpl_d_drop_aliases(ZTOI(zp));
remove_inode_hash(ZTOI(zp));
}
/* see comment in zfs_suspend_fs() */
if (zp->z_suspended) {
zfs_zrele_async(zp);
zp->z_suspended = B_FALSE;
}
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (!zfs_is_readonly(zfsvfs) && !zfsvfs->z_unmounted) {
/*
* zfs_suspend_fs() could have interrupted freeing
* of dnodes. We need to restart this freeing so
* that we don't "leak" the space.
*/
zfs_unlinked_drain(zfsvfs);
}
/*
* Most of the time zfs_suspend_fs is used for changing the contents
* of the underlying dataset. ZFS rollback and receive operations
* might create files for which negative dentries are present in
* the cache. Since walking the dcache would require a lot of GPL-only
* code duplication, it's much easier on these rather rare occasions
* just to flush the whole dcache for the given dataset/filesystem.
*/
shrink_dcache_sb(zfsvfs->z_sb);
bail:
if (err != 0)
zfsvfs->z_unmounted = B_TRUE;
/* release the VFS ops */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err != 0) {
/*
* Since we couldn't setup the sa framework, try to force
* unmount this file system.
*/
if (zfsvfs->z_os)
(void) zfs_umount(zfsvfs->z_sb);
}
return (err);
}
/*
* Release VOPs and unmount a suspended filesystem.
*/
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just hold and rele it to update the
* objset_t, as the one we had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
zfsvfs->z_os = os;
/* release the VOPs */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.
*/
(void) zfs_umount(zfsvfs->z_sb);
zfsvfs->z_unmounted = B_TRUE;
return (0);
}
/*
* Automounted snapshots rely on periodic revalidation
 * to keep snapshots from being automatically unmounted.
*/
inline void
zfs_exit_fs(zfsvfs_t *zfsvfs)
{
if (!zfsvfs->z_issnap)
return;
if (time_after(jiffies, zfsvfs->z_snap_defer_time +
MAX(zfs_expire_snapshot * HZ / 2, HZ))) {
zfsvfs->z_snap_defer_time = jiffies;
zfsctl_snapshot_unmount_delay(zfsvfs->z_os->os_spa,
dmu_objset_id(zfsvfs->z_os),
zfs_expire_snapshot);
}
}
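/*
 * Upgrade the on-disk ZPL version of this filesystem.  Downgrades are
 * rejected, the target version must be supported by the pool's SPA
 * version, and upgrading to ZPL_VERSION_SA also creates the SA master
 * node and registers the SA upgrade callback.
 */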
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
int error;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY(0 == sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
"from %llu to %llu", zfsvfs->z_version, newvers);
dmu_tx_commit(tx);
zfsvfs->z_version = newvers;
os->os_version = newvers;
zfs_set_fuid_feature(zfsvfs);
return (0);
}
-/*
- * Read a property stored within the master node.
- */
-int
-zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
-{
- uint64_t *cached_copy = NULL;
-
- /*
- * Figure out where in the objset_t the cached copy would live, if it
- * is available for the requested property.
- */
- if (os != NULL) {
- switch (prop) {
- case ZFS_PROP_VERSION:
- cached_copy = &os->os_version;
- break;
- case ZFS_PROP_NORMALIZE:
- cached_copy = &os->os_normalization;
- break;
- case ZFS_PROP_UTF8ONLY:
- cached_copy = &os->os_utf8only;
- break;
- case ZFS_PROP_CASE:
- cached_copy = &os->os_casesensitivity;
- break;
- default:
- break;
- }
- }
- if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
- *value = *cached_copy;
- return (0);
- }
-
- /*
- * If the property wasn't cached, look up the file system's value for
- * the property. For the version property, we look up a slightly
- * different string.
- */
- const char *pname;
- int error = ENOENT;
- if (prop == ZFS_PROP_VERSION)
- pname = ZPL_VERSION_STR;
- else
- pname = zfs_prop_to_name(prop);
-
- if (os != NULL) {
- ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
- error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
- }
-
- if (error == ENOENT) {
- /* No value set, use the default value */
- switch (prop) {
- case ZFS_PROP_VERSION:
- *value = ZPL_VERSION;
- break;
- case ZFS_PROP_NORMALIZE:
- case ZFS_PROP_UTF8ONLY:
- *value = 0;
- break;
- case ZFS_PROP_CASE:
- *value = ZFS_CASE_SENSITIVE;
- break;
- case ZFS_PROP_ACLTYPE:
- *value = ZFS_ACLTYPE_OFF;
- break;
- default:
- return (error);
- }
- error = 0;
- }
-
- /*
- * If one of the methods for getting the property value above worked,
- * copy it into the objset_t's cache.
- */
- if (error == 0 && cached_copy != NULL) {
- *cached_copy = *value;
- }
-
- return (error);
-}
-
/*
* Return true if the corresponding vfs's unmounted flag is set.
* Otherwise return false.
* If this function returns true we know VFS unmount has been initiated.
*/
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
zfsvfs_t *zfvp;
boolean_t unmounted = B_FALSE;
ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
if (zfvp != NULL && zfvp->z_unmounted)
unmounted = B_TRUE;
mutex_exit(&os->os_user_ptr_lock);
return (unmounted);
}
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
/*
 * We don't need to do anything here; the devname is always current by
* virtue of zfsvfs->z_sb->s_op->show_devname.
*/
(void) oldname, (void) newname;
}
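/*
 * Module init/fini hooks for the ZPL layer: set up the .zfs control
 * directory and znode caches, register the DMU_OST_ZFS object type,
 * and register/unregister the Linux filesystem type.
 */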
void
zfs_init(void)
{
zfsctl_init();
zfs_znode_init();
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
register_filesystem(&zpl_fs_type);
}
void
zfs_fini(void)
{
/*
 * We don't use taskq_wait_outstanding() because zpl_posix_acl_free()
 * might add more tasks.
*/
taskq_wait(system_delay_taskq);
taskq_wait(system_taskq);
unregister_filesystem(&zpl_fs_type);
zfs_znode_fini();
zfsctl_fini();
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_suspend_fs);
EXPORT_SYMBOL(zfs_resume_fs);
EXPORT_SYMBOL(zfs_set_version);
EXPORT_SYMBOL(zfsvfs_create);
EXPORT_SYMBOL(zfsvfs_free);
EXPORT_SYMBOL(zfs_is_readonly);
EXPORT_SYMBOL(zfs_domount);
EXPORT_SYMBOL(zfs_preumount);
EXPORT_SYMBOL(zfs_umount);
EXPORT_SYMBOL(zfs_remount);
EXPORT_SYMBOL(zfs_statvfs);
EXPORT_SYMBOL(zfs_vget);
EXPORT_SYMBOL(zfs_prune);
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
index c104cd661bf5..02b1af3edc4f 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
@@ -1,2267 +1,2352 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zpl.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
 * Functions needed for userland (i.e., libzpool) are not put under
 * #ifdef _KERNEL; the rest of the functions have dependencies
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
static kmem_cache_t *znode_cache = NULL;
static kmem_cache_t *znode_hold_cache = NULL;
unsigned int zfs_object_mutex_size = ZFS_OBJ_MTX_SZ;
/*
* This is used by the test suite so that it can delay znodes from being
* freed in order to inspect the unlinked set.
*/
static int zfs_unlink_suspend_progress = 0;
/*
* This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
* z_rangelock. It will modify the offset and length of the lock to reflect
* znode-specific information, and convert RL_APPEND to RL_WRITER. This is
* called with the rangelock_t's rl_lock held, which avoids races.
*/
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
znode_t *zp = arg;
/*
* If in append mode, convert to writer and lock starting at the
* current end of file.
*/
if (new->lr_type == RL_APPEND) {
new->lr_offset = zp->z_size;
new->lr_type = RL_WRITER;
}
/*
* If we need to grow the block size then lock the whole file range.
*/
uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
new->lr_offset = 0;
new->lr_length = UINT64_MAX;
}
}
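/*
 * kmem cache constructor/destructor for znodes.  The constructor
 * initializes the embedded inode and the per-znode locks once per cache
 * object; the destructor asserts that cached ACL/xattr state was
 * released and tears the locks back down.
 */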
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
(void) arg, (void) kmflags;
znode_t *zp = buf;
inode_init_once(ZTOI(zp));
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zp->z_name_lock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
zp->z_dirlocks = NULL;
zp->z_acl_cached = NULL;
zp->z_xattr_cached = NULL;
zp->z_xattr_parent = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
return (0);
}
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
(void) arg;
znode_t *zp = buf;
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
rw_destroy(&zp->z_parent_lock);
rw_destroy(&zp->z_name_lock);
mutex_destroy(&zp->z_acl_lock);
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
ASSERT3P(zp->z_dirlocks, ==, NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
ASSERT0(atomic_load_32(&zp->z_sync_writes_cnt));
ASSERT0(atomic_load_32(&zp->z_async_writes_cnt));
}
static int
zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
{
(void) arg, (void) kmflags;
znode_hold_t *zh = buf;
mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
zh->zh_refcount = 0;
return (0);
}
static void
zfs_znode_hold_cache_destructor(void *buf, void *arg)
{
(void) arg;
znode_hold_t *zh = buf;
mutex_destroy(&zh->zh_lock);
}
void
zfs_znode_init(void)
{
/*
* Initialize zcache. The KMC_SLAB hint is used so the cache is
* backed by kmalloc() when on the Linux slab, which ensures that any
* wait_on_bit() operations on the related inode operate properly.
*/
ASSERT(znode_cache == NULL);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_SLAB);
ASSERT(znode_hold_cache == NULL);
znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
}
void
zfs_znode_fini(void)
{
/*
* Cleanup zcache
*/
if (znode_cache)
kmem_cache_destroy(znode_cache);
znode_cache = NULL;
if (znode_hold_cache)
kmem_cache_destroy(znode_hold_cache);
znode_hold_cache = NULL;
}
/*
* The zfs_znode_hold_enter() / zfs_znode_hold_exit() functions are used to
* serialize access to a znode and its SA buffer while the object is being
* created or destroyed. This kind of locking would normally reside in the
* znode itself but in this case that's impossible because the znode and SA
* buffer may not yet exist. Therefore the locking is handled externally
* with an array of mutexes and AVL trees which contain per-object locks.
*
* In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
* in to the correct AVL tree and finally the per-object lock is held. In
* zfs_znode_hold_exit() the process is reversed. The per-object lock is
* released, removed from the AVL tree and destroyed if there are no waiters.
*
* This scheme has two important properties:
*
* 1) No memory allocations are performed while holding one of the z_hold_locks.
* This ensures evict(), which can be called from direct memory reclaim, will
* never block waiting on a z_hold_lock which just happens to have hashed
* to the same index.
*
* 2) All locks used to serialize access to an object are per-object and never
* shared. This minimizes lock contention without creating a large number
* of dedicated locks.
*
* On the downside it does require znode_hold_t structures to be frequently
* allocated and freed. However, because these are backed by a kmem cache
* and very short lived this cost is minimal.
*/
int
zfs_znode_hold_compare(const void *a, const void *b)
{
const znode_hold_t *zh_a = (const znode_hold_t *)a;
const znode_hold_t *zh_b = (const znode_hold_t *)b;
return (TREE_CMP(zh_a->zh_obj, zh_b->zh_obj));
}
static boolean_t __maybe_unused
zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t held;
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
held = (zh && MUTEX_HELD(&zh->zh_lock)) ? B_TRUE : B_FALSE;
mutex_exit(&zfsvfs->z_hold_locks[i]);
return (held);
}
znode_hold_t *
zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, *zh_new, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t found = B_FALSE;
zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
if (likely(zh == NULL)) {
zh = zh_new;
zh->zh_obj = obj;
avl_add(&zfsvfs->z_hold_trees[i], zh);
} else {
ASSERT3U(zh->zh_obj, ==, obj);
found = B_TRUE;
}
zh->zh_refcount++;
ASSERT3S(zh->zh_refcount, >, 0);
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (found == B_TRUE)
kmem_cache_free(znode_hold_cache, zh_new);
ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
mutex_enter(&zh->zh_lock);
return (zh);
}
void
zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
{
int i = ZFS_OBJ_HASH(zfsvfs, zh->zh_obj);
boolean_t remove = B_FALSE;
ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
mutex_exit(&zh->zh_lock);
mutex_enter(&zfsvfs->z_hold_locks[i]);
ASSERT3S(zh->zh_refcount, >, 0);
if (--zh->zh_refcount == 0) {
avl_remove(&zfsvfs->z_hold_trees[i], zh);
remove = B_TRUE;
}
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (remove == B_TRUE)
kmem_cache_free(znode_hold_cache, zh);
}
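/*
 * Illustrative usage (editorial sketch, not part of this change): callers
 * serialize znode creation/destruction for an object with the per-object
 * hold implemented above, e.g.:
 *
 *	znode_hold_t *zh = zfs_znode_hold_enter(zfsvfs, obj);
 *	... create or tear down the znode / SA handle for obj ...
 *	zfs_znode_hold_exit(zfsvfs, zh);
 *
 * Note that no memory is allocated while z_hold_locks[i] is held; the
 * znode_hold_t is taken from znode_hold_cache before the hash lock is
 * entered.
 */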
dev_t
zfs_cmpldev(uint64_t dev)
{
return (dev);
}
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
ASSERT(zfs_znode_held(zfsvfs, zp->z_id));
mutex_enter(&zp->z_lock);
ASSERT(zp->z_sa_hdl == NULL);
ASSERT(zp->z_acl_cached == NULL);
if (sa_hdl == NULL) {
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
sa_set_userp(sa_hdl, zp);
}
zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
mutex_exit(&zp->z_lock);
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
ASSERT(zfs_znode_held(ZTOZSB(zp), zp->z_id) ||
RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
}
/*
* Called by new_inode() to allocate a new inode.
*/
int
zfs_inode_alloc(struct super_block *sb, struct inode **ip)
{
znode_t *zp;
zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
*ip = ZTOI(zp);
return (0);
}
/*
* Called in multiple places when an inode should be destroyed.
*/
void
zfs_inode_destroy(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
mutex_enter(&zfsvfs->z_znodes_lock);
if (list_link_active(&zp->z_link_node)) {
list_remove(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes--;
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
kmem_cache_free(znode_cache, zp);
}
static void
zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip)
{
uint64_t rdev = 0;
switch (ip->i_mode & S_IFMT) {
case S_IFREG:
ip->i_op = &zpl_inode_operations;
ip->i_fop = &zpl_file_operations;
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
case S_IFDIR:
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
ip->i_flags |= S_IOPS_WRAPPER;
ip->i_op = &zpl_dir_inode_operations.ops;
#else
ip->i_op = &zpl_dir_inode_operations;
#endif
ip->i_fop = &zpl_dir_file_operations;
ITOZ(ip)->z_zn_prefetch = B_TRUE;
break;
case S_IFLNK:
ip->i_op = &zpl_symlink_inode_operations;
break;
/*
* rdev is stored in the SA only for device files.
*/
case S_IFCHR:
case S_IFBLK:
(void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zfsvfs), &rdev,
sizeof (rdev));
zfs_fallthrough;
case S_IFIFO:
case S_IFSOCK:
init_special_inode(ip, ip->i_mode, rdev);
ip->i_op = &zpl_special_inode_operations;
break;
default:
zfs_panic_recover("inode %llu has invalid mode: 0x%x\n",
(u_longlong_t)ip->i_ino, ip->i_mode);
/* Assume the inode is a file and attempt to continue */
ip->i_mode = S_IFREG | 0644;
ip->i_op = &zpl_inode_operations;
ip->i_fop = &zpl_file_operations;
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
}
}
static void
zfs_set_inode_flags(znode_t *zp, struct inode *ip)
{
/*
* Linux and Solaris have different sets of file attributes, so we
* restrict this conversion to the intersection of the two.
*/
#ifdef HAVE_INODE_SET_FLAGS
unsigned int flags = 0;
if (zp->z_pflags & ZFS_IMMUTABLE)
flags |= S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
flags |= S_APPEND;
inode_set_flags(ip, flags, S_IMMUTABLE|S_APPEND);
#else
if (zp->z_pflags & ZFS_IMMUTABLE)
ip->i_flags |= S_IMMUTABLE;
else
ip->i_flags &= ~S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
ip->i_flags |= S_APPEND;
else
ip->i_flags &= ~S_APPEND;
#endif
}
/*
* Update the embedded inode given the znode.
*/
void
zfs_znode_update_vfs(znode_t *zp)
{
struct inode *ip;
uint32_t blksize;
u_longlong_t i_blocks;
ASSERT(zp != NULL);
ip = ZTOI(zp);
/* Skip .zfs control nodes which do not exist on disk. */
if (zfsctl_is_node(ip))
return;
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);
spin_lock(&ip->i_lock);
ip->i_mode = zp->z_mode;
ip->i_blocks = i_blocks;
i_size_write(ip, zp->z_size);
spin_unlock(&ip->i_lock);
}
/*
* Construct a znode+inode and initialize.
*
* This does not call dmu_set_user(); that is up to the
* caller to do, in case you don't want to return the
* znode.
*/
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
dmu_object_type_t obj_type, sa_handle_t *hdl)
{
znode_t *zp;
struct inode *ip;
uint64_t mode;
uint64_t parent;
uint64_t tmp_gen;
uint64_t links;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2], btime[2];
uint64_t projid = ZFS_DEFAULT_PROJID;
sa_bulk_attr_t bulk[12];
int count = 0;
ASSERT(zfsvfs != NULL);
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
zp = ITOZ(ip);
ASSERT(zp->z_dirlocks == NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
#if !defined(HAVE_FILEMAP_RANGE_HAS_PAGE)
zp->z_is_mapped = B_FALSE;
#endif
zp->z_is_ctldir = B_FALSE;
zp->z_suspended = B_FALSE;
zp->z_sa_hdl = NULL;
zp->z_mapcnt = 0;
zp->z_id = db->db_object;
zp->z_blksz = blksz;
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
zp->z_sync_writes_cnt = 0;
zp->z_async_writes_cnt = 0;
zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0 ||
(dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
(zp->z_pflags & ZFS_PROJID) &&
sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
goto error;
}
zp->z_projid = projid;
zp->z_mode = ip->i_mode = mode;
ip->i_generation = (uint32_t)tmp_gen;
ip->i_blkbits = SPA_MINBLOCKSHIFT;
set_nlink(ip, (uint32_t)links);
zfs_uid_write(ip, z_uid);
zfs_gid_write(ip, z_gid);
zfs_set_inode_flags(zp, ip);
/* Cache the xattr parent id */
if (zp->z_pflags & ZFS_XATTR)
zp->z_xattr_parent = parent;
ZFS_TIME_DECODE(&ip->i_atime, atime);
ZFS_TIME_DECODE(&ip->i_mtime, mtime);
ZFS_TIME_DECODE(&ip->i_ctime, ctime);
ZFS_TIME_DECODE(&zp->z_btime, btime);
ip->i_ino = zp->z_id;
zfs_znode_update_vfs(zp);
zfs_inode_set_ops(zfsvfs, ip);
/*
* The only way insert_inode_locked() can fail is if the ip->i_ino
* number is already hashed for this super block. This should never
* happen because the inode numbers map 1:1 with the object numbers.
*
* The exceptions are rolling back a mounted file system, either
* from the zfs rollback or zfs recv command.
*
* Active inodes are unhashed during the rollback, but since zrele
* can happen asynchronously, we can't guarantee they've been
* unhashed. This can cause hash collisions in unlinked drain
* processing so do not hash unlinked znodes.
*/
if (links > 0)
VERIFY3S(insert_inode_locked(ip), ==, 0);
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
mutex_exit(&zfsvfs->z_znodes_lock);
if (links > 0)
unlock_new_inode(ip);
return (zp);
error:
iput(ip);
return (NULL);
}
/*
* Safely mark an inode dirty. Inodes which are part of a read-only
* file system or snapshot may not be dirtied.
*/
void
zfs_mark_inode_dirty(struct inode *ip)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return;
mark_inode_dirty(ip);
}
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
* Create a new DMU object to hold a zfs znode.
*
* IN: dzp - parent directory for new znode
* vap - file attributes for new znode
* tx - dmu transaction id for zap operations
* cr - credentials of caller
* flag - flags:
* IS_ROOT_NODE - new object will be root
* IS_TMPFILE - new object is of O_TMPFILE
* IS_XATTR - new object is an attribute
* acl_ids - ACL related attributes
*
* OUT: zpp - allocated znode (set to dzp if IS_ROOT_NODE)
*
*/
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
uint64_t crtime[2], atime[2], mtime[2], ctime[2];
uint64_t mode, size, links, parent, pflags;
uint64_t projid = ZFS_DEFAULT_PROJID;
uint64_t rdev = 0;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
dmu_buf_t *db;
inode_timespec_t now;
uint64_t gen, obj;
int bonuslen;
int dnodesize;
sa_handle_t *sa_hdl;
dmu_object_type_t obj_type;
sa_bulk_attr_t *sa_attrs;
int cnt = 0;
zfs_acl_locator_cb_t locate = { 0 };
znode_hold_t *zh;
if (zfsvfs->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
dnodesize = vap->va_fsid; /* ditto */
} else {
obj = 0;
gethrestime(&now);
gen = dmu_tx_get_txg(tx);
dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
}
if (dnodesize == 0)
dnodesize = DNODE_MIN_SIZE;
obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;
/*
* Create a new DMU object.
*/
/*
* There's currently no mechanism for pre-reading the blocks that will
* be needed to allocate a new object, so we accept the small chance
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
if (S_ISDIR(vap->va_mode)) {
if (zfsvfs->z_replay) {
VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = zap_create_norm_dnsize(zfsvfs->z_os,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx);
}
} else {
if (zfsvfs->z_replay) {
VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx);
}
}
zh = zfs_znode_hold_enter(zfsvfs, obj);
VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
* to reference the just-allocated physical data area.
*/
if (flag & IS_ROOT_NODE) {
dzp->z_id = obj;
}
/*
* If parent is an xattr, so am I.
*/
if (dzp->z_pflags & ZFS_XATTR) {
flag |= IS_XATTR;
}
if (zfsvfs->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
if (S_ISDIR(vap->va_mode)) {
size = 2; /* contents ("." and "..") */
links = 2;
} else {
size = 0;
links = (flag & IS_TMPFILE) ? 0 : 1;
}
if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
rdev = vap->va_rdev;
parent = dzp->z_id;
mode = acl_ids->z_mode;
if (flag & IS_XATTR)
pflags |= ZFS_XATTR;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode)) {
/*
* With the ZFS_PROJID flag, we can easily know whether a
* project ID is stored on disk or not. See zfs_space_delta_cb().
*/
if (obj_type != DMU_OT_ZNODE &&
dmu_objset_projectquota_enabled(zfsvfs->z_os))
pflags |= ZFS_PROJID;
/*
* Inherit project ID from parent if required.
*/
projid = zfs_inherit_projid(dzp);
if (dzp->z_pflags & ZFS_PROJINHERIT)
pflags |= ZFS_PROJINHERIT;
}
/*
* No execs denied will be determined when zfs_mode_compute() is called.
*/
pflags |= acl_ids->z_aclp->z_hints &
(ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
ZFS_TIME_ENCODE(&now, crtime);
ZFS_TIME_ENCODE(&now, ctime);
if (vap->va_mask & ATTR_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, atime);
} else {
ZFS_TIME_ENCODE(&now, atime);
}
if (vap->va_mask & ATTR_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
ZFS_TIME_ENCODE(&now, mtime);
}
/* Now add in all of the "SA" attributes */
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
* Set up the array of attributes to be replaced/set on the new file.
*
* The order for DMU_OT_ZNODE is critical since it needs to be
* constructed in the old znode_phys_t format. Don't change this ordering.
*/
sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
} else {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
NULL, &acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
NULL, &acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
}
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
&empty_xattr, 8);
} else if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
pflags & ZFS_PROJID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PROJID(zfsvfs),
NULL, &projid, 8);
}
if (obj_type == DMU_OT_ZNODE ||
(S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
NULL, &rdev, 8);
}
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
&acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
&acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
sizeof (uint64_t) * 4);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
acl_ids->z_fuid, acl_ids->z_fgid);
}
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
/*
* The call to zfs_znode_alloc() may fail if memory is low
* via the call path: alloc_inode() -> inode_init_always() ->
* security_inode_alloc() -> inode_alloc_security(). Since
* the existing code is written such that zfs_mknode() cannot
* fail, retry until sufficient memory has been reclaimed.
*/
do {
*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
} while (*zpp == NULL);
VERIFY(*zpp != NULL);
VERIFY(dzp != NULL);
} else {
/*
* If we are creating the root node, the "parent" we
* passed in is the znode for the root.
*/
*zpp = dzp;
(*zpp)->z_sa_hdl = sa_hdl;
}
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = ZTOI(*zpp)->i_mode = mode;
(*zpp)->z_dnodesize = dnodesize;
(*zpp)->z_projid = projid;
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
}
kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
zfs_znode_hold_exit(zfsvfs, zh);
}
/*
* Update in-core attributes. It is assumed the caller will be doing an
* sa_bulk_update to push the changes out.
*/
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
xoptattr_t *xoap;
boolean_t update_inode = B_FALSE;
xoap = xva_getxoptattr(xvap);
ASSERT(xoap);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
uint64_t times[2];
ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
&times, sizeof (times), tx);
XVA_SET_RTN(xvap, XAT_CREATETIME);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
xoap->xoa_av_quarantined, zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
zfs_sa_set_scanstamp(zp, xvap, tx);
XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
ZFS_ATTR_SET(zp, ZFS_PROJINHERIT, xoap->xoa_projinherit,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (update_inode)
zfs_set_inode_flags(zp, ZTOI(zp));
}
int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
znode_t *zp;
znode_hold_t *zh;
int err;
sa_handle_t *hdl;
*zpp = NULL;
again:
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
hdl = dmu_buf_get_user(db);
if (hdl != NULL) {
zp = sa_get_userdata(hdl);
/*
* Since "SA" does immediate eviction we
* should never find a sa handle that doesn't
* know about the znode.
*/
ASSERT3P(zp, !=, NULL);
mutex_enter(&zp->z_lock);
ASSERT3U(zp->z_id, ==, obj_num);
/*
* If zp->z_unlinked is set, the znode is already marked
* for deletion and should not be discovered. Check this
* after checking igrab() due to fsetxattr() & O_TMPFILE.
*
* If igrab() returns NULL the VFS has independently
* determined the inode should be evicted and has
* called iput_final() to start the eviction process.
* The SA handle is still valid but because the VFS
* requires that the eviction succeed we must drop
* our locks and references to allow the eviction to
* complete. The zfs_zget() may then be retried.
*
* This unlikely case could be optimized by registering
* a sops->drop_inode() callback. The callback would
* need to detect the active SA hold thereby informing
* the VFS that this inode should not be evicted.
*/
if (igrab(ZTOI(zp)) == NULL) {
if (zp->z_unlinked)
err = SET_ERROR(ENOENT);
else
err = SET_ERROR(EAGAIN);
} else {
*zpp = zp;
err = 0;
}
mutex_exit(&zp->z_lock);
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
if (err == EAGAIN) {
/* inode might need this to finish evict */
cond_resched();
goto again;
}
return (err);
}
/*
* Not found; create a new znode/vnode, but only if the file exists.
*
* There is a small window where zfs_vget() could
* find this object while a file create is still in
* progress. This is checked for in zfs_znode_alloc().
*
* If zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
doi.doi_bonus_type, NULL);
if (zp == NULL) {
err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
int
zfs_rezget(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
uint64_t mode;
uint64_t links;
sa_bulk_attr_t bulk[11];
int err;
int count = 0;
uint64_t gen;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2], btime[2];
uint64_t projid = ZFS_DEFAULT_PROJID;
znode_hold_t *zh;
/*
* Skip ctldir znodes, otherwise they will always get invalidated. This
* causes odd behaviour for the mounted snapdirs. In particular, on
* Linux >= 3.18, d_invalidate will detach the mountpoint and prevent
* anyone from automounting it again as long as someone is still using
* the detached mount.
*/
if (zp->z_is_ctldir)
return (0);
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
mutex_exit(&zp->z_acl_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
rw_exit(&zp->z_xattr_lock);
ASSERT(zp->z_sa_hdl == NULL);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
/* reload cached values */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
&gen, sizeof (gen));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, sizeof (zp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&z_uid, sizeof (z_uid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&z_gid, sizeof (z_gid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
if (dmu_objset_projectquota_enabled(zfsvfs->z_os)) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs),
&projid, 8);
if (err != 0 && err != ENOENT) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(err));
}
}
zp->z_projid = projid;
zp->z_mode = ZTOI(zp)->i_mode = mode;
zfs_uid_write(ZTOI(zp), z_uid);
zfs_gid_write(ZTOI(zp), z_gid);
ZFS_TIME_DECODE(&ZTOI(zp)->i_atime, atime);
ZFS_TIME_DECODE(&ZTOI(zp)->i_mtime, mtime);
ZFS_TIME_DECODE(&ZTOI(zp)->i_ctime, ctime);
ZFS_TIME_DECODE(&zp->z_btime, btime);
if ((uint32_t)gen != ZTOI(zp)->i_generation) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
set_nlink(ZTOI(zp), (uint32_t)links);
zfs_set_inode_flags(zp, ZTOI(zp));
zp->z_blksz = doi.doi_data_block_size;
zp->z_atime_dirty = B_FALSE;
zfs_znode_update_vfs(zp);
/*
* If the file has zero links, then it has been unlinked on the send
* side and it must be in the received unlinked set.
* We call zfs_znode_dmu_fini() now to prevent any accesses to the
* stale data and to prevent automatic removal of the file in
* zfs_zinactive(). The file will be removed either when it is removed
* on the send side and the next incremental stream is received or
* when the unlinked set gets processed.
*/
zp->z_unlinked = (ZTOI(zp)->i_nlink == 0);
if (zp->z_unlinked)
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
objset_t *os = zfsvfs->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
znode_hold_t *zh;
zh = zfs_znode_hold_enter(zfsvfs, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
}
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
void
zfs_zinactive(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
znode_hold_t *zh;
ASSERT(zp->z_sa_hdl);
/*
* Don't allow a zfs_zget() while we're trying to release this znode.
*/
zh = zfs_znode_hold_enter(zfsvfs, z_id);
mutex_enter(&zp->z_lock);
/*
* If this was the last reference to a file with no links, remove
* the file from the file system unless the file system is mounted
* read-only. That can happen, for example, if the file system was
* originally read-write, the file was opened, then unlinked and
* the file system was made read-only before the file was finally
* closed. The file will remain in the unlinked set.
*/
if (zp->z_unlinked) {
ASSERT(!zfsvfs->z_issnap);
if (!zfs_is_readonly(zfsvfs) && !zfs_unlink_suspend_progress) {
mutex_exit(&zp->z_lock);
zfs_znode_hold_exit(zfsvfs, zh);
zfs_rmnode(zp);
return;
}
}
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
#if defined(HAVE_INODE_TIMESPEC64_TIMES)
#define zfs_compare_timespec timespec64_compare
#else
#define zfs_compare_timespec timespec_compare
#endif
/*
* Determine whether the znode's atime must be updated. The logic mostly
* duplicates the Linux kernel's relatime_need_update() functionality.
* This function is only called if the underlying filesystem actually has
* atime updates enabled.
*/
boolean_t
zfs_relatime_need_update(const struct inode *ip)
{
inode_timespec_t now;
gethrestime(&now);
/*
* In relatime mode, only update the atime if the previous atime
* is earlier than either the ctime or mtime or if at least a day
* has passed since the last update of atime.
*/
if (zfs_compare_timespec(&ip->i_mtime, &ip->i_atime) >= 0)
return (B_TRUE);
if (zfs_compare_timespec(&ip->i_ctime, &ip->i_atime) >= 0)
return (B_TRUE);
if ((hrtime_t)now.tv_sec - (hrtime_t)ip->i_atime.tv_sec >= 24*60*60)
return (B_TRUE);
return (B_FALSE);
}
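/*
 * Editorial worked example (not part of this change), following the checks
 * above: with atime = 1000, mtime = 900, ctime = 950 and now = 2000, both
 * timespec comparisons are negative and 2000 - 1000 < 86400, so the
 * function returns B_FALSE; once now reaches atime + 24*60*60 (or mtime
 * or ctime move past atime) it returns B_TRUE and the atime is updated.
 */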
/*
* Prepare to update znode time stamps.
*
* IN: zp - znode requiring timestamp update
* flag - ATTR_MTIME, ATTR_CTIME flags
*
* OUT: zp - z_seq
* mtime - new mtime
* ctime - new ctime
*
* Note: We don't update atime here, because we rely on Linux VFS to do
* atime updating.
*/
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
uint64_t ctime[2])
{
inode_timespec_t now;
gethrestime(&now);
zp->z_seq++;
if (flag & ATTR_MTIME) {
ZFS_TIME_ENCODE(&now, mtime);
ZFS_TIME_DECODE(&(ZTOI(zp)->i_mtime), mtime);
if (ZTOZSB(zp)->z_use_fuids) {
zp->z_pflags |= (ZFS_ARCHIVE |
ZFS_AV_MODIFIED);
}
}
if (flag & ATTR_CTIME) {
ZFS_TIME_ENCODE(&now, ctime);
ZFS_TIME_DECODE(&(ZTOI(zp)->i_ctime), ctime);
if (ZTOZSB(zp)->z_use_fuids)
zp->z_pflags |= ZFS_ARCHIVE;
}
}
/*
* Grow the block size for a file.
*
* IN: zp - znode of file to grow block size for.
* size - requested block size
* tx - open transaction.
*
* NOTE: this function assumes that the znode is write locked.
*/
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
int error;
u_longlong_t dummy;
if (size <= zp->z_blksz)
return;
/*
* If the file size is already greater than the current blocksize,
* we will not grow. If there is more than one block in a file,
* the blocksize cannot change.
*/
if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
size, 0, tx);
if (error == ENOTSUP)
return;
ASSERT0(error);
/* What blocksize did we actually get? */
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
/*
* Increase the file length
*
* IN: zp - znode of file to extend.
* end - new end-of-file
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_extend(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
uint64_t newblksz;
int error;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end <= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
(!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
} else {
newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
newblksz = 0;
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
if (newblksz)
zfs_grow_blocksize(zp, newblksz, tx);
zp->z_size = end;
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_rangelock_exit(lr);
dmu_tx_commit(tx);
return (0);
}
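/*
 * Editorial worked example (not part of this change) of the newblksz
 * computation above: with z_blksz = 16K, z_max_blksz = 128K and
 * end = 200K, the block is still growable, so newblksz =
 * MIN(200K, 128K) = 128K. If instead z_blksz were already a
 * non-power-of-2 value above z_max_blksz (e.g. 96K after recordsize was
 * lowered to 64K), it may only grow to the next power of two:
 * newblksz = MIN(end, 1 << highbit64(96K)) = MIN(200K, 128K) = 128K.
 */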
/*
* zfs_zero_partial_page - Modeled after update_pages() but
* with different arguments and semantics for use by zfs_freesp().
*
* Zeroes a piece of a single page cache entry for zp at offset
* start and length len.
*
* Caller must acquire a range lock on the file for the region
* being zeroed in order that the ARC and page cache stay in sync.
*/
static void
zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
{
struct address_space *mp = ZTOI(zp)->i_mapping;
struct page *pp;
int64_t off;
void *pb;
ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));
off = start & (PAGE_SIZE - 1);
start &= PAGE_MASK;
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
pb = kmap(pp);
memset(pb + off, 0, len);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
put_page(pp);
}
}
/*
* Free space in a file.
*
* IN: zp - znode of file to free data in.
* off - start of section to free.
* len - length of section to free.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_locked_range_t *lr;
int error;
/*
* Lock the range being freed.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (off >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
if (off + len > zp->z_size)
len = zp->z_size - off;
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
/*
* Zero partial page cache entries. This must be done under a
* range lock in order to keep the ARC and page cache in sync.
*/
if (zn_has_cached_data(zp, off, off + len - 1)) {
loff_t first_page, last_page, page_len;
loff_t first_page_offset, last_page_offset;
/* first possible full page in hole */
first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* last page of hole */
last_page = (off + len) >> PAGE_SHIFT;
/* offset of first_page */
first_page_offset = first_page << PAGE_SHIFT;
/* offset of last_page */
last_page_offset = last_page << PAGE_SHIFT;
/* truncate whole pages */
if (last_page_offset > first_page_offset) {
truncate_inode_pages_range(ZTOI(zp)->i_mapping,
first_page_offset, last_page_offset - 1);
}
/* truncate sub-page ranges */
if (first_page > last_page) {
/* entire punched area within a single page */
zfs_zero_partial_page(zp, off, len);
} else {
/* beginning of punched area at the end of a page */
page_len = first_page_offset - off;
if (page_len > 0)
zfs_zero_partial_page(zp, off, page_len);
/* end of punched area at the beginning of a page */
page_len = off + len - last_page_offset;
if (page_len > 0)
zfs_zero_partial_page(zp, last_page_offset,
page_len);
}
}
zfs_rangelock_exit(lr);
return (error);
}
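/*
 * Editorial worked example (not part of this change) of the partial-page
 * handling above, assuming 4K pages, off = 1000 and len = 10000:
 * first_page = 1 (first_page_offset = 4096), last_page = 2
 * (last_page_offset = 8192). The whole pages covering [4096, 8191] are
 * truncated via truncate_inode_pages_range(), while the sub-page ranges
 * [1000, 4095] and [8192, 10999] are zeroed in place by
 * zfs_zero_partial_page().
 */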
/*
* Truncate a file
*
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
int error;
sa_bulk_attr_t bulk[2];
int count = 0;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
DMU_OBJECT_END);
if (error) {
zfs_rangelock_exit(lr);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
zp->z_size = end;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
}
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
return (0);
}
/*
* Free space in a file
*
* IN: zp - znode of file to free data in.
* off - start of range
* len - length of range to free (0 => truncate at off)
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
* RETURN: 0 on success, error code on failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
sizeof (mode))) != 0)
return (error);
if (off > zp->z_size) {
error = zfs_extend(zp, off+len);
if (error == 0 && log)
goto log;
goto out;
}
if (len == 0) {
error = zfs_trunc(zp, off);
} else {
if ((error = zfs_free_range(zp, off, len)) == 0 &&
off + len > zp->z_size)
error = zfs_extend(zp, off+len);
}
if (error || !log)
goto out;
log:
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT(error == 0);
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
dmu_tx_commit(tx);
zfs_znode_update_vfs(zp);
error = 0;
out:
/*
* Truncate the page cache - for file truncate operations, use
* the purpose-built API for truncations. For punching operations,
* the truncation is handled under a range lock in zfs_free_range.
*/
if (len == 0)
truncate_setsize(ZTOI(zp), off);
return (error);
}
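/*
 * Illustrative usage (editorial sketch, not part of this change); the
 * flag value 0 below is purely for illustration, real callers pass the
 * file's open-mode flags:
 *
 *	error = zfs_freesp(zp, 0, 0, 0, B_TRUE);	truncate to 0 bytes
 *	error = zfs_freesp(zp, off, len, 0, B_TRUE);	punch a hole
 *
 * A len of 0 truncates the file at off; a non-zero len frees
 * [off, off + len) and extends the file if the range ends past the
 * current EOF.
 */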
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
struct super_block *sb;
zfsvfs_t *zfsvfs;
uint64_t moid, obj, sa_obj, version;
uint64_t sense = ZFS_CASE_SENSITIVE;
uint64_t norm = 0;
nvpair_t *elem;
int size;
int error;
int i;
znode_t *rootzp = NULL;
vattr_t vattr;
znode_t *zp;
zfs_acl_ids_t acl_ids;
/*
* First attempt to create master node.
*/
/*
* In an empty objset, there are no blocks to read and thus
* there can be no i/o errors (which we assert below).
*/
moid = MASTER_NODE_OBJ;
error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
/*
* Set starting attributes.
*/
version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
elem = NULL;
while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
/* For the moment we expect all zpl props to be uint64_ts */
uint64_t val;
const char *name;
ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
VERIFY(nvpair_value_uint64(elem, &val) == 0);
name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
if (val < version)
version = val;
} else {
error = zap_update(os, moid, name, 8, 1, &val, tx);
}
ASSERT(error == 0);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val;
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
sense = val;
}
ASSERT(version != 0);
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
ASSERT(error == 0);
/*
* Create zap object used for SA attribute registration
*/
if (version >= ZPL_VERSION_SA) {
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT(error == 0);
} else {
sa_obj = 0;
}
/*
* Create a delete queue.
*/
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
ASSERT(error == 0);
/*
* Create root znode. Create minimal znode/inode/zfsvfs/sb
* to allow zfs_mknode to work.
*/
vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
vattr.va_mode = S_IFDIR|0755;
vattr.va_uid = crgetuid(cr);
vattr.va_gid = crgetgid(cr);
rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
rootzp->z_unlinked = B_FALSE;
rootzp->z_atime_dirty = B_FALSE;
rootzp->z_is_sa = USE_SA(version, os);
rootzp->z_pflags = 0;
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
zfsvfs->z_os = os;
zfsvfs->z_parent = zfsvfs;
zfsvfs->z_version = version;
zfsvfs->z_use_fuids = USE_FUIDS(version, os);
zfsvfs->z_use_sa = USE_SA(version, os);
zfsvfs->z_norm = norm;
sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
sb->s_fs_info = zfsvfs;
ZTOI(rootzp)->i_sb = sb;
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
ASSERT(error == 0);
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids, zfs_init_idmap));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
ASSERT(error == 0);
zfs_acl_ids_free(&acl_ids);
atomic_set(&ZTOI(rootzp)->i_count, 0);
sa_handle_destroy(rootzp->z_sa_hdl);
kmem_cache_free(znode_cache, rootzp);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
mutex_destroy(&zfsvfs->z_znodes_lock);
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
kmem_free(sb, sizeof (struct super_block));
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */
static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
uint64_t sa_obj = 0;
int error;
error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
if (error != 0 && error != ENOENT)
return (error);
error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
return (error);
}
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
dmu_buf_t **db, const void *tag)
{
dmu_object_info_t doi;
int error;
if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
return (error);
dmu_object_info_from_db(*db, &doi);
if ((doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t))) {
sa_buf_rele(*db, tag);
return (SET_ERROR(ENOTSUP));
}
error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
if (error != 0) {
sa_buf_rele(*db, tag);
return (error);
}
return (0);
}
static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, const void *tag)
{
sa_handle_destroy(hdl);
sa_buf_rele(db, tag);
}
/*
* Given an object number, return its parent object number and whether
* or not the object is an extended attribute directory.
*/
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
uint64_t *pobjp, int *is_xattrdir)
{
uint64_t parent;
uint64_t pflags;
uint64_t mode;
uint64_t parent_mode;
sa_bulk_attr_t bulk[3];
sa_handle_t *sa_hdl;
dmu_buf_t *sa_db;
int count = 0;
int error;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
&parent, sizeof (parent));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
&pflags, sizeof (pflags));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&mode, sizeof (mode));
if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
return (error);
/*
* When a link is removed its parent pointer is not changed and will
* be invalid. There are two cases where a link is removed but the
* file stays around: when it goes to the delete queue and when there
* are additional links.
*/
error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
if (error != 0)
return (error);
error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
if (error != 0)
return (error);
*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
/*
* Extended attributes can be applied to files, directories, etc.
* Otherwise the parent must be a directory.
*/
if (!*is_xattrdir && !S_ISDIR(parent_mode))
return (SET_ERROR(EINVAL));
*pobjp = parent;
return (0);
}
/*
* Given an object number, return some zpl level statistics
*/
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
zfs_stat_t *sb)
{
sa_bulk_attr_t bulk[4];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&sb->zs_mode, sizeof (sb->zs_mode));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
&sb->zs_gen, sizeof (sb->zs_gen));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
&sb->zs_links, sizeof (sb->zs_links));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
&sb->zs_ctime, sizeof (sb->zs_ctime));
return (sa_bulk_lookup(hdl, bulk, count));
}
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
sa_attr_type_t *sa_table, char *buf, int len)
{
sa_handle_t *sa_hdl;
sa_handle_t *prevhdl = NULL;
dmu_buf_t *prevdb = NULL;
dmu_buf_t *sa_db = NULL;
char *path = buf + len - 1;
int error;
*path = '\0';
sa_hdl = hdl;
uint64_t deleteq_obj;
VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
error = zap_lookup_int(osp, deleteq_obj, obj);
if (error == 0) {
return (ESTALE);
} else if (error != ENOENT) {
return (error);
}
for (;;) {
uint64_t pobj = 0;
char component[MAXNAMELEN + 2];
size_t complen;
int is_xattrdir = 0;
if (prevdb) {
ASSERT(prevhdl != NULL);
zfs_release_sa_handle(prevhdl, prevdb, FTAG);
}
if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
&is_xattrdir)) != 0)
break;
if (pobj == obj) {
if (path[0] != '/')
*--path = '/';
break;
}
component[0] = '/';
if (is_xattrdir) {
strcpy(component + 1, "<xattrdir>");
} else {
error = zap_value_search(osp, pobj, obj,
ZFS_DIRENT_OBJ(-1ULL), component + 1);
if (error != 0)
break;
}
complen = strlen(component);
path -= complen;
ASSERT(path >= buf);
memcpy(path, component, complen);
obj = pobj;
if (sa_hdl != hdl) {
prevhdl = sa_hdl;
prevdb = sa_db;
}
error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
if (error != 0) {
sa_hdl = prevhdl;
sa_db = prevdb;
break;
}
}
if (sa_hdl != NULL && sa_hdl != hdl) {
ASSERT(sa_db != NULL);
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
}
if (error == 0)
(void) memmove(buf, path, buf + len - path);
return (error);
}
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
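/*
 * Illustrative usage (editorial sketch, not part of this change): the
 * path is assembled backwards from the end of the caller's buffer and
 * moved to the front on success, so callers only need to supply a
 * sufficiently large buffer, e.g.:
 *
 *	char path[MAXPATHLEN * 2];
 *	error = zfs_obj_to_path(osp, obj, path, sizeof (path));
 */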
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
char *buf, int len)
{
char *path = buf + len - 1;
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
*path = '\0';
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
if (error != 0) {
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
+/*
+ * Read a property stored within the master node.
+ */
+int
+zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
+{
+ uint64_t *cached_copy = NULL;
+
+ /*
+ * Figure out where in the objset_t the cached copy would live, if it
+ * is available for the requested property.
+ */
+ if (os != NULL) {
+ switch (prop) {
+ case ZFS_PROP_VERSION:
+ cached_copy = &os->os_version;
+ break;
+ case ZFS_PROP_NORMALIZE:
+ cached_copy = &os->os_normalization;
+ break;
+ case ZFS_PROP_UTF8ONLY:
+ cached_copy = &os->os_utf8only;
+ break;
+ case ZFS_PROP_CASE:
+ cached_copy = &os->os_casesensitivity;
+ break;
+ default:
+ break;
+ }
+ }
+ if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
+ *value = *cached_copy;
+ return (0);
+ }
+
+ /*
+ * If the property wasn't cached, look up the file system's value for
+ * the property. For the version property, we look up a slightly
+ * different string.
+ */
+ const char *pname;
+ int error = ENOENT;
+ if (prop == ZFS_PROP_VERSION)
+ pname = ZPL_VERSION_STR;
+ else
+ pname = zfs_prop_to_name(prop);
+
+ if (os != NULL) {
+ ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
+ error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
+ }
+
+ if (error == ENOENT) {
+ /* No value set, use the default value */
+ switch (prop) {
+ case ZFS_PROP_VERSION:
+ *value = ZPL_VERSION;
+ break;
+ case ZFS_PROP_NORMALIZE:
+ case ZFS_PROP_UTF8ONLY:
+ *value = 0;
+ break;
+ case ZFS_PROP_CASE:
+ *value = ZFS_CASE_SENSITIVE;
+ break;
+ case ZFS_PROP_ACLTYPE:
+ *value = ZFS_ACLTYPE_OFF;
+ break;
+ default:
+ return (error);
+ }
+ error = 0;
+ }
+
+ /*
+ * If one of the methods for getting the property value above worked,
+ * copy it into the objset_t's cache.
+ */
+ if (error == 0 && cached_copy != NULL) {
+ *cached_copy = *value;
+ }
+
+ return (error);
+}
+
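/*
 * Illustrative usage (editorial sketch, not part of this change):
 *
 *	uint64_t zplver;
 *	if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &zplver) == 0)
 *		... use zplver ...
 *
 * With os == NULL the ZAP lookup is skipped and the compiled-in default
 * for the property is returned (or ENOENT for properties that have no
 * default in the switch above).
 */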
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);
/* CSTYLED */
module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
module_param(zfs_unlink_suspend_progress, int, 0644);
MODULE_PARM_DESC(zfs_unlink_suspend_progress, "Set to prevent async unlinks "
"(debug - leaks space into the unlinked set)");
#endif
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index a78f664c4fe8..a23715309f2b 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -1,10913 +1,10927 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2020, Delphix. All rights reserved.
* Copyright (c) 2014, Saso Kiselkov. All rights reserved.
* Copyright (c) 2017, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2020, George Amanakis. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* DVA-based Adjustable Replacement Cache
*
* While much of the theory of operation used here is
* based on the self-tuning, low overhead replacement cache
* presented by Megiddo and Modha at FAST 2003, there are some
* significant differences:
*
* 1. The Megiddo and Modha model assumes any page is evictable.
* Pages in its cache cannot be "locked" into memory. This makes
* the eviction algorithm simple: evict the last page in the list.
* This also makes the performance characteristics easy to reason
* about. Our cache is not so simple. At any given moment, some
* subset of the blocks in the cache are un-evictable because we
* have handed out a reference to them. Blocks are only evictable
* when there are no external references active. This makes
* eviction far more problematic: we choose to evict the evictable
* blocks that are the "lowest" in the list.
*
* There are times when it is not possible to evict the requested
* space. In these circumstances we are unable to adjust the cache
* size. To prevent the cache growing unbounded at these times we
* implement a "cache throttle" that slows the flow of new data
* into the cache until we can make space available.
*
* 2. The Megiddo and Modha model assumes a fixed cache size.
* Pages are evicted when the cache is full and there is a cache
* miss. Our model has a variable sized cache. It grows with
* high use, but also tries to react to memory pressure from the
* operating system: decreasing its size when system memory is
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
* See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
* by N. Megiddo & D. Modha, FAST 2003
*/
/*
* The locking model:
*
* A new reference to a cache buffer can be obtained in two
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal ARC algorithms for
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* ARC list locks.
*
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
* NULL for the mutex if the buffer was not in the table.
*
* buf_hash_remove() expects the appropriate hash mutex to be
* already held before it is invoked.
*
* Each ARC state also has a mutex which is used to protect the
* buffer list associated with the state. When attempting to
* obtain a hash table lock while holding an ARC list lock you
* must use: mutex_tryenter() to avoid deadlock. Also note that
* the active state mutex must be held before the ghost state mutex.
*
* It is also possible to register a callback which is run when the
* metadata limit is reached and no buffers can be safely evicted. In
* this case the arc user should drop a reference on some arc buffers so
* they can be reclaimed. For example, when using the ZPL each dentry
* holds a reference on a znode. These dentries must be pruned before
* the arc buffer holding the znode can be safely evicted.
*
* Note that the majority of the performance stats are manipulated
* with atomic operations.
*
* The L2ARC uses the l2ad_mtx on each vdev for the following:
*
* - L2ARC buflist creation
* - L2ARC buflist eviction
* - L2ARC write completion, which walks L2ARC buflists
* - ARC header destruction, as it removes from L2ARC buflists
* - ARC header release, as it removes from L2ARC buflists
*/
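/*
 * Editorial sketch (not part of the upstream source): the mutex_tryenter()
 * pattern described above, roughly as it looks when a list walk already
 * holds an ARC list lock and wants a header's hash table lock. The hash
 * lock is only taken opportunistically; on failure the header is skipped
 * rather than blocked on, which avoids a lock-order deadlock:
 *
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);   <- bookkeeping only
 *		continue;                           <- skip, do not block
 *	}
 *	... examine or evict the header ...
 *	mutex_exit(hash_lock);
 */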
/*
* ARC operation:
*
* Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
* This structure can point either to a block that is still in the cache or to
* one that is only accessible in an L2 ARC device, or it can provide
* information about a block that was recently evicted. If a block is
* only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
* information to retrieve it from the L2ARC device. This information is
* stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
* that is in this state cannot access the data directly.
*
* Blocks that are actively being referenced or have not been evicted
* are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
* the arc_buf_hdr_t that will point to the data block in memory. A block can
* only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
* caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
* also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
*
* The L1ARC's data pointer may or may not be uncompressed. The ARC has the
* ability to store the physical data (b_pabd) associated with the DVA of the
* arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
* it will match its on-disk compression characteristics. This behavior can be
* disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
* compressed ARC functionality is disabled, the b_pabd will point to an
* uncompressed version of the on-disk data.
*
* Data in the L1ARC is not accessed by consumers of the ARC directly. Each
* arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
* Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
* consumer. The ARC will provide references to this data and will keep it
* cached until it is no longer in use. The ARC caches only the L1ARC's physical
* data block and will evict any arc_buf_t that is no longer referenced. The
* amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
* "overhead_size" kstat.
*
* Depending on the consumer, an arc_buf_t can be requested in uncompressed or
* compressed form. The typical case is that consumers will want uncompressed
* data, and when that happens a new data buffer is allocated where the data is
* decompressed for them to use. Currently the only consumer who wants
* compressed arc_buf_t's is "zfs send", when it streams data exactly as it
* exists on disk. When this happens, the arc_buf_t's data buffer is shared
* with the arc_buf_hdr_t.
*
* Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
* first one is owned by a compressed send consumer (and therefore references
* the same compressed data buffer as the arc_buf_hdr_t) and the second could be
* used by any other consumer (and has its own uncompressed copy of the data
* buffer).
*
* arc_buf_hdr_t
* +-----------+
* | fields |
* | common to |
* | L1- and |
* | L2ARC |
* +-----------+
* | l2arc_buf_hdr_t
* | |
* +-----------+
* | l1arc_buf_hdr_t
* | | arc_buf_t
* | b_buf +------------>+-----------+ arc_buf_t
* | b_pabd +-+ |b_next +---->+-----------+
* +-----------+ | |-----------| |b_next +-->NULL
* | |b_comp = T | +-----------+
* | |b_data +-+ |b_comp = F |
* | +-----------+ | |b_data +-+
* +->+------+ | +-----------+ |
* compressed | | | |
* data | |<--------------+ | uncompressed
* +------+ compressed, | data
* shared +-->+------+
* data | |
* | |
* +------+
*
* When a consumer reads a block, the ARC must first look to see if the
* arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
* arc_buf_t and either copies uncompressed data into a new data buffer from an
* existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
* new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
* hdr is compressed and the desired compression characteristics of the
* arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
* arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
* the last buffer in the hdr's b_buf list, however a shared compressed buf can
* be anywhere in the hdr's list.
*
* The diagram below shows an example of an uncompressed ARC hdr that is
* sharing its data with an arc_buf_t (note that the shared uncompressed buf is
* the last element in the buf list):
*
* arc_buf_hdr_t
* +-----------+
* | |
* | |
* | |
* +-----------+
* l2arc_buf_hdr_t| |
* | |
* +-----------+
* l1arc_buf_hdr_t| |
* | | arc_buf_t (shared)
* | b_buf +------------>+---------+ arc_buf_t
* | | |b_next +---->+---------+
* | b_pabd +-+ |---------| |b_next +-->NULL
* +-----------+ | | | +---------+
* | |b_data +-+ | |
* | +---------+ | |b_data +-+
* +->+------+ | +---------+ |
* | | | |
* uncompressed | | | |
* data +------+ | |
* ^ +->+------+ |
* | uncompressed | | |
* | data | | |
* | +------+ |
* +---------------------------------+
*
* Writing to the ARC requires that the ARC first discard the hdr's b_pabd
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
* with the transformed data and will memcpy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
* were originally other readers of the buf's original hdr). This ensures that
* the ARC only needs to update a single buf and its hdr after a write occurs.
*
* When the L2ARC is in use, it will also take advantage of the b_pabd. The
* L2ARC will always write the contents of b_pabd to the L2ARC. This means
* that when compressed ARC is enabled that the L2ARC blocks are identical
* to the on-disk block in the main data pool. This provides a significant
* advantage since the ARC can leverage the bp's checksum when reading from the
* L2ARC to determine if the contents are valid. However, if the compressed
* ARC is disabled, then the L2ARC's block must be transformed to look
* like the physical block in the main data pool before comparing the
* checksum and determining its validity.
*
* The L1ARC has a slightly different system for storing encrypted data.
* Raw (encrypted + possibly compressed) data has a few subtle differences from
* data that is just compressed. The biggest difference is that it is not
* possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
* The other difference is that encryption cannot be treated as a suggestion.
* If a caller would prefer compressed data, but they actually wind up with
* uncompressed data, the worst thing that could happen is there might be a
* performance hit. If the caller requests encrypted data, however, we must be
* sure they actually get it or else secret information could be leaked. Raw
* data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
* may have both an encrypted version and a decrypted version of its data at
* once. When a caller needs a raw arc_buf_t, it is allocated and the data is
* copied out of this header. To avoid complications with b_pabd, raw buffers
* cannot be shared.
*/
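/*
 * Editorial worked example (numbers are illustrative, not from the upstream
 * comment): a 128K logical block that compresses to 32K on disk is held in
 * b_pabd as the 32K physical copy when compressed ARC is enabled. A
 * consumer that wants plain data gets its own 128K arc_buf_t data buffer,
 * which is the memory accounted for by the "overhead_size" kstat, while a
 * compressed-send consumer (the "zfs send" case above) shares the 32K
 * b_pabd directly and allocates no extra copy.
 */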
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>
#include <cityhash.h>
#include <sys/vdev_trim.h>
#include <sys/zfs_racct.h>
#include <sys/zstd/zstd.h>
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif
/*
* This thread's job is to keep enough free memory in the system, by
* calling arc_kmem_reap_soon() plus arc_reduce_target_size(), which improves
* arc_available_memory().
*/
static zthr_t *arc_reap_zthr;
/*
* This thread's job is to keep arc_size under arc_c, by calling
* arc_evict(), which improves arc_is_overflowing().
*/
static zthr_t *arc_evict_zthr;
static arc_buf_hdr_t **arc_state_evict_markers;
static int arc_state_evict_marker_count;
static kmutex_t arc_evict_lock;
static boolean_t arc_evict_needed = B_FALSE;
static clock_t arc_last_uncached_flush;
/*
* Count of bytes evicted since boot.
*/
static uint64_t arc_evict_count;
/*
* List of arc_evict_waiter_t's, representing threads waiting for the
* arc_evict_count to reach specific values.
*/
static list_t arc_evict_waiters;
/*
* When arc_is_overflowing(), arc_get_data_impl() waits for this percent of
* the requested amount of data to be evicted. For example, by default for
* every 2KB that's evicted, 1KB of it may be "reused" by a new allocation.
* Since this is above 100%, it ensures that progress is made towards getting
* arc_size under arc_c. Since this is finite, it ensures that allocations
* can still happen, even during the potentially long time that arc_size is
* more than arc_c.
*/
static uint_t zfs_arc_eviction_pct = 200;
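/*
 * Editorial worked example: with the default of 200, a thread allocating
 * 1KB while the ARC is overflowing waits for roughly 1KB * 200 / 100 = 2KB
 * to be evicted first, so at most half of the space freed by eviction can
 * be consumed immediately by new allocations.
 */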
/*
* The number of headers to evict in arc_evict_state_impl() before
* dropping the sublist lock and evicting from another sublist. A lower
* value means we're more likely to evict the "correct" header (i.e. the
* oldest header in the arc state), but comes with higher overhead
* (i.e. more invocations of arc_evict_state_impl()).
*/
static uint_t zfs_arc_evict_batch_limit = 10;
/* number of seconds before growing cache again */
uint_t arc_grow_retry = 5;
/*
* Minimum time between calls to arc_kmem_reap_soon().
*/
static const int arc_kmem_cache_reap_retry_ms = 1000;
/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
static int zfs_arc_overflow_shift = 8;
/* log2(fraction of arc to reclaim) */
uint_t arc_shrink_shift = 7;
/* percent of pagecache to reclaim arc to */
#ifdef _KERNEL
uint_t zfs_arc_pc_percent = 0;
#endif
/*
* log2(fraction of ARC which must be free to allow growing).
* I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
* when reading a new block into the ARC, we will evict an equal-sized block
* from the ARC.
*
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
* we will still not allow it to grow.
*/
uint_t arc_no_grow_shift = 5;
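/*
 * Editorial worked example: with arc_c = 4GB, arc_no_grow_shift = 5 puts
 * the "stop growing" threshold at 4GB >> 5 = 128MB of free memory, while
 * arc_shrink_shift = 7 makes a single shrink step 4GB >> 7 = 32MB. Because
 * the threshold is larger than one shrink step, freeing that 32MB is not by
 * itself enough to let the ARC start growing again.
 */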
/*
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
static uint_t arc_min_prefetch_ms;
static uint_t arc_min_prescient_prefetch_ms;
/*
* If this percent of memory is free, don't throttle.
*/
uint_t arc_lotsfree_percent = 10;
/*
* The arc has filled available memory and has now warmed up.
*/
boolean_t arc_warm;
/*
* These tunables are for performance analysis.
*/
uint64_t zfs_arc_max = 0;
uint64_t zfs_arc_min = 0;
static uint64_t zfs_arc_dnode_limit = 0;
static uint_t zfs_arc_dnode_reduce_percent = 10;
static uint_t zfs_arc_grow_retry = 0;
static uint_t zfs_arc_shrink_shift = 0;
uint_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
* ARC dirty data constraints for arc_tempreserve_space() throttle:
* * total dirty data limit
* * anon block dirty limit
* * each pool's anon allowance
*/
static const unsigned long zfs_arc_dirty_limit_percent = 50;
static const unsigned long zfs_arc_anon_limit_percent = 25;
static const unsigned long zfs_arc_pool_dirty_percent = 20;
/*
* Enable or disable compressed arc buffers.
*/
int zfs_compressed_arc_enabled = B_TRUE;
/*
* Balance between metadata and data on ghost hits. Values above 100
* increase metadata caching by proportionally reducing effect of ghost
* data hits on target data/metadata rate.
*/
static uint_t zfs_arc_meta_balance = 500;
/*
* Percentage that can be consumed by dnodes of ARC meta buffers.
*/
static uint_t zfs_arc_dnode_limit_percent = 10;
/*
* These tunables are Linux-specific
*/
static uint64_t zfs_arc_sys_free = 0;
static uint_t zfs_arc_min_prefetch_ms = 0;
static uint_t zfs_arc_min_prescient_prefetch_ms = 0;
static uint_t zfs_arc_lotsfree_percent = 10;
/*
* Number of arc_prune threads
*/
static int zfs_arc_prune_task_threads = 1;
/* The 7 states: */
arc_state_t ARC_anon;
arc_state_t ARC_mru;
arc_state_t ARC_mru_ghost;
arc_state_t ARC_mfu;
arc_state_t ARC_mfu_ghost;
arc_state_t ARC_l2c_only;
arc_state_t ARC_uncached;
arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "iohits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "demand_data_hits", KSTAT_DATA_UINT64 },
{ "demand_data_iohits", KSTAT_DATA_UINT64 },
{ "demand_data_misses", KSTAT_DATA_UINT64 },
{ "demand_metadata_hits", KSTAT_DATA_UINT64 },
{ "demand_metadata_iohits", KSTAT_DATA_UINT64 },
{ "demand_metadata_misses", KSTAT_DATA_UINT64 },
{ "prefetch_data_hits", KSTAT_DATA_UINT64 },
{ "prefetch_data_iohits", KSTAT_DATA_UINT64 },
{ "prefetch_data_misses", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_iohits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
{ "mru_hits", KSTAT_DATA_UINT64 },
{ "mru_ghost_hits", KSTAT_DATA_UINT64 },
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "uncached_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "access_skip", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
{ "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mfu", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mru", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
{ "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "meta", KSTAT_DATA_UINT64 },
{ "pd", KSTAT_DATA_UINT64 },
{ "pm", KSTAT_DATA_UINT64 },
{ "c", KSTAT_DATA_UINT64 },
{ "c_min", KSTAT_DATA_UINT64 },
{ "c_max", KSTAT_DATA_UINT64 },
{ "size", KSTAT_DATA_UINT64 },
{ "compressed_size", KSTAT_DATA_UINT64 },
{ "uncompressed_size", KSTAT_DATA_UINT64 },
{ "overhead_size", KSTAT_DATA_UINT64 },
{ "hdr_size", KSTAT_DATA_UINT64 },
{ "data_size", KSTAT_DATA_UINT64 },
{ "metadata_size", KSTAT_DATA_UINT64 },
{ "dbuf_size", KSTAT_DATA_UINT64 },
{ "dnode_size", KSTAT_DATA_UINT64 },
{ "bonus_size", KSTAT_DATA_UINT64 },
#if defined(COMPAT_FREEBSD11)
{ "other_size", KSTAT_DATA_UINT64 },
#endif
{ "anon_size", KSTAT_DATA_UINT64 },
{ "anon_data", KSTAT_DATA_UINT64 },
{ "anon_metadata", KSTAT_DATA_UINT64 },
{ "anon_evictable_data", KSTAT_DATA_UINT64 },
{ "anon_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_size", KSTAT_DATA_UINT64 },
{ "mru_data", KSTAT_DATA_UINT64 },
{ "mru_metadata", KSTAT_DATA_UINT64 },
{ "mru_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_size", KSTAT_DATA_UINT64 },
{ "mru_ghost_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_size", KSTAT_DATA_UINT64 },
{ "mfu_data", KSTAT_DATA_UINT64 },
{ "mfu_metadata", KSTAT_DATA_UINT64 },
{ "mfu_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_size", KSTAT_DATA_UINT64 },
{ "mfu_ghost_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "uncached_size", KSTAT_DATA_UINT64 },
{ "uncached_data", KSTAT_DATA_UINT64 },
{ "uncached_metadata", KSTAT_DATA_UINT64 },
{ "uncached_evictable_data", KSTAT_DATA_UINT64 },
{ "uncached_evictable_metadata", KSTAT_DATA_UINT64 },
{ "l2_hits", KSTAT_DATA_UINT64 },
{ "l2_misses", KSTAT_DATA_UINT64 },
{ "l2_prefetch_asize", KSTAT_DATA_UINT64 },
{ "l2_mru_asize", KSTAT_DATA_UINT64 },
{ "l2_mfu_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_data_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_metadata_asize", KSTAT_DATA_UINT64 },
{ "l2_feeds", KSTAT_DATA_UINT64 },
{ "l2_rw_clash", KSTAT_DATA_UINT64 },
{ "l2_read_bytes", KSTAT_DATA_UINT64 },
{ "l2_write_bytes", KSTAT_DATA_UINT64 },
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
{ "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_asize", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
{ "l2_log_blk_writes", KSTAT_DATA_UINT64 },
{ "l2_log_blk_avg_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_count", KSTAT_DATA_UINT64 },
{ "l2_data_to_meta_ratio", KSTAT_DATA_UINT64 },
{ "l2_rebuild_success", KSTAT_DATA_UINT64 },
{ "l2_rebuild_unsupported", KSTAT_DATA_UINT64 },
{ "l2_rebuild_io_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_dh_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_cksum_lb_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_lowmem", KSTAT_DATA_UINT64 },
{ "l2_rebuild_size", KSTAT_DATA_UINT64 },
{ "l2_rebuild_asize", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs_precached", KSTAT_DATA_UINT64 },
{ "l2_rebuild_log_blks", KSTAT_DATA_UINT64 },
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "memory_direct_count", KSTAT_DATA_UINT64 },
{ "memory_indirect_count", KSTAT_DATA_UINT64 },
{ "memory_all_bytes", KSTAT_DATA_UINT64 },
{ "memory_free_bytes", KSTAT_DATA_UINT64 },
{ "memory_available_bytes", KSTAT_DATA_INT64 },
{ "arc_no_grow", KSTAT_DATA_UINT64 },
{ "arc_tempreserve", KSTAT_DATA_UINT64 },
{ "arc_loaned_bytes", KSTAT_DATA_UINT64 },
{ "arc_prune", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_dnode_limit", KSTAT_DATA_UINT64 },
{ "async_upgrade_sync", KSTAT_DATA_UINT64 },
{ "predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_iohit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "prescient_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "demand_iohit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "arc_need_free", KSTAT_DATA_UINT64 },
{ "arc_sys_free", KSTAT_DATA_UINT64 },
{ "arc_raw_size", KSTAT_DATA_UINT64 },
{ "cached_only_in_progress", KSTAT_DATA_UINT64 },
{ "abd_chunk_waste_size", KSTAT_DATA_UINT64 },
};
arc_sums_t arc_sums;
#define ARCSTAT_MAX(stat, val) { \
uint64_t m; \
while ((val) > (m = arc_stats.stat.value.ui64) && \
(m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
continue; \
}
/*
* We define a macro to allow ARC hits/misses to be easily broken down by
* two separate conditions, giving a total of four different subtypes for
* each of hits and misses (so eight statistics total).
*/
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
if (cond1) { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
} \
} else { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
} \
}
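/*
 * Editorial expansion example (argument names are hypothetical): an
 * invocation like ARCSTAT_CONDSTAT(is_demand, demand, prefetch, is_meta,
 * metadata, data, hits) bumps exactly one of arcstat_demand_metadata_hits,
 * arcstat_demand_data_hits, arcstat_prefetch_metadata_hits or
 * arcstat_prefetch_data_hits, depending on the two conditions.
 */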
/*
* This macro allows us to use kstats as floating averages. Each time we
* update this kstat, we first factor it and the update value by
* ARCSTAT_F_AVG_FACTOR to shrink the new value's contribution to the overall
* average. This macro assumes that integer loads and stores are atomic, but
* is not safe for multiple writers updating the kstat in parallel (only the
* last writer's update will remain).
*/
#define ARCSTAT_F_AVG_FACTOR 3
#define ARCSTAT_F_AVG(stat, value) \
do { \
uint64_t x = ARCSTAT(stat); \
x = x - x / ARCSTAT_F_AVG_FACTOR + \
(value) / ARCSTAT_F_AVG_FACTOR; \
ARCSTAT(stat) = x; \
} while (0)
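/*
 * Editorial note: with ARCSTAT_F_AVG_FACTOR = 3 this is an exponential
 * moving average with weight 1/3 on the new sample:
 * x' = x - x/3 + value/3 = (2/3)x + (1/3)value (integer division).
 * For example, updating a stored average of 900 with a sample of 300
 * yields 900 - 300 + 100 = 700.
 */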
static kstat_t *arc_ksp;
/*
* There are several ARC variables that are critical to export as kstats --
* but we don't want to have to grovel around in the kstat whenever we wish to
* manipulate them. For these variables, we therefore define them to be in
* terms of the statistic variable. This assures that we are not introducing
* the possibility of inconsistency by having shadow copies of the variables,
* while still allowing the code to be readable.
*/
#define arc_tempreserve ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes)
#define arc_dnode_limit ARCSTAT(arcstat_dnode_limit) /* max size for dnodes */
#define arc_need_free ARCSTAT(arcstat_need_free) /* waiting to be evicted */
hrtime_t arc_growtime;
list_t arc_prune_list;
kmutex_t arc_prune_mtx;
taskq_t *arc_prune_taskq;
#define GHOST_STATE(state) \
((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
(state) == arc_l2c_only)
#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_PRESCIENT_PREFETCH(hdr) \
((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr) \
((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_UNCACHED(hdr) ((hdr)->b_flags & ARC_FLAG_UNCACHED)
#define HDR_L2_READING(hdr) \
(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define HDR_PROTECTED(hdr) ((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define HDR_NOAUTH(hdr) ((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
#define HDR_ISTYPE_METADATA(hdr) \
((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define HDR_HAS_RABD(hdr) \
(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) && \
(hdr)->b_crypt_hdr.b_rabd != NULL)
#define HDR_ENCRYPTED(hdr) \
(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define HDR_AUTHENTICATED(hdr) \
(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
#define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
#define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define ARC_BUF_ENCRYPTED(buf) ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
/*
* Other sizes
*/
#define HDR_FULL_CRYPT_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_FULL_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
/*
* Hash table routines
*/
#define BUF_LOCKS 2048
typedef struct buf_hash_table {
uint64_t ht_mask;
arc_buf_hdr_t **ht_table;
kmutex_t ht_locks[BUF_LOCKS] ____cacheline_aligned;
} buf_hash_table_t;
static buf_hash_table_t buf_hash_table;
#define BUF_HASH_INDEX(spa, dva, birth) \
(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK(idx) (&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define HDR_LOCK(hdr) \
(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
uint64_t zfs_crc64_table[256];
/*
* Level 2 ARC
*/
#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
#define L2ARC_HEADROOM 2 /* num of writes */
/*
* If we discover during ARC scan any buffers to be compressed, we boost
* our headroom for the next scanning cycle by this percentage multiple.
*/
#define L2ARC_HEADROOM_BOOST 200
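/*
 * Editorial worked example (interpretation of the boost): with the default
 * L2ARC_HEADROOM of 2 and an 8MB l2arc_write_max, a feed cycle scans about
 * 2 * 8MB = 16MB deep into each eligible ARC list; a boost value of 200
 * (percent) roughly doubles that to 32MB once compressed buffers have been
 * seen during a scan.
 */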
#define L2ARC_FEED_SECS 1 /* caching interval secs */
#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
/*
* We can feed L2ARC from two states of ARC buffers, mru and mfu,
* and each of these states has two types: data and metadata.
*/
#define L2ARC_FEED_TYPES 4
/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
uint64_t l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE; /* turbo warmup */
int l2arc_norw = B_FALSE; /* no reads during writes */
static uint_t l2arc_meta_percent = 33; /* limit on headers size */
/*
* L2ARC Internals
*/
static list_t L2ARC_dev_list; /* device list */
static list_t *l2arc_dev_list; /* device list pointer */
static kmutex_t l2arc_dev_mtx; /* device list mutex */
static l2arc_dev_t *l2arc_dev_last; /* last device used */
static list_t L2ARC_free_on_write; /* free after write buf list */
static list_t *l2arc_free_on_write; /* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
static uint64_t l2arc_ndev; /* number of devices */
typedef struct l2arc_read_callback {
arc_buf_hdr_t *l2rcb_hdr; /* read header */
blkptr_t l2rcb_bp; /* original blkptr */
zbookmark_phys_t l2rcb_zb; /* original bookmark */
int l2rcb_flags; /* original flags */
abd_t *l2rcb_abd; /* temporary buffer */
} l2arc_read_callback_t;
typedef struct l2arc_data_free {
/* protected by l2arc_free_on_write_mtx */
abd_t *l2df_abd;
size_t l2df_size;
arc_buf_contents_t l2df_type;
list_node_t l2df_list_node;
} l2arc_data_free_t;
typedef enum arc_fill_flags {
ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */
ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */
ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */
ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */
ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */
} arc_fill_flags_t;
typedef enum arc_ovf_level {
ARC_OVF_NONE, /* ARC within target size. */
ARC_OVF_SOME, /* ARC is slightly overflowed. */
ARC_OVF_SEVERE /* ARC is severely overflowed. */
} arc_ovf_level_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static kmutex_t l2arc_rebuild_thr_lock;
static kcondvar_t l2arc_rebuild_thr_cv;
enum arc_hdr_alloc_flags {
ARC_HDR_ALLOC_RDATA = 0x1,
ARC_HDR_USE_RESERVE = 0x4,
ARC_HDR_ALLOC_LINEAR = 0x8,
};
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, const void *, int);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, const void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, const void *, int);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, const void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, const void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size,
const void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
static void arc_hdr_destroy(arc_buf_hdr_t *);
static void arc_access(arc_buf_hdr_t *, arc_flags_t, boolean_t);
static void arc_buf_watch(arc_buf_t *);
static void arc_change_state(arc_state_t *, arc_buf_hdr_t *);
static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
static void l2arc_do_free_on_write(void);
static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only);
#define l2arc_hdr_arcstats_increment(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE)
#define l2arc_hdr_arcstats_decrement(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_FALSE)
#define l2arc_hdr_arcstats_increment_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_TRUE)
#define l2arc_hdr_arcstats_decrement_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_TRUE)
/*
* l2arc_exclude_special : A zfs module parameter that controls whether buffers
* present on special vdevs are eligible for caching in L2ARC. If
* set to 1, exclude dbufs on special vdevs from being cached to
* L2ARC.
*/
int l2arc_exclude_special = 0;
/*
* l2arc_mfuonly : A ZFS module parameter that controls whether only MFU
* metadata and data are cached from ARC into L2ARC.
*/
static int l2arc_mfuonly = 0;
/*
* L2ARC TRIM
* l2arc_trim_ahead : A ZFS module parameter that controls how much ahead of
* the current write size (l2arc_write_max) we should TRIM if we
* have filled the device. It is defined as a percentage of the
* write size. If set to 100 we trim twice the space required to
* accommodate upcoming writes. A minimum of 64MB will be trimmed.
* It also enables TRIM of the whole L2ARC device upon creation or
* addition to an existing pool or if the header of the device is
* invalid upon importing a pool or onlining a cache device. The
* default is 0, which disables TRIM on L2ARC altogether as it can
* put significant stress on the underlying storage devices. This
* will vary depending on how well the specific device handles
* these commands.
*/
static uint64_t l2arc_trim_ahead = 0;
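/*
 * Editorial worked example: with l2arc_write_max = 8MB, setting
 * l2arc_trim_ahead = 100 requests about 8MB + 100% = 16MB of TRIM ahead of
 * the write hand; because at least 64MB is always trimmed once the device
 * has filled, 64MB would still be the effective amount in this case.
 */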
/*
* Performance tuning of L2ARC persistence:
*
* l2arc_rebuild_enabled : A ZFS module parameter that controls whether adding
* an L2ARC device (either at pool import or later) will attempt
* to rebuild L2ARC buffer contents.
* l2arc_rebuild_blocks_min_l2size : A ZFS module parameter that controls
* whether log blocks are written to the L2ARC device. If the L2ARC
* device is less than 1GB, the amount of data l2arc_evict()
* evicts is significant compared to the amount of restored L2ARC
* data. In this case do not write log blocks in L2ARC in order
* not to waste space.
*/
static int l2arc_rebuild_enabled = B_TRUE;
static uint64_t l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;
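/*
 * Editorial note: with the default minimum of 1GB, cache devices smaller
 * than that never have log blocks written to them, so there is nothing for
 * a rebuild to restore after the pool is exported and re-imported.
 */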
/* L2ARC persistence rebuild control routines. */
void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
static __attribute__((noreturn)) void l2arc_dev_rebuild_thread(void *arg);
static int l2arc_rebuild(l2arc_dev_t *dev);
/* L2ARC persistence read I/O routines. */
static int l2arc_dev_hdr_read(l2arc_dev_t *dev);
static int l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io);
static zio_t *l2arc_log_blk_fetch(vdev_t *vd,
const l2arc_log_blkptr_t *lp, l2arc_log_blk_phys_t *lb);
static void l2arc_log_blk_fetch_abort(zio_t *zio);
/* L2ARC persistence block restoration routines. */
static void l2arc_log_blk_restore(l2arc_dev_t *dev,
const l2arc_log_blk_phys_t *lb, uint64_t lb_asize);
static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le,
l2arc_dev_t *dev);
/* L2ARC persistence write I/O routines. */
-static void l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
+static uint64_t l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
l2arc_write_callback_t *cb);
/* L2ARC persistence auxiliary routines. */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *lbp);
static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev,
const arc_buf_hdr_t *ab);
boolean_t l2arc_range_check_overlap(uint64_t bottom,
uint64_t top, uint64_t check);
static void l2arc_blk_fetch_done(zio_t *zio);
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev);
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}
#define HDR_EMPTY(hdr) \
((hdr)->b_dva.dva_word[0] == 0 && \
(hdr)->b_dva.dva_word[1] == 0)
#define HDR_EMPTY_OR_LOCKED(hdr) \
(HDR_EMPTY(hdr) || MUTEX_HELD(HDR_LOCK(hdr)))
#define HDR_EQUAL(spa, dva, birth, hdr) \
((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
hdr->b_dva.dva_word[0] = 0;
hdr->b_dva.dva_word[1] = 0;
hdr->b_birth = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t birth = BP_PHYSICAL_BIRTH(bp);
uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *hdr;
mutex_enter(hash_lock);
for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
hdr = hdr->b_hash_next) {
if (HDR_EQUAL(spa, dva, birth, hdr)) {
*lockp = hash_lock;
return (hdr);
}
}
mutex_exit(hash_lock);
*lockp = NULL;
return (NULL);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
* If lockp == NULL, the caller is assumed to already hold the hash lock.
*/
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *fhdr;
uint32_t i;
ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
ASSERT(hdr->b_birth != 0);
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (lockp != NULL) {
*lockp = hash_lock;
mutex_enter(hash_lock);
} else {
ASSERT(MUTEX_HELD(hash_lock));
}
for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
fhdr = fhdr->b_hash_next, i++) {
if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
return (fhdr);
}
hdr->b_hash_next = buf_hash_table.ht_table[idx];
buf_hash_table.ht_table[idx] = hdr;
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
if (i > 0) {
ARCSTAT_BUMP(arcstat_hash_collisions);
if (i == 1)
ARCSTAT_BUMP(arcstat_hash_chains);
ARCSTAT_MAX(arcstat_hash_chain_max, i);
}
uint64_t he = atomic_inc_64_nv(
&arc_stats.arcstat_hash_elements.value.ui64);
ARCSTAT_MAX(arcstat_hash_elements_max, he);
return (NULL);
}
static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
arc_buf_hdr_t *fhdr, **hdrp;
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
ASSERT(HDR_IN_HASH_TABLE(hdr));
hdrp = &buf_hash_table.ht_table[idx];
while ((fhdr = *hdrp) != hdr) {
ASSERT3P(fhdr, !=, NULL);
hdrp = &fhdr->b_hash_next;
}
*hdrp = hdr->b_hash_next;
hdr->b_hash_next = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
atomic_dec_64(&arc_stats.arcstat_hash_elements.value.ui64);
if (buf_hash_table.ht_table[idx] &&
buf_hash_table.ht_table[idx]->b_hash_next == NULL)
ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
* Global data structures and functions for the buf kmem cache.
*/
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_full_crypt_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;
static void
buf_fini(void)
{
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel
*/
vmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
for (int i = 0; i < BUF_LOCKS; i++)
mutex_destroy(BUF_HASH_LOCK(i));
kmem_cache_destroy(hdr_full_cache);
kmem_cache_destroy(hdr_full_crypt_cache);
kmem_cache_destroy(hdr_l2only_cache);
kmem_cache_destroy(buf_cache);
}
/*
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
memset(hdr, 0, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
#ifdef ZFS_DEBUG
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
#endif
multilist_link_init(&hdr->b_l1hdr.b_arc_node);
list_link_init(&hdr->b_l2hdr.b_l2node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
return (0);
}
static int
hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
hdr_full_cons(vbuf, unused, kmflag);
memset(&hdr->b_crypt_hdr, 0, sizeof (hdr->b_crypt_hdr));
arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
return (0);
}
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
memset(hdr, 0, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
}
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
arc_buf_t *buf = vbuf;
memset(buf, 0, sizeof (arc_buf_t));
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
return (0);
}
/*
* Destructor callback - called when a cached buf is
* no longer required.
*/
static void
hdr_full_dest(void *vbuf, void *unused)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
cv_destroy(&hdr->b_l1hdr.b_cv);
zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
#ifdef ZFS_DEBUG
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
#endif
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
static void
hdr_full_crypt_dest(void *vbuf, void *unused)
{
(void) vbuf, (void) unused;
hdr_full_dest(vbuf, unused);
arc_space_return(sizeof (((arc_buf_hdr_t *)NULL)->b_crypt_hdr),
ARC_SPACE_HDRS);
}
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
(void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}
static void
buf_dest(void *vbuf, void *unused)
{
(void) unused;
(void) vbuf;
arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
static void
buf_init(void)
{
uint64_t *ct = NULL;
uint64_t hsize = 1ULL << 12;
int i, j;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
while (hsize * zfs_arc_average_blocksize < arc_all_memory())
hsize <<= 1;
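/*
 * Editorial worked example: on a 16GB machine with the default 8K average
 * block size, hsize settles at roughly 2^21 (2M buckets), i.e. 16MB of
 * table with 8-byte pointers -- matching the "1MB per GB" estimate above.
 */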
retry:
buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
buf_hash_table.ht_table =
vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
buf_hash_table.ht_table =
kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
if (buf_hash_table.ht_table == NULL) {
ASSERT(hsize > (1ULL << 8));
hsize >>= 1;
goto retry;
}
hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0);
hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt",
HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest,
NULL, NULL, NULL, 0);
hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL,
NULL, NULL, 0);
buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < 256; i++)
for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
for (i = 0; i < BUF_LOCKS; i++)
mutex_init(BUF_HASH_LOCK(i), NULL, MUTEX_DEFAULT, NULL);
}
#define ARC_MINTIME (hz>>4) /* 62 ms */
/*
* This is the size that the buf occupies in memory. If the buf is compressed,
* it will correspond to the compressed size. You should use this method of
* getting the buf size unless you explicitly need the logical size.
*/
uint64_t
arc_buf_size(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
}
uint64_t
arc_buf_lsize(arc_buf_t *buf)
{
return (HDR_GET_LSIZE(buf->b_hdr));
}
/*
* This function will return B_TRUE if the buffer is encrypted in memory.
* This buffer can be decrypted by calling arc_untransform().
*/
boolean_t
arc_is_encrypted(arc_buf_t *buf)
{
return (ARC_BUF_ENCRYPTED(buf) != 0);
}
/*
* Returns B_TRUE if the buffer represents data that has not had its MAC
* verified yet.
*/
boolean_t
arc_is_unauthenticated(arc_buf_t *buf)
{
return (HDR_NOAUTH(buf->b_hdr) != 0);
}
void
arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
uint8_t *iv, uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_PROTECTED(hdr));
memcpy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
memcpy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
memcpy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
/*
* Indicates how this buffer is compressed in memory. If it is not compressed
* the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
* arc_untransform() as long as it is also unencrypted.
*/
enum zio_compress
arc_get_compression(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
}
/*
* Return the compression algorithm used to store this data in the ARC. If ARC
* compression is enabled or this is an encrypted block, this will be the same
* as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
*/
static inline enum zio_compress
arc_hdr_get_compress(arc_buf_hdr_t *hdr)
{
return (HDR_COMPRESSION_ENABLED(hdr) ?
HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF);
}
uint8_t
arc_get_complevel(arc_buf_t *buf)
{
return (buf->b_hdr->b_complevel);
}
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
boolean_t shared = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
IMPLY(shared, ARC_BUF_SHARED(buf));
IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
/*
* It would be nice to assert arc_can_share() too, but the "hdr isn't
* already being shared" requirement prevents us from doing that.
*/
return (shared);
}
/*
* Free the checksum associated with this header. If there is no checksum, this
* is a no-op.
*/
static inline void
arc_cksum_free(arc_buf_hdr_t *hdr)
{
#ifdef ZFS_DEBUG
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
hdr->b_l1hdr.b_freeze_cksum = NULL;
}
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
#endif
}
/*
* Return true iff at least one of the bufs on hdr is not compressed.
* Encrypted buffers count as compressed.
*/
static boolean_t
arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
{
ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr));
for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
if (!ARC_BUF_COMPRESSED(b)) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
* matches the checksum that is stored in the hdr. If there is no checksum,
* or if the buf is compressed, this is a no-op.
*/
static void
arc_cksum_verify(arc_buf_t *buf)
{
#ifdef ZFS_DEBUG
arc_buf_hdr_t *hdr = buf->b_hdr;
zio_cksum_t zc;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
panic("buffer modified while frozen!");
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
#endif
}
/*
* This function makes the assumption that data stored in the L2ARC
* will be transformed exactly as it is in the main pool. Because of
* this we can verify the checksum against the reading process's bp.
*/
static boolean_t
arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
{
ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
/*
* Block pointers always store the checksum for the logical data.
* If the block pointer has the gang bit set, then the checksum
* it represents is for the reconstituted data and not for an
* individual gang member. The zio pipeline, however, must be able to
* determine the checksum of each of the gang constituents so it
* treats the checksum comparison differently than what we need
* for l2arc blocks. This prevents us from using the
* zio_checksum_error() interface directly. Instead we must call the
* zio_checksum_error_impl() so that we can ensure the checksum is
* generated using the correct checksum algorithm and accounts for the
* logical I/O size and not just a gang fragment.
*/
return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
zio->io_offset, NULL) == 0);
}
/*
* Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
* checksum and attaches it to the buf's hdr so that we can ensure that the buf
* isn't modified later on. If buf is compressed or there is already a checksum
* on the hdr, this is a no-op (we only checksum uncompressed bufs).
*/
static void
arc_cksum_compute(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
#ifdef ZFS_DEBUG
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(!ARC_BUF_COMPRESSED(buf));
hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
KM_SLEEP);
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
hdr->b_l1hdr.b_freeze_cksum);
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
#endif
arc_buf_watch(buf);
}
#ifndef _KERNEL
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
(void) sig, (void) unused;
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch) {
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
#else
(void) buf;
#endif
}
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch)
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ));
#else
(void) buf;
#endif
}
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
arc_buf_contents_t type;
if (HDR_ISTYPE_METADATA(hdr)) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
}
VERIFY3U(hdr->b_type, ==, type);
return (type);
}
boolean_t
arc_is_metadata(arc_buf_t *buf)
{
return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}
static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
switch (type) {
case ARC_BUFC_DATA:
/* metadata field is 0 if buffer contains normal data */
return (0);
case ARC_BUFC_METADATA:
return (ARC_FLAG_BUFC_METADATA);
default:
break;
}
panic("undefined ARC buffer type!");
return ((uint32_t)-1);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
arc_cksum_verify(buf);
/*
* Compressed buffers do not manipulate the b_freeze_cksum.
*/
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(buf->b_hdr));
arc_cksum_compute(buf);
}
/*
* The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
* the following functions should be used to ensure that the flags are
* updated in a thread-safe way. When manipulating the flags either
* the hash_lock must be held or the hdr must be undiscoverable. This
* ensures that we're not racing with any other threads when updating
* the flags.
*/
static inline void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags |= flags;
}
static inline void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags &= ~flags;
}
/*
* Setting the compression bits in the arc_buf_hdr_t's b_flags is
* done in a special way since we have to clear and set bits
* at the same time. Consumers that wish to set the compression bits
* must use this function to ensure that the flags are updated in
* a thread-safe manner.
*/
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Holes and embedded blocks will always have a psize = 0, so
* we ignore the compression of the blkptr and simply mark them
* as uncompressed.
*/
if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(HDR_COMPRESSION_ENABLED(hdr));
}
HDR_SET_COMPRESS(hdr, cmp);
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
}
/*
* Looks for another buf on the same hdr which has the data decompressed, copies
* from it, and returns true. If no such buf exists, returns false.
*/
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t copied = B_FALSE;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(!ARC_BUF_COMPRESSED(buf));
for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
from = from->b_next) {
/* can't use our own data buffer */
if (from == buf) {
continue;
}
if (!ARC_BUF_COMPRESSED(from)) {
memcpy(buf->b_data, from->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
}
#ifdef ZFS_DEBUG
/*
* There were no decompressed bufs, so there should not be a
* checksum on the hdr either.
*/
if (zfs_flags & ZFS_DEBUG_MODIFY)
EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
#endif
return (copied);
}
/*
* Allocates an ARC buf header that's in an evicted & L2-cached state.
* This is used during l2arc reconstruction to make empty ARC buffers
* which circumvent the regular disk->arc->l2arc path and instead come
* into being in the reverse order, i.e. l2arc->arc.
*/
static arc_buf_hdr_t *
arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth,
enum zio_compress compress, uint8_t complevel, boolean_t protected,
boolean_t prefetch, arc_state_type_t arcs_state)
{
arc_buf_hdr_t *hdr;
ASSERT(size != 0);
hdr = kmem_cache_alloc(hdr_l2only_cache, KM_SLEEP);
hdr->b_birth = birth;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
HDR_SET_LSIZE(hdr, size);
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
if (prefetch)
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);
hdr->b_dva = dva;
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_daddr = daddr;
hdr->b_l2hdr.b_arcs_state = arcs_state;
return (hdr);
}
/*
* Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
*/
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
uint64_t size;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
HDR_GET_PSIZE(hdr) > 0) {
size = HDR_GET_PSIZE(hdr);
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
size = HDR_GET_LSIZE(hdr);
}
return (size);
}
static int
arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
{
int ret;
uint64_t csize;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
void *tmpbuf = NULL;
abd_t *abd = hdr->b_l1hdr.b_pabd;
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_AUTHENTICATED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* The MAC is calculated on the compressed data that is stored on disk.
* However, if compressed arc is disabled we will only have the
* decompressed data available to us now. Compress it into a temporary
* abd so we can verify the MAC. The performance overhead of this will
* be relatively low, since most objects in an encrypted objset will
* be encrypted (instead of authenticated) anyway.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, &tmpbuf, lsize, hdr->b_complevel);
ASSERT3P(tmpbuf, !=, NULL);
ASSERT3U(csize, <=, psize);
abd = abd_get_from_buf(tmpbuf, lsize);
abd_take_ownership_of_buf(abd, B_TRUE);
abd_zero_off(abd, csize, psize - csize);
}
/*
* Authentication is best effort. We authenticate whenever the key is
* available. If we succeed we clear ARC_FLAG_NOAUTH.
*/
if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
ASSERT3U(lsize, ==, psize);
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd,
psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
} else {
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize,
hdr->b_crypt_hdr.b_mac);
}
if (ret == 0)
arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH);
else if (ret != ENOENT)
goto error;
if (tmpbuf != NULL)
abd_free(abd);
return (0);
error:
if (tmpbuf != NULL)
abd_free(abd);
return (ret);
}
/*
* This function will take a header that only has raw encrypted data in
* b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
* b_l1hdr.b_pabd. If designated in the header flags, this function will
* also decompress the data.
*/
static int
arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
{
int ret;
abd_t *cabd = NULL;
void *tmp = NULL;
boolean_t no_crypt = B_FALSE;
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_ENCRYPTED(hdr));
arc_hdr_alloc_abd(hdr, 0);
ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd,
hdr->b_crypt_hdr.b_rabd, &no_crypt);
if (ret != 0)
goto error;
if (no_crypt) {
abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
}
/*
* If this header has disabled arc compression but the b_pabd is
* compressed after decrypting it, we need to decompress the newly
* decrypted data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
/*
* We want to make sure that we are correctly honoring the
* zfs_abd_scatter_enabled setting, so we allocate an abd here
* and then loan a buffer from it, rather than allocating a
* linear buffer and wrapping it in an abd later.
*/
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, 0);
tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf(cabd, tmp, arc_hdr_size(hdr));
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
}
return (0);
error:
arc_hdr_free_abd(hdr, B_FALSE);
if (cabd != NULL)
arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr);
return (ret);
}
/*
* This function is called during arc_buf_fill() to prepare the header's
* abd plaintext pointer for use. This involves authenticating protected
* data and decrypting encrypted data into the plaintext abd.
*/
static int
arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa,
const zbookmark_phys_t *zb, boolean_t noauth)
{
int ret;
ASSERT(HDR_PROTECTED(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
if (HDR_NOAUTH(hdr) && !noauth) {
/*
* The caller requested authenticated data but our data has
* not been authenticated yet. Verify the MAC now if we can.
*/
ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset);
if (ret != 0)
goto error;
} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
/*
* If we only have the encrypted version of the data, but the
* unencrypted version was requested we take this opportunity
* to store the decrypted version in the header for future use.
*/
ret = arc_hdr_decrypt(hdr, spa, zb);
if (ret != 0)
goto error;
}
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (0);
error:
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (ret);
}
/*
* This function is used by the dbuf code to decrypt bonus buffers in place.
* The dbuf code itself doesn't have any locking for decrypting a shared dnode
* block, so we use the hash lock here to protect against concurrent calls to
* arc_buf_fill().
*/
static void
arc_buf_untransform_in_place(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_ENCRYPTED(hdr));
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
hdr->b_crypt_hdr.b_ebufcnt -= 1;
}
/*
* Given a buf that has a data buffer attached to it, this function will
* efficiently fill the buf with data of the specified compression setting from
* the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
* are already sharing a data buf, no copy is performed.
*
* If the buf is marked as compressed but uncompressed data was requested, this
* will allocate a new data buffer for the buf, remove that flag, and fill the
* buf with uncompressed data. You can't request a compressed buf on a hdr with
* uncompressed data, and (since we haven't added support for it yet) if you
* want compressed data your buf must already be marked as compressed and have
* the correct-sized data buffer.
*/
static int
arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
arc_fill_flags_t flags)
{
int error = 0;
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t hdr_compressed =
(arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0;
boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0;
dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr);
ASSERT3P(buf->b_data, !=, NULL);
IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf));
IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, HDR_ENCRYPTED(hdr));
IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf));
IMPLY(encrypted, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, !ARC_BUF_SHARED(buf));
/*
* If the caller wanted encrypted data we just need to copy it from
* b_rabd and potentially byteswap it. We won't be able to do any
* further transforms on it.
*/
if (encrypted) {
ASSERT(HDR_HAS_RABD(hdr));
abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
goto byteswap;
}
/*
* Adjust encrypted and authenticated headers to accommodate
* the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
* allowed to fail decryption due to keys not being loaded
* without being marked as an IO error.
*/
if (HDR_PROTECTED(hdr)) {
error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
zb, !!(flags & ARC_FILL_NOAUTH));
if (error == EACCES && (flags & ARC_FILL_IN_PLACE) != 0) {
return (error);
} else if (error != 0) {
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (error);
}
}
/*
* There is a special case here for dnode blocks which are
* decrypting their bonus buffers. These blocks may request to
* be decrypted in-place. This is necessary because there may
* be many dnodes pointing into this buffer and there is
* currently no method to synchronize replacing the backing
* b_data buffer and updating all of the pointers. Here we use
* the hash lock to ensure there are no races. If the need
* arises for other types to be decrypted in-place, they must
* add handling here as well.
*/
if ((flags & ARC_FILL_IN_PLACE) != 0) {
ASSERT(!hdr_compressed);
ASSERT(!compressed);
ASSERT(!encrypted);
if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_buf_untransform_in_place(buf);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
}
return (0);
}
if (hdr_compressed == compressed) {
if (!arc_buf_is_shared(buf)) {
abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
arc_buf_size(buf));
}
} else {
ASSERT(hdr_compressed);
ASSERT(!compressed);
/*
* If the buf is sharing its data with the hdr, unlink it and
* allocate a new data buffer for the buf.
*/
if (arc_buf_is_shared(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
/* We need to give the buf its own b_data */
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
/* Previously overhead was 0; just add new overhead */
ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
} else if (ARC_BUF_COMPRESSED(buf)) {
/* We need to reallocate the buf's b_data */
arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
buf);
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
/* We increased the size of b_data; update overhead */
ARCSTAT_INCR(arcstat_overhead_size,
HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
}
/*
* Regardless of the buf's previous compression settings, it
* should not be compressed at the end of this function.
*/
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
/*
* Try copying the data from another buf which already has a
* decompressed version. If that's not possible, it's time to
* bite the bullet and decompress the data from the hdr.
*/
if (arc_buf_try_copy_decompressed_data(buf)) {
/* Skip byteswapping and checksumming (already done) */
return (0);
} else {
error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, buf->b_data,
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr),
&hdr->b_complevel);
/*
* Absent hardware errors or software bugs, this should
* be impossible, but log it anyway so we can debug it.
*/
if (error != 0) {
zfs_dbgmsg(
"hdr %px, compress %d, psize %d, lsize %d",
hdr, arc_hdr_get_compress(hdr),
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (SET_ERROR(EIO));
}
}
}
byteswap:
/* Byteswap the buf's data if necessary */
if (bswap != DMU_BSWAP_NUMFUNCS) {
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
}
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
return (0);
}
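/*
 * Hedged summary of the cases handled above (illustrative, not
 * exhaustive):
 *	ARC_FILL_ENCRYPTED	copy b_rabd as-is; no further transforms
 *	ARC_FILL_IN_PLACE	dnode bonus buffers decrypted in place
 *	compression matches	straight copy from b_pabd (unless shared)
 *	uncompressed request	copy from another decompressed buf, or
 *				decompress b_pabd into the buf
 */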
/*
* If this function is being called to decrypt an encrypted buffer or verify an
* authenticated one, the key must be loaded and a mapping must be made
* available in the keystore via spa_keystore_create_mapping() or one of its
* callers.
*/
int
arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
boolean_t in_place)
{
int ret;
arc_fill_flags_t flags = 0;
if (in_place)
flags |= ARC_FILL_IN_PLACE;
ret = arc_buf_fill(buf, spa, zb, flags);
if (ret == ECKSUM) {
/*
* Convert authentication and decryption errors to EIO
* (and generate an ereport) before leaving the ARC.
*/
ret = SET_ERROR(EIO);
spa_log_error(spa, zb, &buf->b_hdr->b_birth);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
return (ret);
}
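/*
 * Rough caller sketch (hedged; arguments elided and may differ from the
 * actual interfaces):
 *
 *	spa_keystore_create_mapping(...)	establish the key mapping
 *	err = arc_untransform(buf, spa, zb, B_FALSE);
 *	spa_keystore_remove_mapping(...)	drop the mapping afterwards
 *
 * Consumers that own the objset with decryption enabled typically get
 * such a mapping established on their behalf.
 */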
/*
* Increment the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Decrement the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Add a reference to this hdr indicating that someone is actively
* referencing that memory. When the refcount transitions from 0 to 1,
* we remove it from the respective arc_state_t list to indicate that
* it is not evictable.
*/
static void
add_reference(arc_buf_hdr_t *hdr, const void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(state == arc_anon);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
}
if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
state != arc_anon && state != arc_l2c_only) {
/* We don't use the L2-only state list. */
multilist_remove(&state->arcs_list[arc_buf_type(hdr)], hdr);
arc_evictable_space_decrement(hdr, state);
}
}
/*
* Remove a reference from this hdr. When the reference transitions from
* 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
* list making it eligible for eviction.
*/
static int
remove_reference(arc_buf_hdr_t *hdr, const void *tag)
{
int cnt;
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(state == arc_anon || MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(!GHOST_STATE(state)); /* arc_l2c_only counts as a ghost. */
if ((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) != 0)
return (cnt);
if (state == arc_anon) {
arc_hdr_destroy(hdr);
return (0);
}
if (state == arc_uncached && !HDR_PREFETCH(hdr)) {
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
return (0);
}
multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
arc_evictable_space_increment(hdr, state);
return (0);
}
/*
* Returns detailed information about a specific arc buffer. When the
* state_index argument is set the function will calculate the arc header
* list position for its arc state. Since this requires a linear traversal
* callers are strongly encouraged not to do this. However, it can be helpful
* for targeted analysis so the functionality is provided.
*/
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
(void) state_index;
arc_buf_hdr_t *hdr = ab->b_hdr;
l1arc_buf_hdr_t *l1hdr = NULL;
l2arc_buf_hdr_t *l2hdr = NULL;
arc_state_t *state = NULL;
memset(abi, 0, sizeof (arc_buf_info_t));
if (hdr == NULL)
return;
abi->abi_flags = hdr->b_flags;
if (HDR_HAS_L1HDR(hdr)) {
l1hdr = &hdr->b_l1hdr;
state = l1hdr->b_state;
}
if (HDR_HAS_L2HDR(hdr))
l2hdr = &hdr->b_l2hdr;
if (l1hdr) {
abi->abi_bufcnt = l1hdr->b_bufcnt;
abi->abi_access = l1hdr->b_arc_access;
abi->abi_mru_hits = l1hdr->b_mru_hits;
abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
abi->abi_mfu_hits = l1hdr->b_mfu_hits;
abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
}
if (l2hdr) {
abi->abi_l2arc_dattr = l2hdr->b_daddr;
abi->abi_l2arc_hits = l2hdr->b_hits;
}
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
abi->abi_state_contents = arc_buf_type(hdr);
abi->abi_size = arc_hdr_size(hdr);
}
/*
* Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr)
{
arc_state_t *old_state;
int64_t refcnt;
uint32_t bufcnt;
boolean_t update_old, update_new;
arc_buf_contents_t type = arc_buf_type(hdr);
/*
* We almost always have an L1 hdr here, since we call arc_hdr_realloc()
* in arc_read() when bringing a buffer out of the L2ARC. However, the
* L1 hdr doesn't always exist when we change state to arc_anon before
* destroying a header, in which case reallocating to add the L1 hdr is
* pointless.
*/
if (HDR_HAS_L1HDR(hdr)) {
old_state = hdr->b_l1hdr.b_state;
refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
bufcnt = hdr->b_l1hdr.b_bufcnt;
update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
IMPLY(GHOST_STATE(old_state), bufcnt == 0);
IMPLY(GHOST_STATE(new_state), bufcnt == 0);
IMPLY(GHOST_STATE(old_state), hdr->b_l1hdr.b_buf == NULL);
IMPLY(GHOST_STATE(new_state), hdr->b_l1hdr.b_buf == NULL);
IMPLY(old_state == arc_anon, bufcnt <= 1);
} else {
old_state = arc_l2c_only;
refcnt = 0;
bufcnt = 0;
update_old = B_FALSE;
}
update_new = update_old;
if (GHOST_STATE(old_state))
update_old = B_TRUE;
if (GHOST_STATE(new_state))
update_new = B_TRUE;
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(new_state, !=, old_state);
/*
* If this buffer is evictable, transfer it from the
* old state list to the new state list.
*/
if (refcnt == 0) {
if (old_state != arc_anon && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
/* remove_reference() saves on insert. */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
multilist_remove(&old_state->arcs_list[type],
hdr);
arc_evictable_space_decrement(hdr, old_state);
}
}
if (new_state != arc_anon && new_state != arc_l2c_only) {
/*
* An L1 header always exists here, since if we're
* moving to some L1-cached state (i.e. not l2c_only or
* anonymous), we realloc the header to add an L1hdr
* beforehand.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_insert(&new_state->arcs_list[type], hdr);
arc_evictable_space_increment(hdr, new_state);
}
}
ASSERT(!HDR_EMPTY(hdr));
if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
/* adjust state sizes (ignore arc_l2c_only) */
if (update_new && new_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
/*
* When moving a header to a ghost state, we first
* remove all arc buffers. Thus, we'll have a
* bufcnt of zero, and no arc buffer to use for
* the reference. As a result, we use the arc
* header pointer for the reference.
*/
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
HDR_GET_LSIZE(hdr), hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
arc_buf_size(buf), buf);
}
ASSERT3U(bufcnt, ==, buffers);
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* When moving a header off of a ghost state,
* the header will not contain any arc buffers.
* We use the arc header pointer for the reference
* which is exactly what we did when we put the
* header on the ghost state.
*/
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
HDR_GET_LSIZE(hdr), hdr);
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
arc_buf_size(buf), buf);
}
ASSERT3U(bufcnt, ==, buffers);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size[type],
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (HDR_HAS_L1HDR(hdr)) {
hdr->b_l1hdr.b_state = new_state;
if (HDR_HAS_L2HDR(hdr) && new_state != arc_l2c_only) {
l2arc_hdr_arcstats_decrement_state(hdr);
hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
l2arc_hdr_arcstats_increment_state(hdr);
}
}
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, space);
break;
case ARC_SPACE_DNODE:
ARCSTAT_INCR(arcstat_dnode_size, space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
/*
* Note: this includes space wasted by all scatter ABD's, not
* just those allocated by the ARC. But the vast majority of
* scatter ABD's come from the ARC, because other users are
* very short-lived.
*/
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
ARCSTAT_INCR(arcstat_meta_used, space);
aggsum_add(&arc_sums.arcstat_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, -space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, -space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, -space);
break;
case ARC_SPACE_DNODE:
ARCSTAT_INCR(arcstat_dnode_size, -space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, -space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, -space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
ARCSTAT_INCR(arcstat_meta_used, -space);
ASSERT(aggsum_compare(&arc_sums.arcstat_size, space) >= 0);
aggsum_add(&arc_sums.arcstat_size, -space);
}
/*
* Given a hdr and a buf, returns whether that buf can share its b_data buffer
* with the hdr's b_pabd.
*/
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
/*
* The criteria for sharing a hdr's data are:
* 1. the buffer is not encrypted
* 2. the hdr's compression matches the buf's compression
* 3. the hdr doesn't need to be byteswapped
* 4. the hdr isn't already being shared
* 5. the buf is either compressed or it is the last buf in the hdr list
*
* Criterion #5 maintains the invariant that shared uncompressed
* bufs must be the final buf in the hdr's b_buf list. Reading this, you
* might ask, "if a compressed buf is allocated first, won't that be the
* last thing in the list?", but in that case it's impossible to create
* a shared uncompressed buf anyway (because the hdr must be compressed
* to have the compressed buf). You might also think that #3 is
* sufficient to make this guarantee, however it's possible
* (specifically in the rare L2ARC write race mentioned in
* arc_buf_alloc_impl()) there will be an existing uncompressed buf that
* is shareable, but wasn't at the time of its allocation. Rather than
* allow a new shared uncompressed buf to be created and then shuffle
* the list around to make it the last element, this simply disallows
* sharing if the new buf isn't the first to be added.
*/
ASSERT3P(buf->b_hdr, ==, hdr);
boolean_t hdr_compressed =
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
return (!ARC_BUF_ENCRYPTED(buf) &&
buf_compressed == hdr_compressed &&
hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
!HDR_SHARED_DATA(hdr) &&
(ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
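/*
 * Concrete illustration (hedged): a compressed buf on a compressed,
 * unshared, non-byteswapped hdr can share b_pabd, as can the last
 * uncompressed buf of an uncompressed hdr; an uncompressed buf on a
 * compressed hdr, or any encrypted buf, never can.
 */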
/*
* Allocate a buf for this hdr. If you care about the data that's in the hdr,
* or if you want a compressed buffer, pass those flags in. Returns 0 if the
* copy was made successfully, or an error code otherwise.
*/
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
const void *tag, boolean_t encrypted, boolean_t compressed,
boolean_t noauth, boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
arc_fill_flags_t flags = ARC_FILL_LOCKED;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
ASSERT3P(*ret, ==, NULL);
IMPLY(encrypted, compressed);
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
buf->b_hdr = hdr;
buf->b_data = NULL;
buf->b_next = hdr->b_l1hdr.b_buf;
buf->b_flags = 0;
add_reference(hdr, tag);
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Only honor requests for compressed bufs if the hdr is actually
* compressed. This must be overridden if the buffer is encrypted since
* encrypted buffers cannot be decompressed.
*/
if (encrypted) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
} else if (compressed &&
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
flags |= ARC_FILL_COMPRESSED;
}
if (noauth) {
ASSERT0(encrypted);
flags |= ARC_FILL_NOAUTH;
}
/*
* If the hdr's data can be shared then we share the data buffer and
* set the appropriate bit in the hdr's b_flags to indicate the hdr is
* sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
* buffer to store the buf's data.
*
* There are two additional restrictions here because we're sharing
* hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
* actively involved in an L2ARC write, because if this buf is used by
* an arc_write() then the hdr's data buffer will be released when the
* write completes, even though the L2ARC write might still be using it.
* Second, the hdr's ABD must be linear so that the buf's user doesn't
* need to be ABD-aware. It must be allocated via
* zio_[data_]buf_alloc(), not as a page, because we need to be able
* to abd_release_ownership_of_buf(), which isn't allowed on "linear
* page" buffers because the ABD code needs to handle freeing them
* specially.
*/
boolean_t can_share = arc_can_share(hdr, buf) &&
!HDR_L2_WRITING(hdr) &&
hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(hdr->b_l1hdr.b_pabd) &&
!abd_is_linear_page(hdr->b_l1hdr.b_pabd);
/* Set up b_data and sharing */
if (can_share) {
buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
buf->b_data =
arc_get_data_buf(hdr, arc_buf_size(buf), buf);
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
VERIFY3P(buf->b_data, !=, NULL);
hdr->b_l1hdr.b_buf = buf;
hdr->b_l1hdr.b_bufcnt += 1;
if (encrypted)
hdr->b_crypt_hdr.b_ebufcnt += 1;
/*
* If the user wants the data from the hdr, we need to either copy or
* decompress the data.
*/
if (fill) {
ASSERT3P(zb, !=, NULL);
return (arc_buf_fill(buf, spa, zb, flags));
}
return (0);
}
static const char *arc_onloan_tag = "onloan";
static inline void
arc_loaned_bytes_update(int64_t delta)
{
atomic_add_64(&arc_loaned_bytes, delta);
/* assert that it did not wrap around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as in
* flight data by arc_tempreserve_space() until they are "returned". Loaned
* buffers must be returned to the arc before they can be used by the DMU or
* freed.
*/
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
psize, lsize, compression_type, complevel);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
byteorder, salt, iv, mac, ot, psize, lsize, compression_type,
complevel);
atomic_add_64(&arc_loaned_bytes, psize);
return (buf);
}
/*
* Return a loaned arc buffer to the arc.
*/
void
arc_return_buf(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
arc_loaned_bytes_update(-arc_buf_size(buf));
}
/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
arc_loaned_bytes_update(arc_buf_size(buf));
}
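/*
 * Typical lifecycle sketch (hedged; a simplified view of how the DMU
 * uses the loan interface, not a verbatim call sequence):
 *
 *	abuf = dmu_request_arcbuf(db, size);	wraps arc_loan_buf()
 *	... fill abuf->b_data ...
 *	hand abuf to a write, which returns it via arc_return_buf(), or
 *	dmu_return_arcbuf(abuf);		if the buffer goes unused
 */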
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
df->l2df_abd = abd;
df->l2df_size = size;
df->l2df_type = type;
mutex_enter(&l2arc_free_on_write_mtx);
list_insert_head(l2arc_free_on_write, df);
mutex_exit(&l2arc_free_on_write_mtx);
}
static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, hdr);
}
(void) zfs_refcount_remove_many(&state->arcs_size[type], size, hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
if (free_rdata) {
l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
} else {
l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
}
/*
* Share the arc_buf_t's data with the hdr. Whenever we are sharing the
* data buffer, we transfer the refcount ownership to the hdr and update
* the appropriate kstats.
*/
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Start sharing the data buffer. We transfer the
* refcount ownership to the hdr since it always owns
* the refcount whenever an arc_buf_t is shared.
*/
zfs_refcount_transfer_ownership_many(
&hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
arc_hdr_size(hdr), buf, hdr);
hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
HDR_ISTYPE_METADATA(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
/*
* Since we've transferred ownership to the hdr we need
* to increment its compressed and uncompressed kstats and
* decrement the overhead size.
*/
ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}
static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* We are no longer sharing this buffer so we need
* to transfer its ownership to the rightful owner.
*/
zfs_refcount_transfer_ownership_many(
&hdr->b_l1hdr.b_state->arcs_size[arc_buf_type(hdr)],
arc_hdr_size(hdr), hdr, buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
abd_free(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = NULL;
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
/*
* Since the buffer is no longer shared between
* the arc buf and the hdr, count it as overhead.
*/
ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
/*
* Remove an arc_buf_t from the hdr's buf list and return the last
* arc_buf_t on the list. If no buffers remain on the list then return
* NULL.
*/
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
/*
* Remove the buf from the hdr list and locate the last
* remaining buffer on the list.
*/
while (*bufp != NULL) {
if (*bufp == buf)
*bufp = buf->b_next;
/*
* If we've removed a buffer in the middle of
* the list then update the lastbuf and update
* bufp.
*/
if (*bufp != NULL) {
lastbuf = *bufp;
bufp = &(*bufp)->b_next;
}
}
buf->b_next = NULL;
ASSERT3P(lastbuf, !=, buf);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
return (lastbuf);
}
/*
* Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
* list and free it.
*/
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Free up the data associated with the buf but only if we're not
* sharing this with the hdr. If we are sharing it with the hdr, the
* hdr is responsible for doing the free.
*/
if (buf->b_data != NULL) {
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
if (arc_buf_is_shared(buf)) {
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
uint64_t size = arc_buf_size(buf);
arc_free_data_buf(hdr, buf->b_data, size, buf);
ARCSTAT_INCR(arcstat_overhead_size, -size);
}
buf->b_data = NULL;
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf)) {
hdr->b_crypt_hdr.b_ebufcnt -= 1;
/*
* If we have no more encrypted buffers and we've
* already gotten a copy of the decrypted data we can
* free b_rabd to save some space.
*/
if (hdr->b_crypt_hdr.b_ebufcnt == 0 &&
HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd != NULL &&
!HDR_IO_IN_PROGRESS(hdr)) {
arc_hdr_free_abd(hdr, B_TRUE);
}
}
}
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
/*
* If the current arc_buf_t is sharing its data buffer with the
* hdr, then reassign the hdr's b_pabd to share it with the new
* buffer at the end of the list. The shared buffer is always
* the last one on the hdr's buffer list.
*
* There is an equivalent case for compressed bufs, but since
* they aren't guaranteed to be the last buf in the list and
* that is an exceedingly rare case, we just allow that space to be
* wasted temporarily. We must also be careful not to share
* encrypted buffers, since they cannot be shared.
*/
if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
/* Only one buf can be shared at once */
VERIFY(!arc_buf_is_shared(lastbuf));
/* hdr is uncompressed so can't have compressed buf */
VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
arc_hdr_free_abd(hdr, B_FALSE);
/*
* We must setup a new shared block between the
* last buffer and the hdr. The data would have
* been allocated by the arc buf so we need to transfer
* ownership to the hdr since it's now being shared.
*/
arc_share_buf(hdr, lastbuf);
}
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT3P(lastbuf, !=, NULL);
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
}
/*
* Free the checksum if we're removing the last uncompressed buf from
* this hdr.
*/
if (!arc_hdr_has_uncompressed_buf(hdr)) {
arc_cksum_free(hdr);
}
/* clean up the buf */
buf->b_hdr = NULL;
kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
{
uint64_t size;
boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
IMPLY(alloc_rdata, HDR_PROTECTED(hdr));
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
ARCSTAT_INCR(arcstat_compressed_size, size);
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}
static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(free_rdata, HDR_HAS_RABD(hdr));
/*
* If the hdr is currently being written to the l2arc then
* we defer freeing the data by adding it to the l2arc_free_on_write
* list. The l2arc will free the data once it's finished
* writing it to the l2arc device.
*/
if (HDR_L2_WRITING(hdr)) {
arc_hdr_free_on_write(hdr, free_rdata);
ARCSTAT_BUMP(arcstat_l2_free_on_write);
} else if (free_rdata) {
arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
} else {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
}
if (free_rdata) {
hdr->b_crypt_hdr.b_rabd = NULL;
ARCSTAT_INCR(arcstat_raw_size, -size);
} else {
hdr->b_l1hdr.b_pabd = NULL;
}
if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
ARCSTAT_INCR(arcstat_compressed_size, -size);
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
/*
* Allocate empty anonymous ARC header. The header will get its identity
* assigned and buffers attached later as part of read or write operations.
*
* In the case of a read, arc_read() assigns the header its identity
* (b_dva + b_birth), inserts it into the ARC hash to make it globally
* visible, and allocates a physical (b_pabd) or raw (b_rabd) ABD buffer to
* read into from disk. On disk read completion arc_read_done() allocates
* ARC buffer(s) as needed, potentially sharing one of them with the
* physical ABD buffer.
*
* In the case of a write, arc_alloc_buf() allocates an ARC buffer to be
* filled with data. Then, after compression and/or encryption,
* arc_write_ready() allocates and fills (or potentially shares) the
* physical (b_pabd) or raw (b_rabd) ABD buffer. On disk write completion
* arc_write_done() assigns the header its new identity (b_dva + b_birth)
* and inserts it into the ARC hash.
*
* In the case of a partial overwrite, the old data is read first as
* described above. Then arc_release() either allocates a new anonymous
* ARC header and moves the ARC buffer to it, or reuses the old ARC header
* by discarding its identity and removing it from the ARC hash. After the
* buffer has been modified, the normal write process follows as described.
*/
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
boolean_t protected, enum zio_compress compression_type, uint8_t complevel,
arc_buf_contents_t type)
{
arc_buf_hdr_t *hdr;
VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
if (protected) {
hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
} else {
hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
}
ASSERT(HDR_EMPTY(hdr));
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
hdr->b_spa = spa;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
arc_hdr_set_compress(hdr, compression_type);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_l1hdr.b_state = arc_anon;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_buf = NULL;
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
}
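/*
 * Condensed write-path sketch of the description above (illustrative;
 * steps simplified):
 *
 *	buf = arc_alloc_buf(...);	hdr is anonymous, no identity yet
 *	arc_write_ready():	allocate b_pabd/b_rabd or share with buf
 *	arc_write_done():	assign b_dva/b_birth, insert into ARC hash
 */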
/*
* Transition between the two allocation states for the arc_buf_hdr struct.
* The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
* (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
* version is used when a cache buffer is only in the L2ARC in order to reduce
* memory usage.
*/
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
ASSERT(HDR_HAS_L2HDR(hdr));
arc_buf_hdr_t *nhdr;
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
(old == hdr_l2only_cache && new == hdr_full_cache));
/*
* If the caller wanted a new full header and the header is to be
* encrypted, we will actually allocate the header from the full crypt
* cache instead. The same applies to freeing from the old cache.
*/
if (HDR_PROTECTED(hdr) && new == hdr_full_cache)
new = hdr_full_crypt_cache;
if (HDR_PROTECTED(hdr) && old == hdr_full_cache)
old = hdr_full_crypt_cache;
nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
memcpy(nhdr, hdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
/*
* arc_access and arc_change_state need to be aware that a
* header has just come out of L2ARC, so we set its state to
* l2c_only even though it's about to change.
*/
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify previous threads set b_pabd and b_rabd to NULL before freeing */
ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(hdr->b_l1hdr.b_bufcnt);
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
/*
* If we've reached here, we must have been called from
* arc_evict_hdr(), as such we should have already been
* removed from any ghost list we were previously on
* (which protects us from racing with arc_evict_state),
* thus no locking is needed during this check.
*/
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/*
* A buffer must not be moved into the arc_l2c_only
* state if it's not finished being written out to the
* l2arc device. Otherwise, the b_l1hdr.b_pabd field
* might try to be accessed, even though it was removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
}
/*
* The header has been reallocated so we need to re-insert it into any
* lists it was on.
*/
(void) buf_hash_insert(nhdr, NULL);
ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
mutex_enter(&dev->l2ad_mtx);
/*
* We must place the realloc'ed header back into the list at
* the same spot. Otherwise, if it's placed earlier in the list,
* l2arc_write_buffers() could find it during the function's
* write phase, and try to write it out to the l2arc.
*/
list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
list_remove(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
/*
* Since we're using the pointer address as the tag when
* incrementing and decrementing the l2ad_alloc refcount, we
* must remove the old pointer (that we're about to destroy) and
* add the new pointer to the refcount. Otherwise we'd remove
* the wrong pointer address when calling arc_hdr_destroy() later.
*/
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(nhdr), nhdr);
buf_discard_identity(hdr);
kmem_cache_free(old, hdr);
return (nhdr);
}
/*
* This function allows an L1 header to be reallocated as a crypt
* header and vice versa. If we are going to a crypt header, the
* new fields will be zeroed out.
*/
static arc_buf_hdr_t *
arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
{
arc_buf_hdr_t *nhdr;
arc_buf_t *buf;
kmem_cache_t *ncache, *ocache;
/*
* This function requires that hdr is in the arc_anon state.
* Therefore it won't have any L2ARC data for us to worry
* about copying.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt);
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!list_link_active(&hdr->b_l2hdr.b_l2node));
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (need_crypt) {
ncache = hdr_full_crypt_cache;
ocache = hdr_full_cache;
} else {
ncache = hdr_full_cache;
ocache = hdr_full_crypt_cache;
}
nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE);
/*
* Copy all members that aren't locks or condvars to the new header.
* No lists are pointing to us (as we asserted above), so we don't
* need to worry about the list nodes.
*/
nhdr->b_dva = hdr->b_dva;
nhdr->b_birth = hdr->b_birth;
nhdr->b_type = hdr->b_type;
nhdr->b_flags = hdr->b_flags;
nhdr->b_psize = hdr->b_psize;
nhdr->b_lsize = hdr->b_lsize;
nhdr->b_spa = hdr->b_spa;
#ifdef ZFS_DEBUG
nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum;
#endif
nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt;
nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap;
nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state;
nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access;
nhdr->b_l1hdr.b_mru_hits = hdr->b_l1hdr.b_mru_hits;
nhdr->b_l1hdr.b_mru_ghost_hits = hdr->b_l1hdr.b_mru_ghost_hits;
nhdr->b_l1hdr.b_mfu_hits = hdr->b_l1hdr.b_mfu_hits;
nhdr->b_l1hdr.b_mfu_ghost_hits = hdr->b_l1hdr.b_mfu_ghost_hits;
nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb;
nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
/*
* This zfs_refcount_add() exists only to ensure that the individual
* arc buffers always point to a header that is referenced, avoiding
* a small race condition that could trigger ASSERTs.
*/
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next)
buf->b_hdr = nhdr;
zfs_refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
(void) zfs_refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
if (need_crypt) {
arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
} else {
arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED);
}
/* unset all members of the original hdr */
memset(&hdr->b_dva, 0, sizeof (dva_t));
hdr->b_birth = 0;
hdr->b_type = 0;
hdr->b_flags = 0;
hdr->b_psize = 0;
hdr->b_lsize = 0;
hdr->b_spa = 0;
#ifdef ZFS_DEBUG
hdr->b_l1hdr.b_freeze_cksum = NULL;
#endif
hdr->b_l1hdr.b_buf = NULL;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_byteswap = 0;
hdr->b_l1hdr.b_state = NULL;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_acb = NULL;
hdr->b_l1hdr.b_pabd = NULL;
if (ocache == hdr_full_crypt_cache) {
ASSERT(!HDR_HAS_RABD(hdr));
hdr->b_crypt_hdr.b_ot = DMU_OT_NONE;
hdr->b_crypt_hdr.b_ebufcnt = 0;
hdr->b_crypt_hdr.b_dsobj = 0;
memset(hdr->b_crypt_hdr.b_salt, 0, ZIO_DATA_SALT_LEN);
memset(hdr->b_crypt_hdr.b_iv, 0, ZIO_DATA_IV_LEN);
memset(hdr->b_crypt_hdr.b_mac, 0, ZIO_DATA_MAC_LEN);
}
buf_discard_identity(hdr);
kmem_cache_free(ocache, hdr);
return (nhdr);
}
/*
* This function is used by the send / receive code to convert a newly
* allocated arc_buf_t to one that is suitable for a raw encrypted write. It
* is also used to allow the root objset block to be updated without altering
* its embedded MACs. Both block types will always be uncompressed so we do not
* have to worry about compression type or psize.
*/
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
if (!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, B_TRUE);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
if (salt != NULL)
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
}
/*
* Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
* The buf is returned thawed since we expect the consumer to modify it.
*/
arc_buf_t *
arc_alloc_buf(spa_t *spa, const void *tag, arc_buf_contents_t type,
int32_t size)
{
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
B_FALSE, ZIO_COMPRESS_OFF, 0, type);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_FALSE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
/*
* Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
* for bufs containing metadata.
*/
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, const void *tag, uint64_t psize,
uint64_t lsize, enum zio_compress compression_type, uint8_t complevel)
{
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
B_FALSE, compression_type, complevel, ARC_BUFC_DATA);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE,
B_TRUE, B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
/*
* To ensure that the hdr has the correct data in it if we call
* arc_untransform() on this buf before it's been written to disk,
* it's easiest if we just set up sharing between the buf and the hdr.
*/
arc_share_buf(hdr, buf);
return (buf);
}
arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, const void *tag, uint64_t dsobj,
boolean_t byteorder, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac, dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_hdr_t *hdr;
arc_buf_t *buf;
arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
ARC_BUFC_METADATA : ARC_BUFC_DATA;
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
compression_type, complevel, type);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
memcpy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
memcpy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
memcpy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
* encrypted type. It will become authenticated instead in
* arc_write_ready().
*/
buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_TRUE, B_TRUE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
static void
l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
arc_buf_contents_t type = hdr->b_type;
int64_t lsize_s;
int64_t psize_s;
int64_t asize_s;
if (incr) {
lsize_s = lsize;
psize_s = psize;
asize_s = asize;
} else {
lsize_s = -lsize;
psize_s = -psize;
asize_s = -asize;
}
/* If the buffer is a prefetch, count it as such. */
if (HDR_PREFETCH(hdr)) {
ARCSTAT_INCR(arcstat_l2_prefetch_asize, asize_s);
} else {
/*
* We use the value stored in the L2 header upon initial
* caching in L2ARC. This value will be updated in case
* an MRU/MRU_ghost buffer transitions to MFU but the L2ARC
* metadata (log entry) cannot currently be updated. Having
* the ARC state in the L2 header solves the problem of a
* possibly absent L1 header (apparent in buffers restored
* from persistent L2ARC).
*/
switch (hdr->b_l2hdr.b_arcs_state) {
case ARC_STATE_MRU_GHOST:
case ARC_STATE_MRU:
ARCSTAT_INCR(arcstat_l2_mru_asize, asize_s);
break;
case ARC_STATE_MFU_GHOST:
case ARC_STATE_MFU:
ARCSTAT_INCR(arcstat_l2_mfu_asize, asize_s);
break;
default:
break;
}
}
if (state_only)
return;
ARCSTAT_INCR(arcstat_l2_psize, psize_s);
ARCSTAT_INCR(arcstat_l2_lsize, lsize_s);
switch (type) {
case ARC_BUFC_DATA:
ARCSTAT_INCR(arcstat_l2_bufc_data_asize, asize_s);
break;
case ARC_BUFC_METADATA:
ARCSTAT_INCR(arcstat_l2_bufc_metadata_asize, asize_s);
break;
default:
break;
}
}
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
ASSERT(HDR_HAS_L2HDR(hdr));
list_remove(&dev->l2ad_buflist, hdr);
l2arc_hdr_arcstats_decrement(hdr);
vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_buf == NULL ||
hdr->b_l1hdr.b_bufcnt > 0);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
}
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (HDR_HAS_L2HDR(hdr)) {
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
if (!buflist_held)
mutex_enter(&dev->l2ad_mtx);
/*
* Even though we checked this conditional above, we
* need to check this again now that we have the
* l2ad_mtx. This is because we could be racing with
* another thread calling l2arc_evict() which might have
* destroyed this header's L2 portion as we were waiting
* to acquire the l2ad_mtx. If that happens, we don't
* want to re-destroy the header's L2 portion.
*/
if (HDR_HAS_L2HDR(hdr)) {
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
arc_hdr_l2hdr_destroy(hdr);
}
if (!buflist_held)
mutex_exit(&dev->l2ad_mtx);
}
/*
* The header's identity can only be safely discarded once it is no
* longer discoverable. This requires removing it from the hash table
* and the l2arc header list. After this point the hash lock can not
* be used to protect the header.
*/
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
if (HDR_HAS_L1HDR(hdr)) {
arc_cksum_free(hdr);
while (hdr->b_l1hdr.b_buf != NULL)
arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
if (!HDR_PROTECTED(hdr)) {
kmem_cache_free(hdr_full_cache, hdr);
} else {
kmem_cache_free(hdr_full_crypt_cache, hdr);
}
} else {
kmem_cache_free(hdr_l2only_cache, hdr);
}
}
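/*
* Free the given buffer and drop the caller's reference on its header.
* Anonymous buffers are handled without taking the hash lock; cached
* buffers are destroyed under the header's hash lock.
*/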
void
arc_buf_destroy(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
VERIFY0(remove_reference(hdr, tag));
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hdr, ==, buf->b_hdr);
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
ASSERT3P(buf->b_data, !=, NULL);
arc_buf_destroy_impl(buf);
(void) remove_reference(hdr, tag);
mutex_exit(hash_lock);
}
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost
* - arc_mfu -> arc_mfu_ghost
* - arc_mru_ghost -> arc_l2c_only
* - arc_mru_ghost -> deleted
* - arc_mfu_ghost -> arc_l2c_only
* - arc_mfu_ghost -> deleted
* - arc_uncached -> deleted
*
* Return the total size of the evicted data buffers for eviction progress
* tracking. When evicting from ghost states, return the logical buffer size
* so that eviction progresses at the same (or at least a comparable) rate as
* from non-ghost states.
*
* Return, via *real_evicted, the actual ARC size reduction, used to wake up
* threads waiting for it. For non-ghost states it includes the size of the
* evicted data buffers (the headers are not freed there). For ghost states
* it includes only the size of the evicted headers.
*/
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, uint64_t *real_evicted)
{
arc_state_t *evicted_state, *state;
int64_t bytes_evicted = 0;
uint_t min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
*real_evicted = 0;
state = hdr->b_l1hdr.b_state;
if (GHOST_STATE(state)) {
/*
* l2arc_write_buffers() relies on a header's L1 portion
* (i.e. its b_pabd field) during its write phase.
* Thus, we cannot push a header onto the arc_l2c_only
* state (removing its L1 piece) until the header is
* done being written to the l2arc.
*/
if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
ARCSTAT_BUMP(arcstat_evict_l2_skip);
return (bytes_evicted);
}
ARCSTAT_BUMP(arcstat_deleted);
bytes_evicted += HDR_GET_LSIZE(hdr);
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_pabd == NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
arc_change_state(arc_l2c_only, hdr);
/*
* dropping from L1+L2 cached to L2-only,
* realloc to remove the L1 header.
*/
(void) arc_hdr_realloc(hdr, hdr_full_cache,
hdr_l2only_cache);
*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
} else {
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
}
return (bytes_evicted);
}
ASSERT(state == arc_mru || state == arc_mfu || state == arc_uncached);
evicted_state = (state == arc_uncached) ? arc_anon :
((state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost);
/* prefetch buffers have a minimum lifespan */
if ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
MSEC_TO_TICK(min_lifetime)) {
ARCSTAT_BUMP(arcstat_evict_skip);
return (bytes_evicted);
}
if (HDR_HAS_L2HDR(hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
} else {
if (l2arc_write_eligible(hdr->b_spa, hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_eligible,
HDR_GET_LSIZE(hdr));
switch (state->arcs_state) {
case ARC_STATE_MRU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mru,
HDR_GET_LSIZE(hdr));
break;
case ARC_STATE_MFU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mfu,
HDR_GET_LSIZE(hdr));
break;
default:
break;
}
} else {
ARCSTAT_INCR(arcstat_evict_l2_ineligible,
HDR_GET_LSIZE(hdr));
}
}
bytes_evicted += arc_hdr_size(hdr);
*real_evicted += arc_hdr_size(hdr);
/*
* If this hdr is being evicted and has a compressed buffer then we
* discard it here before we change states. This ensures that the
* accounting is updated correctly in arc_free_data_impl().
*/
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
arc_change_state(evicted_state, hdr);
DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
if (evicted_state == arc_anon) {
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
} else {
ASSERT(HDR_IN_HASH_TABLE(hdr));
}
return (bytes_evicted);
}
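/*
* Recompute arc_need_free: the larger of the system free-memory deficit
* (relative to arc_sys_free / 2) and the amount still owed to the last
* eviction waiter. The caller must hold arc_evict_lock.
*/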
static void
arc_set_need_free(void)
{
ASSERT(MUTEX_HELD(&arc_evict_lock));
int64_t remaining = arc_free_memory() - arc_sys_free / 2;
arc_evict_waiter_t *aw = list_tail(&arc_evict_waiters);
if (aw == NULL) {
arc_need_free = MAX(-remaining, 0);
} else {
arc_need_free =
MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
}
}
static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
uint64_t spa, uint64_t bytes)
{
multilist_sublist_t *mls;
uint64_t bytes_evicted = 0, real_evicted = 0;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint_t evict_count = zfs_arc_evict_batch_limit;
ASSERT3P(marker, !=, NULL);
mls = multilist_sublist_lock(ml, idx);
for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
hdr = multilist_sublist_prev(mls, marker)) {
if ((evict_count == 0) || (bytes_evicted >= bytes))
break;
/*
* To keep our iteration location, move the marker
* forward. Since we're not holding hdr's hash lock, we
* must be very careful and not remove 'hdr' from the
* sublist. Otherwise, other consumers might mistake the
* 'hdr' as not being on a sublist when they call the
* multilist_link_active() function (they all rely on
* the hash lock protecting concurrent insertions and
* removals). multilist_sublist_move_forward() was
* specifically implemented to ensure this is the case
* (only 'marker' will be removed and re-inserted).
*/
multilist_sublist_move_forward(mls, marker);
/*
* The only case where the b_spa field should ever be
* zero, is the marker headers inserted by
* arc_evict_state(). It's possible for multiple threads
* to be calling arc_evict_state() concurrently (e.g.
* dsl_pool_close() and zio_inject_fault()), so we must
* skip any markers we see from these other threads.
*/
if (hdr->b_spa == 0)
continue;
/* we're only interested in evicting buffers of a certain spa */
if (spa != 0 && hdr->b_spa != spa) {
ARCSTAT_BUMP(arcstat_evict_skip);
continue;
}
hash_lock = HDR_LOCK(hdr);
/*
* We aren't calling this function from any code path
* that would already be holding a hash lock, so we're
* asserting on this assumption to be defensive in case
* this ever changes. Without this check, it would be
* possible to incorrectly increment arcstat_mutex_miss
* below (e.g. if the code changed such that we called
* this function with a hash lock held).
*/
ASSERT(!MUTEX_HELD(hash_lock));
if (mutex_tryenter(hash_lock)) {
uint64_t revicted;
uint64_t evicted = arc_evict_hdr(hdr, &revicted);
mutex_exit(hash_lock);
bytes_evicted += evicted;
real_evicted += revicted;
/*
* If evicted is zero, arc_evict_hdr() must have
* decided to skip this header, don't increment
* evict_count in this case.
*/
if (evicted != 0)
evict_count--;
} else {
ARCSTAT_BUMP(arcstat_mutex_miss);
}
}
multilist_sublist_unlock(mls);
/*
* Increment the count of evicted bytes, and wake up any threads that
* are waiting for the count to reach this value. Since the list is
* ordered by ascending aew_count, we pop off the beginning of the
* list until we reach the end, or a waiter that's past the current
* "count". Doing this outside the loop reduces the number of times
* we need to acquire the global arc_evict_lock.
*
* Only wake when there's sufficient free memory in the system
* (specifically, arc_sys_free/2, which by default is a bit more than
* 1/64th of RAM). See the comments in arc_wait_for_eviction().
*/
mutex_enter(&arc_evict_lock);
arc_evict_count += real_evicted;
if (arc_free_memory() > arc_sys_free / 2) {
arc_evict_waiter_t *aw;
while ((aw = list_head(&arc_evict_waiters)) != NULL &&
aw->aew_count <= arc_evict_count) {
list_remove(&arc_evict_waiters, aw);
cv_broadcast(&aw->aew_cv);
}
}
arc_set_need_free();
mutex_exit(&arc_evict_lock);
/*
* If the ARC size is reduced from arc_c_max to arc_c_min (especially
* if the average cached block is small), eviction can be on-CPU for
* many seconds. To ensure that other threads that may be bound to
* this CPU are able to make progress, make a voluntary preemption
* call here.
*/
kpreempt(KPREEMPT_SYNC);
return (bytes_evicted);
}
/*
* Allocate an array of buffer headers used as placeholders during arc state
* eviction.
*/
static arc_buf_hdr_t **
arc_state_alloc_markers(int count)
{
arc_buf_hdr_t **markers;
markers = kmem_zalloc(sizeof (*markers) * count, KM_SLEEP);
for (int i = 0; i < count; i++) {
markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
/*
* A b_spa of 0 is used to indicate that this header is
* a marker. This fact is used in arc_evict_state_impl().
*/
markers[i]->b_spa = 0;
}
return (markers);
}
static void
arc_state_free_markers(arc_buf_hdr_t **markers, int count)
{
for (int i = 0; i < count; i++)
kmem_cache_free(hdr_full_cache, markers[i]);
kmem_free(markers, sizeof (*markers) * count);
}
/*
* Evict buffers from the given arc state, until we've removed the
* specified number of bytes. Move the removed buffers to the
* appropriate evict state.
*
* This function makes a "best effort". It skips over any buffers
* it can't get a hash_lock on, and so, may not catch all candidates.
* It may also return without evicting as much space as requested.
*
* If bytes is specified using the special value ARC_EVICT_ALL, this
* will evict all available (i.e. unlocked and evictable) buffers from
* the given arc state; this is used by arc_flush().
*/
static uint64_t
arc_evict_state(arc_state_t *state, arc_buf_contents_t type, uint64_t spa,
uint64_t bytes)
{
uint64_t total_evicted = 0;
multilist_t *ml = &state->arcs_list[type];
int num_sublists;
arc_buf_hdr_t **markers;
num_sublists = multilist_get_num_sublists(ml);
/*
* If we've tried to evict from each sublist, made some
* progress, but still have not hit the target number of bytes
* to evict, we want to keep trying. The markers allow us to
* pick up where we left off for each individual sublist, rather
* than starting from the tail each time.
*/
if (zthr_iscurthread(arc_evict_zthr)) {
markers = arc_state_evict_markers;
ASSERT3S(num_sublists, <=, arc_state_evict_marker_count);
} else {
markers = arc_state_alloc_markers(num_sublists);
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls;
mls = multilist_sublist_lock(ml, i);
multilist_sublist_insert_tail(mls, markers[i]);
multilist_sublist_unlock(mls);
}
/*
* While we haven't hit our target number of bytes to evict, or
* we're evicting all available buffers.
*/
while (total_evicted < bytes) {
int sublist_idx = multilist_get_random_index(ml);
uint64_t scan_evicted = 0;
/*
* Start eviction using a randomly selected sublist,
* this is to try and evenly balance eviction across all
* sublists. Always starting at the same sublist
* (e.g. index 0) would cause evictions to favor certain
* sublists over others.
*/
for (int i = 0; i < num_sublists; i++) {
uint64_t bytes_remaining;
uint64_t bytes_evicted;
if (total_evicted < bytes)
bytes_remaining = bytes - total_evicted;
else
break;
bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
markers[sublist_idx], spa, bytes_remaining);
scan_evicted += bytes_evicted;
total_evicted += bytes_evicted;
/* we've reached the end, wrap to the beginning */
if (++sublist_idx >= num_sublists)
sublist_idx = 0;
}
/*
* If we didn't evict anything during this scan, we have
* no reason to believe we'll evict more during another
* scan, so break the loop.
*/
if (scan_evicted == 0) {
/* bytes == 0 is not possible here (the loop would not have run); make that obvious */
ASSERT3S(bytes, !=, 0);
/*
* When bytes is ARC_EVICT_ALL, the only way to
* break the loop is when scan_evicted is zero.
* In that case, we actually have evicted enough,
* so we don't want to increment the kstat.
*/
if (bytes != ARC_EVICT_ALL) {
ASSERT3S(total_evicted, <, bytes);
ARCSTAT_BUMP(arcstat_evict_not_enough);
}
break;
}
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
multilist_sublist_remove(mls, markers[i]);
multilist_sublist_unlock(mls);
}
if (markers != arc_state_evict_markers)
arc_state_free_markers(markers, num_sublists);
return (total_evicted);
}
/*
* Flush all "evictable" data of the given type from the arc state
* specified. This will not evict any "active" buffers (i.e. referenced).
*
* When 'retry' is set to B_FALSE, the function will make a single pass
* over the state and evict any buffers that it can. Since it doesn't
* continually retry the eviction, it might end up leaving some buffers
* in the ARC due to lock misses.
*
* When 'retry' is set to B_TRUE, the function will continually retry the
* eviction until *all* evictable buffers have been removed from the
* state. As a result, if concurrent insertions into the state are
* allowed (e.g. if the ARC isn't shutting down), this function might
* wind up in an infinite loop, continually trying to evict buffers.
*/
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
boolean_t retry)
{
uint64_t evicted = 0;
while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
evicted += arc_evict_state(state, type, spa, ARC_EVICT_ALL);
if (!retry)
break;
}
return (evicted);
}
/*
* Evict the specified number of bytes from the state specified. This
* function prevents us from trying to evict more from a state's list
* than is "evictable", and skips evicting altogether when passed a
* negative value for "bytes". In contrast, arc_evict_state() will
* evict everything it can, when passed a negative value for "bytes".
*/
static uint64_t
arc_evict_impl(arc_state_t *state, arc_buf_contents_t type, int64_t bytes)
{
uint64_t delta;
if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
bytes);
return (arc_evict_state(state, type, 0, delta));
}
return (0);
}
/*
* Adjust the specified fraction, taking into account the initial ghost
* state(s) size, the ghost hit bytes arguing for increasing the fraction
* ('up') and for decreasing it ('down'), plus a balance factor controlling
* the decrease rate, used to balance metadata vs. data.
*/
static uint64_t
arc_evict_adj(uint64_t frac, uint64_t total, uint64_t up, uint64_t down,
uint_t balance)
{
if (total < 8 || up + down == 0)
return (frac);
/*
* We should not have more ghost hits than ghost size, but they
* may get close. Restrict maximum adjustment in that case.
*/
if (up + down >= total / 4) {
uint64_t scale = (up + down) / (total / 8);
up /= scale;
down /= scale;
}
/* Get maximal dynamic range by choosing optimal shifts. */
int s = highbit64(total);
s = MIN(64 - s, 32);
uint64_t ofrac = (1ULL << 32) - frac;
if (frac >= 4 * ofrac)
up /= frac / (2 * ofrac + 1);
up = (up << s) / (total >> (32 - s));
if (ofrac >= 4 * frac)
down /= ofrac / (2 * frac + 1);
down = (down << s) / (total >> (32 - s));
down = down * 100 / balance;
return (frac + up - down);
}
/*
* Evict buffers from the cache, such that arcstat_size is capped by arc_c.
*/
static uint64_t
arc_evict(void)
{
uint64_t asize, bytes, total_evicted = 0;
int64_t e, mrud, mrum, mfud, mfum, w;
static uint64_t ogrd, ogrm, ogfd, ogfm;
static uint64_t gsrd, gsrm, gsfd, gsfm;
uint64_t ngrd, ngrm, ngfd, ngfm;
/* Get current size of ARC states we can evict from. */
mrud = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]);
mrum = zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA]) +
zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
mfud = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
mfum = zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
uint64_t d = mrud + mfud;
uint64_t m = mrum + mfum;
uint64_t t = d + m;
/* Get ARC ghost hits since last eviction. */
ngrd = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]);
uint64_t grd = ngrd - ogrd;
ogrd = ngrd;
ngrm = wmsum_value(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]);
uint64_t grm = ngrm - ogrm;
ogrm = ngrm;
ngfd = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]);
uint64_t gfd = ngfd - ogfd;
ogfd = ngfd;
ngfm = wmsum_value(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]);
uint64_t gfm = ngfm - ogfm;
ogfm = ngfm;
/* Adjust ARC states balance based on ghost hits. */
arc_meta = arc_evict_adj(arc_meta, gsrd + gsrm + gsfd + gsfm,
grm + gfm, grd + gfd, zfs_arc_meta_balance);
arc_pd = arc_evict_adj(arc_pd, gsrd + gsfd, grd, gfd, 100);
arc_pm = arc_evict_adj(arc_pm, gsrm + gsfm, grm, gfm, 100);
asize = aggsum_value(&arc_sums.arcstat_size);
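/*
* 'wt' is the wanted total size: the data plus metadata we would like
* to keep after evicting the ARC's excess over its target (asize - arc_c).
*/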
int64_t wt = t - (asize - arc_c);
/*
* Try to reduce pinned dnodes if more than 3/4 of the wanted metadata
* target is not evictable, or if the dnode size exceeds arc_dnode_limit.
*/
int64_t prune = 0;
int64_t dn = wmsum_value(&arc_sums.arcstat_dnode_size);
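/*
* arc_meta is a 32-bit fixed-point fraction (out of 1ULL << 32), so this
* computes w = wt * arc_meta / 2^32, the wanted metadata size, using two
* 16-bit shifts to keep the intermediate product within 64 bits.
*/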
w = wt * (int64_t)(arc_meta >> 16) >> 16;
if (zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA]) +
zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA]) -
zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) -
zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]) >
w * 3 / 4) {
prune = dn / sizeof (dnode_t) *
zfs_arc_dnode_reduce_percent / 100;
} else if (dn > arc_dnode_limit) {
prune = (dn - arc_dnode_limit) / sizeof (dnode_t) *
zfs_arc_dnode_reduce_percent / 100;
}
if (prune > 0)
arc_prune_async(prune);
/* Evict MRU metadata. */
w = wt * (int64_t)(arc_meta * arc_pm >> 48) >> 16;
e = MIN((int64_t)(asize - arc_c), (int64_t)(mrum - w));
bytes = arc_evict_impl(arc_mru, ARC_BUFC_METADATA, e);
total_evicted += bytes;
mrum -= bytes;
asize -= bytes;
/* Evict MFU metadata. */
w = wt * (int64_t)(arc_meta >> 16) >> 16;
e = MIN((int64_t)(asize - arc_c), (int64_t)(m - w));
bytes = arc_evict_impl(arc_mfu, ARC_BUFC_METADATA, e);
total_evicted += bytes;
mfum -= bytes;
asize -= bytes;
/* Evict MRU data. */
wt -= m - total_evicted;
w = wt * (int64_t)(arc_pd >> 16) >> 16;
e = MIN((int64_t)(asize - arc_c), (int64_t)(mrud - w));
bytes = arc_evict_impl(arc_mru, ARC_BUFC_DATA, e);
total_evicted += bytes;
mrud -= bytes;
asize -= bytes;
/* Evict MFU data. */
e = asize - arc_c;
bytes = arc_evict_impl(arc_mfu, ARC_BUFC_DATA, e);
mfud -= bytes;
total_evicted += bytes;
/*
* Evict ghost lists
*
* The size of each state's ghost list represents how much that state
* may grow by shrinking the other states. If it needed to shrink the
* other states to zero (which is unlikely), its ghost size would equal
* the sum of the other three state sizes. But an excessive ghost size
* may result in false ghost hits (references too far back) that may
* never become real cache hits if several states are competing. So
* choose an arbitrary point of 1/2 of the other state sizes.
*/
gsrd = (mrum + mfud + mfum) / 2;
e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]) -
gsrd;
(void) arc_evict_impl(arc_mru_ghost, ARC_BUFC_DATA, e);
gsrm = (mrud + mfud + mfum) / 2;
e = zfs_refcount_count(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]) -
gsrm;
(void) arc_evict_impl(arc_mru_ghost, ARC_BUFC_METADATA, e);
gsfd = (mrud + mrum + mfum) / 2;
e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]) -
gsfd;
(void) arc_evict_impl(arc_mfu_ghost, ARC_BUFC_DATA, e);
gsfm = (mrud + mrum + mfud) / 2;
e = zfs_refcount_count(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]) -
gsfm;
(void) arc_evict_impl(arc_mfu_ghost, ARC_BUFC_METADATA, e);
return (total_evicted);
}
void
arc_flush(spa_t *spa, boolean_t retry)
{
uint64_t guid = 0;
/*
* If retry is B_TRUE, a spa must not be specified since we have
* no good way to determine if all of a spa's buffers have been
* evicted from an arc state.
*/
ASSERT(!retry || spa == NULL);
if (spa != NULL)
guid = spa_load_guid(spa);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_uncached, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_uncached, guid, ARC_BUFC_METADATA, retry);
}
void
arc_reduce_target_size(int64_t to_free)
{
uint64_t c = arc_c;
if (c <= arc_c_min)
return;
/*
* All callers want the ARC to actually evict (at least) this much
* memory. Therefore we reduce from the lower of the current size and
* the target size. This way, even if arc_c is much higher than
* arc_size (as can be the case after many calls to arc_freed()), we will
* immediately have arc_c < arc_size and therefore the arc_evict_zthr
* will evict.
*/
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
if (asize < c)
to_free += c - asize;
arc_c = MAX((int64_t)c - to_free, (int64_t)arc_c_min);
/* See comment in arc_evict_cb_check() on why lock+flag */
mutex_enter(&arc_evict_lock);
arc_evict_needed = B_TRUE;
mutex_exit(&arc_evict_lock);
zthr_wakeup(arc_evict_zthr);
}
/*
* Determine if the system is under memory pressure and is asking
* to reclaim memory. A return value of B_TRUE indicates that the system
* is under memory pressure and that the arc should adjust accordingly.
*/
boolean_t
arc_reclaim_needed(void)
{
return (arc_available_memory() < 0);
}
void
arc_kmem_reap_soon(void)
{
size_t i;
kmem_cache_t *prev_cache = NULL;
kmem_cache_t *prev_data_cache = NULL;
#ifdef _KERNEL
#if defined(_ILP32)
/*
* Reclaim unused memory from all kmem caches.
*/
kmem_reap();
#endif
#endif
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
/* reach upper limit of cache size on 32-bit */
if (zio_buf_cache[i] == NULL)
break;
#endif
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
kmem_cache_reap_now(zio_buf_cache[i]);
}
if (zio_data_buf_cache[i] != prev_data_cache) {
prev_data_cache = zio_data_buf_cache[i];
kmem_cache_reap_now(zio_data_buf_cache[i]);
}
}
kmem_cache_reap_now(buf_cache);
kmem_cache_reap_now(hdr_full_cache);
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(zfs_btree_leaf_cache);
abd_cache_reap_now();
}
static boolean_t
arc_evict_cb_check(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
#ifdef ZFS_DEBUG
/*
* This is necessary in order to keep the kstat information
* up to date for tools that display kstat data such as the
* mdb ::arc dcmd and the Linux crash utility. These tools
* typically do not call kstat's update function, but simply
* dump out stats from the most recent update. Without
* this call, these commands may show stale stats for the
* anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
* with this call, the data might be out of date if the
* evict thread hasn't been woken recently; but that should
* suffice. The arc_state_t structures can be queried
* directly if more accurate information is needed.
*/
if (arc_ksp != NULL)
arc_ksp->ks_update(arc_ksp, KSTAT_READ);
#endif
/*
* We have to rely on arc_wait_for_eviction() to tell us when to
* evict, rather than checking if we are overflowing here, so that we
* are sure to not leave arc_wait_for_eviction() waiting on aew_cv.
* If we have become "not overflowing" since arc_wait_for_eviction()
* checked, we need to wake it up. We could broadcast the CV here,
* but arc_wait_for_eviction() may have not yet gone to sleep. We
* would need to use a mutex to ensure that this function doesn't
* broadcast until arc_wait_for_eviction() has gone to sleep (e.g.
* the arc_evict_lock). However, the lock ordering of such a lock
* would necessarily be incorrect with respect to the zthr_lock,
* which is held before this function is called, and is held by
* arc_wait_for_eviction() when it calls zthr_wakeup().
*/
if (arc_evict_needed)
return (B_TRUE);
/*
* If we have buffers in uncached state, evict them periodically.
*/
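/*
* The sum of the two evictable sizes acts as a boolean here: wake the
* eviction thread only if there is something in the uncached state and
* at least arc_min_prefetch_ms / 2 has elapsed since the last flush.
*/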
return ((zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]) &&
ddi_get_lbolt() - arc_last_uncached_flush >
MSEC_TO_TICK(arc_min_prefetch_ms / 2)));
}
/*
* Keep arc_size under arc_c by running arc_evict which evicts data
* from the ARC.
*/
static void
arc_evict_cb(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
uint64_t evicted = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
/* Always try to evict from uncached state. */
arc_last_uncached_flush = ddi_get_lbolt();
evicted += arc_flush_state(arc_uncached, 0, ARC_BUFC_DATA, B_FALSE);
evicted += arc_flush_state(arc_uncached, 0, ARC_BUFC_METADATA, B_FALSE);
/* Evict from other states only if told to. */
if (arc_evict_needed)
evicted += arc_evict();
/*
* If evicted is zero, we couldn't evict anything
* via arc_evict(). This could be due to hash lock
* collisions, but more likely due to the majority of
* arc buffers being unevictable. Therefore, even if
* arc_size is above arc_c, another pass is unlikely to
* be helpful and could potentially cause us to enter an
* infinite loop. Additionally, zthr_iscancelled() is
* checked here so that if the arc is shutting down, the
* broadcast will wake any remaining arc evict waiters.
*/
mutex_enter(&arc_evict_lock);
arc_evict_needed = !zthr_iscancelled(arc_evict_zthr) &&
evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0;
if (!arc_evict_needed) {
/*
* We're either no longer overflowing, or we
* can't evict anything more, so we should wake
* arc_get_data_impl() sooner.
*/
arc_evict_waiter_t *aw;
while ((aw = list_remove_head(&arc_evict_waiters)) != NULL) {
cv_broadcast(&aw->aew_cv);
}
arc_set_need_free();
}
mutex_exit(&arc_evict_lock);
spl_fstrans_unmark(cookie);
}
static boolean_t
arc_reap_cb_check(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
int64_t free_memory = arc_available_memory();
static int reap_cb_check_counter = 0;
/*
* If a kmem reap is already active, don't schedule more. We must
* check for this because kmem_cache_reap_soon() won't actually
* block on the cache being reaped (this is to prevent callers from
* becoming implicitly blocked by a system-wide kmem reap -- which,
* on a system with many, many full magazines, can take minutes).
*/
if (!kmem_cache_reap_active() && free_memory < 0) {
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
/*
* Wait at least zfs_grow_retry (default 5) seconds
* before considering growing.
*/
arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
return (B_TRUE);
} else if (free_memory < arc_c >> arc_no_grow_shift) {
arc_no_grow = B_TRUE;
} else if (gethrtime() >= arc_growtime) {
arc_no_grow = B_FALSE;
}
/*
* Called unconditionally every 60 seconds to reclaim unused
* zstd compression and decompression context. This is done
* here to avoid the need for an independent thread.
*/
if (!((reap_cb_check_counter++) % 60))
zfs_zstd_cache_reap_now();
return (B_FALSE);
}
/*
* Keep enough free memory in the system by reaping the ARC's kmem
* caches. To cause more slabs to be reapable, we may reduce the
* target size of the cache (arc_c), causing the arc_evict_cb()
* to free more buffers.
*/
static void
arc_reap_cb(void *arg, zthr_t *zthr)
{
(void) arg, (void) zthr;
int64_t free_memory;
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* Kick off asynchronous kmem_reap()'s of all our caches.
*/
arc_kmem_reap_soon();
/*
* Wait at least arc_kmem_cache_reap_retry_ms between
* arc_kmem_reap_soon() calls. Without this check it is possible to
* end up in a situation where we spend lots of time reaping
* caches, while we're near arc_c_min. Waiting here also gives the
* subsequent free memory check a chance of finding that the
* asynchronous reap has already freed enough memory, and we don't
* need to call arc_reduce_target_size().
*/
delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000);
/*
* Reduce the target size as needed to maintain the amount of free
* memory in the system at a fraction of the arc_size (1/128th by
* default). If oversubscribed (free_memory < 0) then reduce the
* target arc_size by the deficit amount plus the fractional
* amount. If free memory is positive but less than the fractional
* amount, reduce by what is needed to hit the fractional amount.
*/
free_memory = arc_available_memory();
int64_t can_free = arc_c - arc_c_min;
if (can_free > 0) {
int64_t to_free = (can_free >> arc_shrink_shift) - free_memory;
if (to_free > 0)
arc_reduce_target_size(to_free);
}
spl_fstrans_unmark(cookie);
}
#ifdef _KERNEL
/*
* Determine the amount of memory eligible for eviction contained in the
* ARC. All clean data reported by the ghost lists can always be safely
* evicted. Due to arc_c_min, the same does not hold for all clean data
* contained by the regular mru and mfu lists.
*
* In the case of the regular mru and mfu lists, we need to report as
* much clean data as possible, such that evicting that same reported
* data will not bring arc_size below arc_c_min. Thus, in certain
* circumstances, the total amount of clean data in the mru and mfu
* lists might not actually be evictable.
*
* The following two distinct cases are accounted for:
*
* 1. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is greater than or equal to arc_c_min.
* (i.e. amount of dirty data >= arc_c_min)
*
* This is the easy case; all clean data contained by the mru and mfu
* lists is evictable. Evicting all clean data can only drop arc_size
* to the amount of dirty data, which is greater than arc_c_min.
*
* 2. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is less than arc_c_min.
* (i.e. arc_c_min > amount of dirty data)
*
* 2.1. arc_size is greater than or equal to arc_c_min.
* (i.e. arc_size >= arc_c_min > amount of dirty data)
*
* In this case, not all clean data from the regular mru and mfu
* lists is actually evictable; we must leave enough clean data
* to keep arc_size above arc_c_min. Thus, the maximum amount of
* evictable data from the two lists combined, is exactly the
* difference between arc_size and arc_c_min.
*
* 2.2. arc_size is less than arc_c_min
* (i.e. arc_c_min > arc_size > amount of dirty data)
*
* In this case, none of the data contained in the mru and mfu
* lists is evictable, even if it's clean. Since arc_size is
* already below arc_c_min, evicting any more would only
* increase this negative difference.
*/
#endif /* _KERNEL */
/*
* Adapt arc info given the number of bytes we are trying to add and
* the state that we are coming from. This function is only called
* when we are adding new content to the cache.
*/
static void
arc_adapt(uint64_t bytes)
{
/*
* Wake reap thread if we do not have any available memory
*/
if (arc_reclaim_needed()) {
zthr_wakeup(arc_reap_zthr);
return;
}
if (arc_no_grow)
return;
if (arc_c >= arc_c_max)
return;
/*
* If we're within (2 * maxblocksize) bytes of the target
* cache size, increment the target cache size
*/
if (aggsum_upper_bound(&arc_sums.arcstat_size) +
2 * SPA_MAXBLOCKSIZE >= arc_c) {
uint64_t dc = MAX(bytes, SPA_OLD_MAXBLOCKSIZE);
if (atomic_add_64_nv(&arc_c, dc) > arc_c_max)
arc_c = arc_c_max;
}
}
/*
* Check if arc_size has grown past our upper threshold, determined by
* zfs_arc_overflow_shift.
*/
static arc_ovf_level_t
arc_is_overflowing(boolean_t use_reserve)
{
/* Always allow at least one block of overflow */
int64_t overflow = MAX(SPA_MAXBLOCKSIZE,
arc_c >> zfs_arc_overflow_shift);
/*
* We just compare the lower bound here for performance reasons. Our
* primary goals are to make sure that the arc never grows without
* bound, and that it can reach its maximum size. This check
* accomplishes both goals. The maximum amount we could run over by is
* 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
* in the ARC. In practice, that's in the tens of MB, which is low
* enough to be safe.
*/
int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) -
arc_c - overflow / 2;
if (!use_reserve)
overflow /= 2;
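/*
* Thresholds: below arc_c + overflow/2 there is no overflow; the
* ARC_OVF_SOME band then extends up to arc_c + overflow (or up to
* arc_c + 1.5 * overflow when the reserve is in use), beyond which
* the overflow is considered severe.
*/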
return (over < 0 ? ARC_OVF_NONE :
over < overflow ? ARC_OVF_SOME : ARC_OVF_SEVERE);
}
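/*
* Allocate an ABD to hold this header's data, charging the allocation
* to the ARC via arc_get_data_impl() and honoring ARC_HDR_ALLOC_LINEAR.
*/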
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, const void *tag,
int alloc_flags)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, alloc_flags);
if (alloc_flags & ARC_HDR_ALLOC_LINEAR)
return (abd_alloc_linear(size, type == ARC_BUFC_METADATA));
else
return (abd_alloc(size, type == ARC_BUFC_METADATA));
}
static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, const void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, 0);
if (type == ARC_BUFC_METADATA) {
return (zio_buf_alloc(size));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (zio_data_buf_alloc(size));
}
}
/*
* Wait for the specified amount of data (in bytes) to be evicted from the
* ARC, and for there to be sufficient free memory in the system. Waiting for
* eviction ensures that the memory used by the ARC decreases. Waiting for
* free memory ensures that the system won't run out of free pages, regardless
* of ARC behavior and settings. See arc_lowmem_init().
*/
void
arc_wait_for_eviction(uint64_t amount, boolean_t use_reserve)
{
switch (arc_is_overflowing(use_reserve)) {
case ARC_OVF_NONE:
return;
case ARC_OVF_SOME:
/*
* This is a bit racy without taking arc_evict_lock, but the
* worst that can happen is that we either call zthr_wakeup()
* an extra time due to a race with another thread, or that the
* flag gets cleared by arc_evict_cb(), which is unlikely given
* the large hysteresis and also unimportant, since at this level
* of overflow the eviction is purely advisory. At the same time,
* taking the global lock here on every call without waiting for
* the actual eviction would create significant lock contention.
*/
if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
return;
case ARC_OVF_SEVERE:
default:
{
arc_evict_waiter_t aw;
list_link_init(&aw.aew_node);
cv_init(&aw.aew_cv, NULL, CV_DEFAULT, NULL);
uint64_t last_count = 0;
mutex_enter(&arc_evict_lock);
if (!list_is_empty(&arc_evict_waiters)) {
arc_evict_waiter_t *last =
list_tail(&arc_evict_waiters);
last_count = last->aew_count;
} else if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
/*
* Note, the last waiter's count may be less than
* arc_evict_count if we are low on memory, in which
* case arc_evict_state_impl() may have deferred
* wakeups (but still incremented arc_evict_count).
*/
aw.aew_count = MAX(last_count, arc_evict_count) + amount;
list_insert_tail(&arc_evict_waiters, &aw);
arc_set_need_free();
DTRACE_PROBE3(arc__wait__for__eviction,
uint64_t, amount,
uint64_t, arc_evict_count,
uint64_t, aw.aew_count);
/*
* We will be woken up either when arc_evict_count reaches
* aew_count, or when the ARC is no longer overflowing and
* eviction completes.
* In case of "false" wakeup, we will still be on the list.
*/
do {
cv_wait(&aw.aew_cv, &arc_evict_lock);
} while (list_link_active(&aw.aew_node));
mutex_exit(&arc_evict_lock);
cv_destroy(&aw.aew_cv);
}
}
}
/*
* Allocate a block and return it to the caller. If we are hitting the
* hard limit for the cache size, we must sleep, waiting for the eviction
* thread to catch up. If we're past the target size but below the hard
* limit, we'll only signal the reclaim thread and continue on.
*/
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, const void *tag,
int alloc_flags)
{
arc_adapt(size);
/*
* If arc_size is currently overflowing, we must be adding data
* faster than we are evicting. To ensure we don't compound the
* problem by adding more data and forcing arc_size to grow even
* further past its target size, we wait for the eviction thread to
* make some progress. We also wait for there to be sufficient free
* memory in the system, as measured by arc_free_memory().
*
* Specifically, we wait for zfs_arc_eviction_pct percent of the
* requested size to be evicted. This should be more than 100%, to
* ensure that progress is also made towards getting arc_size
* under arc_c. See the comment above zfs_arc_eviction_pct.
*/
arc_wait_for_eviction(size * zfs_arc_eviction_pct / 100,
alloc_flags & ARC_HDR_USE_RESERVE);
arc_buf_contents_t type = arc_buf_type(hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_consume(size, ARC_SPACE_META);
} else {
arc_space_consume(size, ARC_SPACE_DATA);
}
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
if (!GHOST_STATE(state)) {
(void) zfs_refcount_add_many(&state->arcs_size[type], size,
tag);
/*
* If this is reached via arc_read, the link is
* protected by the hash lock. If reached via
* arc_buf_alloc, the header should not be accessed by
* any other thread. And, if reached via arc_read_done,
* the hash lock will protect it if it's found in the
* hash table; otherwise no other thread should be
* trying to [add|remove]_reference it.
*/
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
size, tag);
}
}
}
static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size,
const void *tag)
{
arc_free_data_impl(hdr, size, tag);
abd_free(abd);
}
static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, const void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_free_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
zio_buf_free(buf, size);
} else {
ASSERT(type == ARC_BUFC_DATA);
zio_data_buf_free(buf, size);
}
}
/*
* Free the arc data buffer.
*/
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, const void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, tag);
}
(void) zfs_refcount_remove_many(&state->arcs_size[type], size, tag);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
}
/*
* This routine is called whenever a buffer is accessed.
*/
static void
arc_access(arc_buf_hdr_t *hdr, arc_flags_t arc_flags, boolean_t hit)
{
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Update buffer prefetch status.
*/
boolean_t was_prefetch = HDR_PREFETCH(hdr);
boolean_t now_prefetch = arc_flags & ARC_FLAG_PREFETCH;
if (was_prefetch != now_prefetch) {
if (was_prefetch) {
ARCSTAT_CONDSTAT(hit, demand_hit, demand_iohit,
HDR_PRESCIENT_PREFETCH(hdr), prescient, predictive,
prefetch);
}
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
if (was_prefetch) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH | ARC_FLAG_PRESCIENT_PREFETCH);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
}
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
if (now_prefetch) {
if (arc_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
ARCSTAT_BUMP(arcstat_prescient_prefetch);
} else {
ARCSTAT_BUMP(arcstat_predictive_prefetch);
}
}
if (arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
clock_t now = ddi_get_lbolt();
if (hdr->b_l1hdr.b_state == arc_anon) {
arc_state_t *new_state;
/*
* This buffer is not in the cache, and does not appear in
* our "ghost" lists. Add it to the MRU or uncached state.
*/
ASSERT0(hdr->b_l1hdr.b_arc_access);
hdr->b_l1hdr.b_arc_access = now;
if (HDR_UNCACHED(hdr)) {
new_state = arc_uncached;
DTRACE_PROBE1(new_state__uncached, arc_buf_hdr_t *,
hdr);
} else {
new_state = arc_mru;
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
}
arc_change_state(new_state, hdr);
} else if (hdr->b_l1hdr.b_state == arc_mru) {
/*
* This buffer has been accessed once recently and either
* its read is still in progress or it is in the cache.
*/
if (HDR_IO_IN_PROGRESS(hdr)) {
hdr->b_l1hdr.b_arc_access = now;
return;
}
hdr->b_l1hdr.b_mru_hits++;
ARCSTAT_BUMP(arcstat_mru_hits);
/*
* If the previous access was a prefetch, then it already
* handled possible promotion, so nothing more to do for now.
*/
if (was_prefetch) {
hdr->b_l1hdr.b_arc_access = now;
return;
}
/*
* If more than ARC_MINTIME have passed from the previous
* hit, promote the buffer to the MFU state.
*/
if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
ARC_MINTIME)) {
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr);
}
} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
arc_state_t *new_state;
/*
* This buffer has been accessed once recently, but was
* evicted from the cache. Had the MRU been bigger, this
* would have been an MRU hit, so handle it the same way,
* except that we don't need to check the previous access time.
*/
hdr->b_l1hdr.b_mru_ghost_hits++;
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
hdr->b_l1hdr.b_arc_access = now;
wmsum_add(&arc_mru_ghost->arcs_hits[arc_buf_type(hdr)],
arc_hdr_size(hdr));
if (was_prefetch) {
new_state = arc_mru;
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
}
arc_change_state(new_state, hdr);
} else if (hdr->b_l1hdr.b_state == arc_mfu) {
/*
* This buffer has been accessed more than once and is either
* still in the cache or being restored from one of the ghost states.
*/
if (!HDR_IO_IN_PROGRESS(hdr)) {
hdr->b_l1hdr.b_mfu_hits++;
ARCSTAT_BUMP(arcstat_mfu_hits);
}
hdr->b_l1hdr.b_arc_access = now;
} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
/*
* This buffer has been accessed more than once recently, but
* has been evicted from the cache. Had the MFU been bigger,
* it would have stayed in the cache, so move it back to the MFU state.
*/
hdr->b_l1hdr.b_mfu_ghost_hits++;
ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
hdr->b_l1hdr.b_arc_access = now;
wmsum_add(&arc_mfu_ghost->arcs_hits[arc_buf_type(hdr)],
arc_hdr_size(hdr));
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr);
} else if (hdr->b_l1hdr.b_state == arc_uncached) {
/*
* This buffer is uncacheable, but we got a hit. Probably
* a demand read after prefetch. Nothing more to do here.
*/
if (!HDR_IO_IN_PROGRESS(hdr))
ARCSTAT_BUMP(arcstat_uncached_hits);
hdr->b_l1hdr.b_arc_access = now;
} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
/*
* This buffer is on the 2nd Level ARC and was not accessed
* for a long time, so treat it as new and put into MRU.
*/
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mru, hdr);
} else {
cmn_err(CE_PANIC, "invalid arc state 0x%p",
hdr->b_l1hdr.b_state);
}
}
/*
* This routine is called by dbuf_hold() to update the arc_access() state
* which otherwise would be skipped for entries in the dbuf cache.
*/
void
arc_buf_access(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Avoid taking the hash_lock when possible as an optimization.
* The header must be checked again under the hash_lock in order
* to handle the case where it is concurrently being released.
*/
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr))
return;
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_access_skip);
return;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu ||
hdr->b_l1hdr.b_state == arc_uncached);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, 0, B_TRUE);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(B_TRUE /* demand */, demand, prefetch,
!HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
}
/* a generic arc_read_done_func_t which you can use */
void
arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
(void) zio, (void) zb, (void) bp;
if (buf == NULL)
return;
memcpy(arg, buf->b_data, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
/* a generic arc_read_done_func_t */
void
arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
(void) zb, (void) bp;
arc_buf_t **bufp = arg;
if (buf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
*bufp = NULL;
} else {
ASSERT(zio == NULL || zio->io_error == 0);
*bufp = buf;
ASSERT(buf->b_data != NULL);
}
}
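/*
* Sanity-check that the header's cached sizes and compression settings
* match the block pointer it claims to describe.
*/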
static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
ASSERT3U(arc_hdr_get_compress(hdr), ==,
BP_GET_COMPRESS(bp));
}
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp));
}
}
static void
arc_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
arc_buf_hdr_t *hdr = zio->io_private;
kmutex_t *hash_lock = NULL;
arc_callback_t *callback_list;
arc_callback_t *acb;
/*
* The hdr was inserted into hash-table and removed from lists
* prior to starting I/O. We should find this header, since
* it's in the hash table, and it should be legit since it's
* not possible to evict it during the I/O. The only possible
* reason for it not to be found is if we were freed during the
* read.
*/
if (HDR_IN_HASH_TABLE(hdr)) {
arc_buf_hdr_t *found;
ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
ASSERT3U(hdr->b_dva.dva_word[0], ==,
BP_IDENTITY(zio->io_bp)->dva_word[0]);
ASSERT3U(hdr->b_dva.dva_word[1], ==,
BP_IDENTITY(zio->io_bp)->dva_word[1]);
found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
ASSERT((found == hdr &&
DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
(found == hdr && HDR_L2_READING(hdr)));
ASSERT3P(hash_lock, !=, NULL);
}
if (BP_IS_PROTECTED(bp)) {
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
if (zio->io_error == 0) {
if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
void *tmpbuf;
tmpbuf = abd_borrow_buf_copy(zio->io_abd,
sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmpbuf,
hdr->b_crypt_hdr.b_mac);
abd_return_buf(zio->io_abd, tmpbuf,
sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp,
hdr->b_crypt_hdr.b_mac);
}
}
}
if (zio->io_error == 0) {
/* byteswap if necessary */
if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
if (BP_GET_LEVEL(zio->io_bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
if (!HDR_L2_READING(hdr)) {
hdr->b_complevel = zio->io_prop.zp_complevel;
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
if (l2arc_noprefetch && HDR_PREFETCH(hdr))
arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
callback_list = hdr->b_l1hdr.b_acb;
ASSERT3P(callback_list, !=, NULL);
hdr->b_l1hdr.b_acb = NULL;
/*
* If a read request has a callback (i.e. acb_done is not NULL), then we
* make a buf containing the data according to the parameters which were
* passed in. The implementation of arc_buf_alloc_impl() ensures that we
* aren't needlessly decompressing the data multiple times.
*/
int callback_cnt = 0;
for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
/* Remember the last callback; the loop below walks acb_prev to call them in original order. */
callback_list = acb;
if (!acb->acb_done || acb->acb_nobuf)
continue;
callback_cnt++;
if (zio->io_error != 0)
continue;
int error = arc_buf_alloc_impl(hdr, zio->io_spa,
&acb->acb_zb, acb->acb_private, acb->acb_encrypted,
acb->acb_compressed, acb->acb_noauth, B_TRUE,
&acb->acb_buf);
/*
* Assert non-speculative zios didn't fail because an
* encryption key wasn't loaded
*/
ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
error != EACCES);
/*
* If we failed to decrypt, report an error now (as the zio
* layer would have done if it had done the transforms).
*/
if (error == ECKSUM) {
ASSERT(BP_IS_PROTECTED(bp));
error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(zio->io_spa, &acb->acb_zb,
&zio->io_bp->blk_birth);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
zio->io_spa, NULL, &acb->acb_zb, zio, 0);
}
}
if (error != 0) {
/*
* Decompression or decryption failed. Set
* io_error so that when we call acb_done
* (below), we will indicate that the read
* failed. Note that in the unusual case
* where one callback is compressed and another
* uncompressed, we will mark all of them
* as failed, even though the uncompressed
* one can't actually fail. In this case,
* the hdr will not be anonymous, because
* if there are multiple callbacks, it's
* because multiple threads found the same
* arc buf in the hash table.
*/
zio->io_error = error;
}
}
/*
* If there are multiple callbacks, we must have the hash lock,
* because the only way for multiple threads to find this hdr is
* in the hash table. This ensures that if there are multiple
* callbacks, the hdr is not anonymous. If it were anonymous,
* we couldn't use arc_buf_destroy() in the error case below.
*/
ASSERT(callback_cnt < 2 || hash_lock != NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hdr->b_l1hdr.b_state != arc_anon)
arc_change_state(arc_anon, hdr);
if (HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
}
/*
* Broadcast before we drop the hash_lock to avoid the possibility
* that the hdr (and hence the cv) might be freed before we get to
* the cv_broadcast().
*/
cv_broadcast(&hdr->b_l1hdr.b_cv);
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
(void) remove_reference(hdr, hdr);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* execute each callback and free its structure */
while ((acb = callback_list) != NULL) {
if (acb->acb_done != NULL) {
if (zio->io_error != 0 && acb->acb_buf != NULL) {
/*
* If arc_buf_alloc_impl() fails during
* decompression, the buf will still be
* allocated, and needs to be freed here.
*/
arc_buf_destroy(acb->acb_buf,
acb->acb_private);
acb->acb_buf = NULL;
}
acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
acb->acb_buf, acb->acb_private);
}
if (acb->acb_zio_dummy != NULL) {
acb->acb_zio_dummy->io_error = zio->io_error;
zio_nowait(acb->acb_zio_dummy);
}
callback_list = acb->acb_prev;
if (acb->acb_wait) {
mutex_enter(&acb->acb_wait_lock);
acb->acb_wait_error = zio->io_error;
acb->acb_wait = B_FALSE;
cv_signal(&acb->acb_wait_cv);
mutex_exit(&acb->acb_wait_lock);
/* acb will be freed by the waiting thread. */
} else {
kmem_free(acb, sizeof (arc_callback_t));
}
}
}
/*
* "Read" the block at the specified DVA (in bp) via the
* cache. If the block is found in the cache, invoke the provided
* callback immediately and return. Note that the `zio' parameter
* in the callback will be NULL in this case, since no IO was
* required. If the block is not in the cache pass the read request
* on to the spa with a substitute callback function, so that the
* requested block will be added to the cache.
*
* If a read request arrives for a block that has a read in-progress,
* either wait for the in-progress read to complete (and return the
* results); or, if this is a read with a "done" func, add a record
* to the read to invoke the "done" func when the read completes,
* and return; or just return.
*
* arc_read_done() will invoke all the requested "done" functions
* for readers of this block.
*/
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_read_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = NULL;
kmutex_t *hash_lock = NULL;
zio_t *rzio;
uint64_t guid = spa_load_guid(spa);
boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0;
boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t embedded_bp = !!BP_IS_EMBEDDED(bp);
boolean_t no_buf = *arc_flags & ARC_FLAG_NO_BUF;
arc_buf_t *buf = NULL;
int rc = 0;
ASSERT(!embedded_bp ||
BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_REDACTED(bp));
/*
* Normally SPL_FSTRANS will already be set since kernel threads which
* expect to call the DMU interfaces will set it when created. System
* calls are similarly handled by setting/cleaning the bit in the
* registered callback (module/os/.../zfs/zpl_*).
*
* External consumers such as Lustre which call the exported DMU
* interfaces may not have set SPL_FSTRANS. To avoid a deadlock
* on the hash_lock always set and clear the bit.
*/
fstrans_cookie_t cookie = spl_fstrans_mark();
top:
/*
* Verify the block pointer contents are reasonable. This should
* always be the case since the blkptr is protected by a checksum.
* However, if there is damage it's desirable to detect this early
* and treat it as a checksum error. This allows an alternate blkptr
* to be tried when one is available (e.g. ditto blocks).
*/
if (!zfs_blkptr_verify(spa, bp, (zio_flags & ZIO_FLAG_CONFIG_WRITER) ?
BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
rc = SET_ERROR(ECKSUM);
goto done;
}
if (!embedded_bp) {
/*
* Embedded BP's have no DVA and require no I/O to "read";
* an anonymous arc buf is created for them further below,
* so only look up normal block pointers in the hash table.
*/
hdr = buf_hash_find(guid, bp, &hash_lock);
}
/*
* Determine if we have an L1 cache hit or a cache miss. For simplicity
* we maintain encrypted data separately from compressed / uncompressed
* data. If the user is requesting raw encrypted data and we don't have
* that in the header we will read from disk to guarantee that we can
* get it even if the encryption keys aren't loaded.
*/
if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) ||
(hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
boolean_t is_data = !HDR_ISTYPE_METADATA(hdr);
if (HDR_IO_IN_PROGRESS(hdr)) {
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_cached_only_in_progress);
rc = SET_ERROR(ENOENT);
goto done;
}
zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
ASSERT3P(head_zio, !=, NULL);
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
* This is a sync read that needs to wait for
* an in-flight async read. Request that the
* zio have its priority upgraded.
*/
zio_change_priority(head_zio, priority);
DTRACE_PROBE1(arc__async__upgrade__sync,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_async_upgrade_sync);
}
DTRACE_PROBE1(arc__iohit, arc_buf_hdr_t *, hdr);
arc_access(hdr, *arc_flags, B_FALSE);
/*
* If there are multiple threads reading the same block
* and that block is not yet in the ARC, then only one
* thread will do the physical I/O and all other
* threads will wait until that I/O completes.
* Synchronous reads use the acb_wait_cv whereas nowait
* reads register a callback. Both are signalled/called
* in arc_read_done.
*
* Errors of the physical I/O may need to be propagated.
* Synchronous read errors are returned here from
* arc_read_done via acb_wait_error. Nowait reads
* attach the acb_zio_dummy zio to pio and
* arc_read_done propagates the physical I/O's io_error
* to acb_zio_dummy, and thereby to pio.
*/
arc_callback_t *acb = NULL;
if (done || pio || *arc_flags & ARC_FLAG_WAIT) {
acb = kmem_zalloc(sizeof (arc_callback_t),
KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_nobuf = no_buf;
if (*arc_flags & ARC_FLAG_WAIT) {
acb->acb_wait = B_TRUE;
mutex_init(&acb->acb_wait_lock, NULL,
MUTEX_DEFAULT, NULL);
cv_init(&acb->acb_wait_cv, NULL,
CV_DEFAULT, NULL);
}
acb->acb_zb = *zb;
if (pio != NULL) {
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
}
acb->acb_zio_head = head_zio;
acb->acb_next = hdr->b_l1hdr.b_acb;
if (hdr->b_l1hdr.b_acb)
hdr->b_l1hdr.b_acb->acb_prev = acb;
hdr->b_l1hdr.b_acb = acb;
}
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_iohits);
ARCSTAT_CONDSTAT(!(*arc_flags & ARC_FLAG_PREFETCH),
demand, prefetch, is_data, data, metadata, iohits);
if (*arc_flags & ARC_FLAG_WAIT) {
mutex_enter(&acb->acb_wait_lock);
while (acb->acb_wait) {
cv_wait(&acb->acb_wait_cv,
&acb->acb_wait_lock);
}
rc = acb->acb_wait_error;
mutex_exit(&acb->acb_wait_lock);
mutex_destroy(&acb->acb_wait_lock);
cv_destroy(&acb->acb_wait_cv);
kmem_free(acb, sizeof (arc_callback_t));
}
goto out;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu ||
hdr->b_l1hdr.b_state == arc_uncached);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, *arc_flags, B_TRUE);
if (done && !no_buf) {
ASSERT(!embedded_bp || !BP_IS_HOLE(bp));
/* Get a buf with the desired data in it. */
rc = arc_buf_alloc_impl(hdr, spa, zb, private,
encrypted_read, compressed_read, noauth_read,
B_TRUE, &buf);
if (rc == ECKSUM) {
/*
* Convert authentication and decryption errors
* to EIO (and generate an ereport if needed)
* before leaving the ARC.
*/
rc = SET_ERROR(EIO);
if ((zio_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, zb, &hdr->b_birth);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
}
if (rc != 0) {
arc_buf_destroy_impl(buf);
buf = NULL;
(void) remove_reference(hdr, private);
}
/* assert any errors weren't due to unloaded keys */
ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
rc != EACCES);
}
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!(*arc_flags & ARC_FLAG_PREFETCH),
demand, prefetch, is_data, data, metadata, hits);
*arc_flags |= ARC_FLAG_CACHED;
goto done;
} else {
uint64_t lsize = BP_GET_LSIZE(bp);
uint64_t psize = BP_GET_PSIZE(bp);
arc_callback_t *acb;
vdev_t *vd = NULL;
uint64_t addr = 0;
boolean_t devw = B_FALSE;
uint64_t size;
abd_t *hdr_abd;
int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0;
arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
if (hash_lock != NULL)
mutex_exit(hash_lock);
rc = SET_ERROR(ENOENT);
goto done;
}
if (hdr == NULL) {
/*
* This block is not in the cache or it has
* embedded data.
*/
arc_buf_hdr_t *exists = NULL;
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), 0, type);
if (!embedded_bp) {
hdr->b_dva = *BP_IDENTITY(bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
exists = buf_hash_insert(hdr, &hash_lock);
}
if (exists != NULL) {
/* somebody beat us to the hash insert */
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_hdr_destroy(hdr);
goto top; /* restart the IO request */
}
} else {
/*
* This block is in the ghost cache or encrypted data
* was requested and we didn't have it. If it was
* L2-only (and thus didn't have an L1 hdr),
* we realloc the header to add an L1 hdr.
*/
if (!HDR_HAS_L1HDR(hdr)) {
hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
hdr_full_cache);
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(zfs_refcount_count(
&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
#ifdef ZFS_DEBUG
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
#endif
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
* If this header already had an IO in progress
* and we are performing another IO to fetch
* encrypted data we must wait until the first
* IO completes so as not to confuse
* arc_read_done(). This should be very rare
* and so the performance impact shouldn't
* matter.
*/
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
}
if (*arc_flags & ARC_FLAG_UNCACHED) {
arc_hdr_set_flags(hdr, ARC_FLAG_UNCACHED);
if (!encrypted_read)
alloc_flags |= ARC_HDR_ALLOC_LINEAR;
}
/*
* Take an additional reference for IO_IN_PROGRESS. It prevents
* arc_access() from placing this header, which has no buffers and
* no other references but clearly must not be evicted, onto the
* evictable list of the MRU or MFU state.
*/
add_reference(hdr, hdr);
if (!embedded_bp)
arc_access(hdr, *arc_flags, B_FALSE);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
arc_hdr_alloc_abd(hdr, alloc_flags);
if (encrypted_read) {
ASSERT(HDR_HAS_RABD(hdr));
size = HDR_GET_PSIZE(hdr);
hdr_abd = hdr->b_crypt_hdr.b_rabd;
zio_flags |= ZIO_FLAG_RAW;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
size = arc_hdr_size(hdr);
hdr_abd = hdr->b_l1hdr.b_pabd;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* For authenticated bp's, we do not ask the ZIO layer
* to authenticate them since this will cause the entire
* IO to fail if the key isn't loaded. Instead, we
* defer authentication until arc_buf_fill(), which will
* verify the data when the key is available.
*/
if (BP_IS_AUTHENTICATED(bp))
zio_flags |= ZIO_FLAG_RAW_ENCRYPT;
}
if (BP_IS_AUTHENTICATED(bp))
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
if (BP_GET_LEVEL(bp) > 0)
arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_zb = *zb;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
hdr->b_l1hdr.b_acb = acb;
if (HDR_HAS_L2HDR(hdr) &&
(vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
devw = hdr->b_l2hdr.b_dev->l2ad_writing;
addr = hdr->b_l2hdr.b_daddr;
/*
* Lock out L2ARC device removal.
*/
if (vdev_is_dead(vd) ||
!spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
vd = NULL;
}
/*
* We count both async reads and scrub IOs as asynchronous so
* that both can be upgraded in the event of a cache hit while
* the read IO is still in-flight.
*/
if (priority == ZIO_PRIORITY_ASYNC_READ ||
priority == ZIO_PRIORITY_SCRUB)
arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
else
arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
/*
* At this point, we have a level 1 cache miss or a blkptr
* with embedded data. Try again in L2ARC if possible.
*/
ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
/*
* Skip ARC stat bump for block pointers with embedded
* data. The data are read from the blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr,
blkptr_t *, bp, uint64_t, lsize,
zbookmark_phys_t *, zb);
ARCSTAT_BUMP(arcstat_misses);
ARCSTAT_CONDSTAT(!(*arc_flags & ARC_FLAG_PREFETCH),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data,
metadata, misses);
zfs_racct_read(size, 1);
}
/* Check if the spa even has l2 configured */
const boolean_t spa_has_l2 = l2arc_ndev != 0 &&
spa->spa_l2cache.sav_count > 0;
if (vd != NULL && spa_has_l2 && !(l2arc_norw && devw)) {
/*
* Read from the L2ARC if the following are true:
* 1. The L2ARC vdev was previously cached.
* 2. This buffer still has L2ARC metadata.
* 3. This buffer isn't currently writing to the L2ARC.
* 4. The L2ARC entry wasn't evicted, which may
* also have invalidated the vdev.
* 5. This isn't prefetch or l2arc_noprefetch is 0.
*/
if (HDR_HAS_L2HDR(hdr) &&
!HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
!(l2arc_noprefetch &&
(*arc_flags & ARC_FLAG_PREFETCH))) {
l2arc_read_callback_t *cb;
abd_t *abd;
uint64_t asize;
DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_hits);
hdr->b_l2hdr.b_hits++;
cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
KM_SLEEP);
cb->l2rcb_hdr = hdr;
cb->l2rcb_bp = *bp;
cb->l2rcb_zb = *zb;
cb->l2rcb_flags = zio_flags;
/*
* When Compressed ARC is disabled, but the
* L2ARC block is compressed, arc_hdr_size()
* will have returned LSIZE rather than PSIZE.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr) &&
HDR_GET_PSIZE(hdr) != 0) {
size = HDR_GET_PSIZE(hdr);
}
asize = vdev_psize_to_asize(vd, size);
if (asize != size) {
abd = abd_alloc_for_io(asize,
HDR_ISTYPE_METADATA(hdr));
cb->l2rcb_abd = abd;
} else {
abd = hdr_abd;
}
ASSERT(addr >= VDEV_LABEL_START_SIZE &&
addr + asize <= vd->vdev_psize -
VDEV_LABEL_END_SIZE);
/*
* l2arc read. The SCL_L2ARC lock will be
* released by l2arc_read_done().
* Issue a null zio if the underlying buffer
* was squashed to zero size by compression.
*/
ASSERT3U(arc_hdr_get_compress(hdr), !=,
ZIO_COMPRESS_EMPTY);
rzio = zio_read_phys(pio, vd, addr,
asize, abd,
ZIO_CHECKSUM_OFF,
l2arc_read_done, cb, priority,
- zio_flags | ZIO_FLAG_DONT_CACHE |
- ZIO_FLAG_CANFAIL |
+ zio_flags | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY, B_FALSE);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes,
HDR_GET_PSIZE(hdr));
if (*arc_flags & ARC_FLAG_NOWAIT) {
zio_nowait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_WAIT);
if (zio_wait(rzio) == 0)
goto out;
/* l2arc read error; goto zio_read() */
if (hash_lock != NULL)
mutex_enter(hash_lock);
} else {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
if (HDR_L2_WRITING(hdr))
ARCSTAT_BUMP(arcstat_l2_rw_clash);
spa_config_exit(spa, SCL_L2ARC, vd);
}
} else {
if (vd != NULL)
spa_config_exit(spa, SCL_L2ARC, vd);
/*
* Only a spa with l2 should contribute to l2
* miss stats. (Including the case of having a
* faulted cache device - that's also a miss.)
*/
if (spa_has_l2) {
/*
* Skip ARC stat bump for block pointers with
* embedded data. The data are read from the
* blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
}
}
}
rzio = zio_read(pio, spa, bp, hdr_abd, size,
arc_read_done, hdr, priority, zio_flags, zb);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
if (*arc_flags & ARC_FLAG_WAIT) {
rc = zio_wait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
zio_nowait(rzio);
}
out:
/* embedded bps don't actually go to disk */
if (!embedded_bp)
spa_read_history_add(spa, zb, *arc_flags);
spl_fstrans_unmark(cookie);
return (rc);
done:
if (done)
done(NULL, zb, bp, buf, private);
if (pio && rc != 0) {
zio_t *zio = zio_null(pio, spa, NULL, NULL, NULL, zio_flags);
zio->io_error = rc;
zio_nowait(zio);
}
goto out;
}
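/*
 * Illustrative caller-side sketch for arc_read() above (hypothetical
 * variables spa, bp and zb; not part of this file): a blocking read
 * passes ARC_FLAG_WAIT and no parent zio, e.g.
 *
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *
 * A nowait caller instead passes ARC_FLAG_NOWAIT, its own done callback,
 * and typically a parent zio so that I/O errors propagate to it.
 */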
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
arc_prune_t *p;
p = kmem_alloc(sizeof (*p), KM_SLEEP);
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);
zfs_refcount_create(&p->p_refcnt);
mutex_enter(&arc_prune_mtx);
zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
list_insert_head(&arc_prune_list, p);
mutex_exit(&arc_prune_mtx);
return (p);
}
void
arc_remove_prune_callback(arc_prune_t *p)
{
boolean_t wait = B_FALSE;
mutex_enter(&arc_prune_mtx);
list_remove(&arc_prune_list, p);
if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
wait = B_TRUE;
mutex_exit(&arc_prune_mtx);
/* wait for arc_prune_task to finish */
if (wait)
taskq_wait_outstanding(arc_prune_taskq, 0);
ASSERT0(zfs_refcount_count(&p->p_refcnt));
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
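/*
 * Illustrative usage sketch for the two functions above (hypothetical
 * consumer, not part of this file): an external caller registers a prune
 * callback while it is active and removes it before tearing down, e.g.
 *
 *	arc_prune_t *ap = arc_add_prune_callback(my_prune_cb, my_private);
 *	...
 *	arc_remove_prune_callback(ap);
 *
 * my_prune_cb and my_private are placeholders; the registered function is
 * invoked from the arc_prune taskq with the private pointer when the ARC
 * wants metadata buffers released.
 */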
/*
* Notify the arc that a block was freed, and thus will never be used again.
*/
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint64_t guid = spa_load_guid(spa);
ASSERT(!BP_IS_EMBEDDED(bp));
hdr = buf_hash_find(guid, bp, &hash_lock);
if (hdr == NULL)
return;
/*
* We might be trying to free a block that is still doing I/O
* (i.e. prefetch) or that still has a reference, such as a
* dedup-ed, dmu_sync-ed block. In that case dmu_sync() would have
* written the new block to its final resting place on disk but
* without the dedup flag set. This would have left the hdr in the MRU
* state and discoverable. When the txg finally syncs it detects that
* the block was overridden in open context and issues an override I/O.
* Since this is a dedup block, the override I/O will determine if the
* block is already in the DDT. If so, then it will replace the io_bp
* with the bp from the DDT and allow the I/O to finish. When the I/O
* reaches the done callback, dbuf_write_override_done, it will
* check to see if the io_bp and io_bp_override are identical.
* If they are not, then it indicates that the bp was replaced with
* the bp in the DDT and the override bp is freed. This allows
* us to arrive here with a reference on a block that is being
* freed. So if we have an I/O in progress, or a reference to
* this hdr, then we don't destroy the hdr.
*/
if (!HDR_HAS_L1HDR(hdr) ||
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
mutex_exit(hash_lock);
} else {
mutex_exit(hash_lock);
}
}
/*
* Release this buffer from the cache, making it an anonymous buffer. This
* must be done after a read and prior to modifying the buffer contents.
* If the buffer has more than one reference, we must make
* a new hdr for the buffer.
*/
void
arc_release(arc_buf_t *buf, const void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
* linked into the hash table.
*/
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
hdr->b_l1hdr.b_arc_access = 0;
/*
* If the buf is being overridden then it may already
* have a hdr that is not empty.
*/
buf_discard_identity(hdr);
arc_buf_thaw(buf);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
/*
* This assignment is only valid as long as the hash_lock is
* held; we must be careful not to reference state or the
* b_state field after dropping the lock.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(state, !=, arc_anon);
/* this buffer is not on any list */
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
if (HDR_HAS_L2HDR(hdr)) {
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
/*
* We have to recheck this conditional again now that
* we're holding the l2ad_mtx to prevent a race with
* another thread which might be concurrently calling
* l2arc_evict(). In that case, l2arc_evict() might have
* destroyed the header's L2 portion as we were waiting
* to acquire the l2ad_mtx.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
}
/*
* Do we have more than one buf?
*/
if (hdr->b_l1hdr.b_bufcnt > 1) {
arc_buf_hdr_t *nhdr;
uint64_t spa = hdr->b_spa;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t lsize = HDR_GET_LSIZE(hdr);
boolean_t protected = HDR_PROTECTED(hdr);
enum zio_compress compress = arc_hdr_get_compress(hdr);
arc_buf_contents_t type = arc_buf_type(hdr);
VERIFY3U(hdr->b_type, ==, type);
ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
VERIFY3S(remove_reference(hdr, tag), >, 0);
if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(ARC_BUF_LAST(buf));
}
/*
* Pull the data off of this hdr and attach it to
* a new anonymous hdr. Also find the last buffer
* in the hdr's buffer list.
*/
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
ASSERT3P(lastbuf, !=, NULL);
/*
* If the current arc_buf_t and the hdr are sharing their data
* buffer, then we must stop sharing that block.
*/
if (arc_buf_is_shared(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
VERIFY(!arc_buf_is_shared(lastbuf));
/*
* First, sever the block sharing relationship between
* buf and the arc_buf_hdr_t.
*/
arc_unshare_buf(hdr, buf);
/*
* Now we need to recreate the hdr's b_pabd. Since we
* have lastbuf handy, we try to share with it, but if
* we can't then we allocate a new b_pabd and copy the
* data from buf into it.
*/
if (arc_can_share(hdr, lastbuf)) {
arc_share_buf(hdr, lastbuf);
} else {
arc_hdr_alloc_abd(hdr, 0);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
buf->b_data, psize);
}
VERIFY3P(lastbuf->b_data, !=, NULL);
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
ASSERT(!ARC_BUF_SHARED(buf));
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_size[type],
arc_buf_size(buf), buf);
if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(
&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf))
hdr->b_crypt_hdr.b_ebufcnt -= 1;
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
/* if this is the last uncompressed buf free the checksum */
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
mutex_exit(hash_lock);
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, hdr->b_complevel, type);
ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(nhdr->b_l1hdr.b_bufcnt);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
nhdr->b_l1hdr.b_buf = buf;
nhdr->b_l1hdr.b_bufcnt = 1;
if (ARC_BUF_ENCRYPTED(buf))
nhdr->b_crypt_hdr.b_ebufcnt = 1;
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
buf->b_hdr = nhdr;
(void) zfs_refcount_add_many(&arc_anon->arcs_size[type],
arc_buf_size(buf), buf);
} else {
ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
/* protected by hash lock, or hdr is on arc_anon */
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
arc_change_state(arc_anon, hdr);
hdr->b_l1hdr.b_arc_access = 0;
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_buf_thaw(buf);
}
}
int
arc_released(arc_buf_t *buf)
{
return (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_state == arc_anon);
}
#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
return (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
}
#endif
static void
arc_write_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
/*
* If we're reexecuting this zio because the pool suspended, then
* clean up any state that was previously set the first time the
* callback was invoked.
*/
if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
if (hdr->b_l1hdr.b_pabd != NULL) {
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
callback->awcb_ready(zio, buf, callback->awcb_private);
if (HDR_IO_IN_PROGRESS(hdr)) {
ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
add_reference(hdr, hdr); /* For IO_IN_PROGRESS. */
}
if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp));
if (BP_IS_PROTECTED(bp)) {
/* ZIL blocks are written through zio_rewrite */
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(HDR_PROTECTED(hdr));
if (BP_SHOULD_BYTESWAP(bp)) {
if (BP_GET_LEVEL(bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
/*
* If this block was written for raw encryption but the zio layer
* ended up only authenticating it, adjust the buffer flags now.
*/
if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
} else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/* this must be done after the buffer flags are adjusted */
arc_cksum_compute(buf);
enum zio_compress compress;
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
compress = BP_GET_COMPRESS(bp);
}
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = zio->io_prop.zp_complevel;
if (zio->io_error != 0 || psize == 0)
goto out;
/*
* Fill the hdr with data. If the buffer is encrypted we have no choice
* but to copy the data into b_rabd. If the hdr is compressed, the data
* we want is available from the zio, otherwise we can take it from
* the buf.
*
* We might be able to share the buf's data with the hdr here. However,
* doing so would cause the ARC to be full of linear ABDs if we write a
* lot of shareable data. As a compromise, we check whether scattered
* ABDs are allowed, and assume that if they are then the user wants
* the ARC to be primarily filled with them regardless of the data being
* written. Therefore, if they're allowed then we allocate one and copy
* the data into it; otherwise, we share the data directly if we can.
*/
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, ARC_HDR_ALLOC_RDATA |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (!(HDR_UNCACHED(hdr) ||
abd_size_alloc_linear(arc_buf_size(buf))) ||
!arc_can_share(hdr, buf)) {
/*
* Ideally, we would always copy the io_abd into b_pabd, but the
* user may have disabled compressed ARC, thus we must check the
* hdr's compression setting rather than the io_bp's.
*/
if (BP_IS_ENCRYPTED(bp)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_ALLOC_RDATA |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
!ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
} else {
ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_USE_RESERVE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
}
} else {
ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
arc_share_buf(hdr, buf);
}
out:
arc_hdr_verify(hdr, bp);
spl_fstrans_unmark(cookie);
}
static void
arc_write_children_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
/*
* The SPA calls this callback for each physical write that happens on behalf
* of a logical write. See the comment in dbuf_write_physdone() for details.
*/
static void
arc_write_physdone(zio_t *zio)
{
arc_write_callback_t *cb = zio->io_private;
if (cb->awcb_physdone != NULL)
cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}
static void
arc_write_done(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
buf_discard_identity(hdr);
} else {
hdr->b_dva = *BP_IDENTITY(zio->io_bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
}
} else {
ASSERT(HDR_EMPTY(hdr));
}
/*
* If the block to be written was all-zero or compressed enough to be
* embedded in the BP, no write was performed so there will be no
* dva/birth/checksum. The buffer must therefore remain anonymous
* (and uncached).
*/
if (!HDR_EMPTY(hdr)) {
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
ASSERT3U(zio->io_error, ==, 0);
arc_cksum_verify(buf);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists != NULL) {
/*
* This can only happen if we overwrite for
* sync-to-convergence, because we remove
* buffers from the hash table when we arc_free().
*/
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad overwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
ASSERT(zfs_refcount_is_zero(
&exists->b_l1hdr.b_refcnt));
arc_change_state(arc_anon, exists);
arc_hdr_destroy(exists);
mutex_exit(hash_lock);
exists = buf_hash_insert(hdr, &hash_lock);
ASSERT3P(exists, ==, NULL);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad nopwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
} else {
/* Dedup */
ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
VERIFY3S(remove_reference(hdr, hdr), >, 0);
/* if it's not anon, we are doing a scrub */
if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
arc_access(hdr, 0, B_FALSE);
mutex_exit(hash_lock);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
VERIFY3S(remove_reference(hdr, hdr), >, 0);
}
callback->awcb_done(zio, buf, callback->awcb_private);
abd_free(zio->io_abd);
kmem_free(callback, sizeof (arc_write_callback_t));
}
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t uncached, boolean_t l2arc,
const zio_prop_t *zp, arc_write_done_func_t *ready,
arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
arc_write_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
arc_write_callback_t *callback;
zio_t *zio;
zio_prop_t localprop = *zp;
ASSERT3P(ready, !=, NULL);
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
if (uncached)
arc_hdr_set_flags(hdr, ARC_FLAG_UNCACHED);
else if (l2arc)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
localprop.zp_encrypt = B_TRUE;
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
memcpy(localprop.zp_salt, hdr->b_crypt_hdr.b_salt,
ZIO_DATA_SALT_LEN);
memcpy(localprop.zp_iv, hdr->b_crypt_hdr.b_iv,
ZIO_DATA_IV_LEN);
memcpy(localprop.zp_mac, hdr->b_crypt_hdr.b_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
localprop.zp_copies =
MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback->awcb_ready = ready;
callback->awcb_children_ready = children_ready;
callback->awcb_physdone = physdone;
callback->awcb_done = done;
callback->awcb_private = private;
callback->awcb_buf = buf;
/*
* The hdr's b_pabd is now stale; free it. A new data block
* will be allocated when the zio pipeline calls arc_write_ready().
*/
if (hdr->b_l1hdr.b_pabd != NULL) {
/*
* If the buf is currently sharing the data block with
* the hdr then we need to break that relationship here.
* The hdr will remain with a NULL data pointer and the
* buf will take sole ownership of the block.
*/
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
VERIFY3P(buf->b_data, !=, NULL);
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
if (!(zio_flags & ZIO_FLAG_RAW))
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
(children_ready != NULL) ? arc_write_children_ready : NULL,
arc_write_physdone, arc_write_done, callback,
priority, zio_flags, zb);
return (zio);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
atomic_add_64(&arc_tempreserve, -reserve);
ASSERT((int64_t)arc_tempreserve >= 0);
}
int
arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
{
int error;
uint64_t anon_size;
if (!arc_no_grow &&
reserve > arc_c/4 &&
reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
arc_c = MIN(arc_c_max, reserve * 4);
/*
* Throttle when the calculated memory footprint for the TXG
* exceeds the target ARC size.
*/
if (reserve > arc_c) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
return (SET_ERROR(ERESTART));
}
/*
* Don't count loaned bufs as in flight dirty data to prevent long
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
anon_size = MAX((int64_t)
(zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]) -
arc_loaned_bytes), 0);
/*
* Writes will almost always require additional memory allocations
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
error = arc_memory_throttle(spa, reserve, txg);
if (error != 0)
return (error);
/*
* Throttle writes when the amount of dirty data in the cache
* gets too large. We try to keep the cache less than half full
* of dirty blocks so that our sync times don't grow too large.
*
* In the case of one pool being built on another pool, we want
* to make sure we don't end up throttling the lower (backing)
* pool when the upper pool is the majority contributor to dirty
* data. To ensure we make forward progress during throttling, we
* also check the current pool's net dirty data and only throttle
* if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
* data in the cache.
*
* Note: if two requests come in concurrently, we might let them
* both succeed, when one of them should fail. Not a huge deal.
*/
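/*
 * For illustration only (assuming the default tunables of
 * zfs_arc_dirty_limit_percent = 50, zfs_arc_anon_limit_percent = 25 and
 * zfs_arc_pool_dirty_percent = 20): with rarc_c = 8 GiB, a reservation is
 * throttled only when total_dirty exceeds 4 GiB, anon_size exceeds 2 GiB,
 * and this pool's own dirty data exceeds 20% of anon_size.
 */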
uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
uint64_t spa_dirty_anon = spa_dirty_data(spa);
uint64_t rarc_c = arc_warm ? arc_c : arc_c_max;
if (total_dirty > rarc_c * zfs_arc_dirty_limit_percent / 100 &&
anon_size > rarc_c * zfs_arc_anon_limit_percent / 100 &&
spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
#ifdef ZFS_DEBUG
uint64_t meta_esize = zfs_refcount_count(
&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
uint64_t data_esize =
zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n",
(u_longlong_t)arc_tempreserve >> 10,
(u_longlong_t)meta_esize >> 10,
(u_longlong_t)data_esize >> 10,
(u_longlong_t)reserve >> 10,
(u_longlong_t)rarc_c >> 10);
#endif
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
}
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
kstat_named_t *data, kstat_named_t *metadata,
kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
data->value.ui64 =
zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]);
metadata->value.ui64 =
zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]);
size->value.ui64 = data->value.ui64 + metadata->value.ui64;
evict_data->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
evict_metadata->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
arc_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
as->arcstat_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_hits);
as->arcstat_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_iohits);
as->arcstat_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_misses);
as->arcstat_demand_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_hits);
as->arcstat_demand_data_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_iohits);
as->arcstat_demand_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_misses);
as->arcstat_demand_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_hits);
as->arcstat_demand_metadata_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_iohits);
as->arcstat_demand_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_misses);
as->arcstat_prefetch_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_hits);
as->arcstat_prefetch_data_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_iohits);
as->arcstat_prefetch_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_misses);
as->arcstat_prefetch_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_hits);
as->arcstat_prefetch_metadata_iohits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_iohits);
as->arcstat_prefetch_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_misses);
as->arcstat_mru_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_hits);
as->arcstat_mru_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_ghost_hits);
as->arcstat_mfu_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_hits);
as->arcstat_mfu_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_ghost_hits);
as->arcstat_uncached_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncached_hits);
as->arcstat_deleted.value.ui64 =
wmsum_value(&arc_sums.arcstat_deleted);
as->arcstat_mutex_miss.value.ui64 =
wmsum_value(&arc_sums.arcstat_mutex_miss);
as->arcstat_access_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_access_skip);
as->arcstat_evict_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_skip);
as->arcstat_evict_not_enough.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_not_enough);
as->arcstat_evict_l2_cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_cached);
as->arcstat_evict_l2_eligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible);
as->arcstat_evict_l2_eligible_mfu.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mfu);
as->arcstat_evict_l2_eligible_mru.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mru);
as->arcstat_evict_l2_ineligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_ineligible);
as->arcstat_evict_l2_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_skip);
as->arcstat_hash_collisions.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_collisions);
as->arcstat_hash_chains.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_chains);
as->arcstat_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_size);
as->arcstat_compressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_compressed_size);
as->arcstat_uncompressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncompressed_size);
as->arcstat_overhead_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_overhead_size);
as->arcstat_hdr_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_hdr_size);
as->arcstat_data_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_data_size);
as->arcstat_metadata_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_metadata_size);
as->arcstat_dbuf_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dbuf_size);
#if defined(COMPAT_FREEBSD11)
as->arcstat_other_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size) +
wmsum_value(&arc_sums.arcstat_dnode_size) +
wmsum_value(&arc_sums.arcstat_dbuf_size);
#endif
arc_kstat_update_state(arc_anon,
&as->arcstat_anon_size,
&as->arcstat_anon_data,
&as->arcstat_anon_metadata,
&as->arcstat_anon_evictable_data,
&as->arcstat_anon_evictable_metadata);
arc_kstat_update_state(arc_mru,
&as->arcstat_mru_size,
&as->arcstat_mru_data,
&as->arcstat_mru_metadata,
&as->arcstat_mru_evictable_data,
&as->arcstat_mru_evictable_metadata);
arc_kstat_update_state(arc_mru_ghost,
&as->arcstat_mru_ghost_size,
&as->arcstat_mru_ghost_data,
&as->arcstat_mru_ghost_metadata,
&as->arcstat_mru_ghost_evictable_data,
&as->arcstat_mru_ghost_evictable_metadata);
arc_kstat_update_state(arc_mfu,
&as->arcstat_mfu_size,
&as->arcstat_mfu_data,
&as->arcstat_mfu_metadata,
&as->arcstat_mfu_evictable_data,
&as->arcstat_mfu_evictable_metadata);
arc_kstat_update_state(arc_mfu_ghost,
&as->arcstat_mfu_ghost_size,
&as->arcstat_mfu_ghost_data,
&as->arcstat_mfu_ghost_metadata,
&as->arcstat_mfu_ghost_evictable_data,
&as->arcstat_mfu_ghost_evictable_metadata);
arc_kstat_update_state(arc_uncached,
&as->arcstat_uncached_size,
&as->arcstat_uncached_data,
&as->arcstat_uncached_metadata,
&as->arcstat_uncached_evictable_data,
&as->arcstat_uncached_evictable_metadata);
as->arcstat_dnode_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dnode_size);
as->arcstat_bonus_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size);
as->arcstat_l2_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_hits);
as->arcstat_l2_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_misses);
as->arcstat_l2_prefetch_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_prefetch_asize);
as->arcstat_l2_mru_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mru_asize);
as->arcstat_l2_mfu_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mfu_asize);
as->arcstat_l2_bufc_data_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_data_asize);
as->arcstat_l2_bufc_metadata_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_metadata_asize);
as->arcstat_l2_feeds.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_feeds);
as->arcstat_l2_rw_clash.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rw_clash);
as->arcstat_l2_read_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_read_bytes);
as->arcstat_l2_write_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_write_bytes);
as->arcstat_l2_writes_sent.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_sent);
as->arcstat_l2_writes_done.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_done);
as->arcstat_l2_writes_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_error);
as->arcstat_l2_writes_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_lock_retry);
as->arcstat_l2_evict_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_lock_retry);
as->arcstat_l2_evict_reading.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_reading);
as->arcstat_l2_evict_l1cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_l1cached);
as->arcstat_l2_free_on_write.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_free_on_write);
as->arcstat_l2_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_abort_lowmem);
as->arcstat_l2_cksum_bad.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_cksum_bad);
as->arcstat_l2_io_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_io_error);
as->arcstat_l2_lsize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_lsize);
as->arcstat_l2_psize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_psize);
as->arcstat_l2_hdr_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_l2_hdr_size);
as->arcstat_l2_log_blk_writes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_writes);
as->arcstat_l2_log_blk_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_asize);
as->arcstat_l2_log_blk_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_count);
as->arcstat_l2_rebuild_success.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_success);
as->arcstat_l2_rebuild_abort_unsupported.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
as->arcstat_l2_rebuild_abort_io_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
as->arcstat_l2_rebuild_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
as->arcstat_l2_rebuild_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_size);
as->arcstat_l2_rebuild_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_asize);
as->arcstat_l2_rebuild_bufs.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs);
as->arcstat_l2_rebuild_bufs_precached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs_precached);
as->arcstat_l2_rebuild_log_blks.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_log_blks);
as->arcstat_memory_throttle_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_throttle_count);
as->arcstat_memory_direct_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_direct_count);
as->arcstat_memory_indirect_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_indirect_count);
as->arcstat_memory_all_bytes.value.ui64 =
arc_all_memory();
as->arcstat_memory_free_bytes.value.ui64 =
arc_free_memory();
as->arcstat_memory_available_bytes.value.i64 =
arc_available_memory();
as->arcstat_prune.value.ui64 =
wmsum_value(&arc_sums.arcstat_prune);
as->arcstat_meta_used.value.ui64 =
wmsum_value(&arc_sums.arcstat_meta_used);
as->arcstat_async_upgrade_sync.value.ui64 =
wmsum_value(&arc_sums.arcstat_async_upgrade_sync);
as->arcstat_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_predictive_prefetch);
as->arcstat_demand_hit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_predictive_prefetch);
as->arcstat_demand_iohit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_iohit_predictive_prefetch);
as->arcstat_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_prescient_prefetch);
as->arcstat_demand_hit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_prescient_prefetch);
as->arcstat_demand_iohit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_iohit_prescient_prefetch);
as->arcstat_raw_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_raw_size);
as->arcstat_cached_only_in_progress.value.ui64 =
wmsum_value(&arc_sums.arcstat_cached_only_in_progress);
as->arcstat_abd_chunk_waste_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_abd_chunk_waste_size);
return (0);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the ARC eviction
* code is laid out; arc_evict_state() assumes ARC buffers are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
arc_buf_hdr_t *hdr = obj;
/*
* We rely on b_dva to generate evenly distributed index
* numbers using buf_hash below. So, as an added precaution,
* let's make sure we never add empty buffers to the arc lists.
*/
ASSERT(!HDR_EMPTY(hdr));
/*
* The assumption here is that the hash value for a given
* arc_buf_hdr_t will remain constant throughout its lifetime
* (i.e. its b_spa, b_dva, and b_birth fields don't change).
* Thus, we don't need to store the header's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power-of-two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64-bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
multilist_get_num_sublists(ml));
}
static unsigned int
arc_state_l2c_multilist_index_func(multilist_t *ml, void *obj)
{
panic("Header %p insert into arc_l2c_only %p", obj, ml);
}
#define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \
if ((do_warn) && (tuning) && ((tuning) != (value))) { \
cmn_err(CE_WARN, \
"ignoring tunable %s (using %llu instead)", \
(#tuning), (u_longlong_t)(value)); \
} \
} while (0)
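/*
 * For example, WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose)
 * below logs "ignoring tunable zfs_arc_min (using <arc_c_min> instead)"
 * only when verbose is set, zfs_arc_min is nonzero, and it differs from
 * the arc_c_min value actually in effect.
 */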
/*
* Called during module initialization and periodically thereafter to
* apply reasonable changes to the exposed performance tunings. Can also be
* called explicitly by param_set_arc_*() functions when ARC tunables are
* updated manually. Non-zero zfs_* values which differ from the currently set
* values will be applied.
*/
void
arc_tuning_update(boolean_t verbose)
{
uint64_t allmem = arc_all_memory();
/* Valid range: 32M - <arc_c_max> */
if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
(zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_min <= arc_c_max)) {
arc_c_min = zfs_arc_min;
arc_c = MAX(arc_c, arc_c_min);
}
WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose);
/* Valid range: 64M - <all physical memory> */
if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
(zfs_arc_max >= MIN_ARC_MAX) && (zfs_arc_max < allmem) &&
(zfs_arc_max > arc_c_min)) {
arc_c_max = zfs_arc_max;
arc_c = MIN(arc_c, arc_c_max);
if (arc_dnode_limit > arc_c_max)
arc_dnode_limit = arc_c_max;
}
WARN_IF_TUNING_IGNORED(zfs_arc_max, arc_c_max, verbose);
/* Valid range: 0 - <all physical memory> */
arc_dnode_limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
MIN(zfs_arc_dnode_limit_percent, 100) * arc_c_max / 100;
WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit, arc_dnode_limit, verbose);
/* Valid range: 1 - N */
if (zfs_arc_grow_retry)
arc_grow_retry = zfs_arc_grow_retry;
/* Valid range: 1 - N */
if (zfs_arc_shrink_shift) {
arc_shrink_shift = zfs_arc_shrink_shift;
arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1);
}
/* Valid range: 1 - N ms */
if (zfs_arc_min_prefetch_ms)
arc_min_prefetch_ms = zfs_arc_min_prefetch_ms;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prescient_prefetch_ms) {
arc_min_prescient_prefetch_ms =
zfs_arc_min_prescient_prefetch_ms;
}
/* Valid range: 0 - 100 */
if (zfs_arc_lotsfree_percent <= 100)
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
verbose);
/* Valid range: 0 - <all physical memory> */
if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
arc_sys_free = MIN(zfs_arc_sys_free, allmem);
WARN_IF_TUNING_IGNORED(zfs_arc_sys_free, arc_sys_free, verbose);
}
static void
arc_state_multilist_init(multilist_t *ml,
multilist_sublist_index_func_t *index_func, int *maxcountp)
{
multilist_create(ml, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), index_func);
*maxcountp = MAX(*maxcountp, multilist_get_num_sublists(ml));
}
static void
arc_state_init(void)
{
int num_sublists = 0;
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_uncached->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
/*
* L2 headers should never be on the L2 state list since they don't
* have L1 headers allocated. A special index function asserts that.
*/
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
arc_state_l2c_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
arc_state_l2c_multilist_index_func, &num_sublists);
/*
* Keep track of the number of markers needed to reclaim buffers from
* any ARC state. The markers will be pre-allocated so as to minimize
* the number of memory allocations performed by the eviction thread.
*/
arc_state_evict_marker_count = num_sublists;
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_uncached->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_uncached->arcs_size[ARC_BUFC_METADATA]);
wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA], 0);
wmsum_init(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA], 0);
wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA], 0);
wmsum_init(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA], 0);
wmsum_init(&arc_sums.arcstat_hits, 0);
wmsum_init(&arc_sums.arcstat_iohits, 0);
wmsum_init(&arc_sums.arcstat_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_data_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_iohits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_iohits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_iohits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_iohits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_mru_hits, 0);
wmsum_init(&arc_sums.arcstat_mru_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_uncached_hits, 0);
wmsum_init(&arc_sums.arcstat_deleted, 0);
wmsum_init(&arc_sums.arcstat_mutex_miss, 0);
wmsum_init(&arc_sums.arcstat_access_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_not_enough, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_cached, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mfu, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mru, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_ineligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_skip, 0);
wmsum_init(&arc_sums.arcstat_hash_collisions, 0);
wmsum_init(&arc_sums.arcstat_hash_chains, 0);
aggsum_init(&arc_sums.arcstat_size, 0);
wmsum_init(&arc_sums.arcstat_compressed_size, 0);
wmsum_init(&arc_sums.arcstat_uncompressed_size, 0);
wmsum_init(&arc_sums.arcstat_overhead_size, 0);
wmsum_init(&arc_sums.arcstat_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_data_size, 0);
wmsum_init(&arc_sums.arcstat_metadata_size, 0);
wmsum_init(&arc_sums.arcstat_dbuf_size, 0);
wmsum_init(&arc_sums.arcstat_dnode_size, 0);
wmsum_init(&arc_sums.arcstat_bonus_size, 0);
wmsum_init(&arc_sums.arcstat_l2_hits, 0);
wmsum_init(&arc_sums.arcstat_l2_misses, 0);
wmsum_init(&arc_sums.arcstat_l2_prefetch_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mru_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mfu_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_data_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_metadata_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_feeds, 0);
wmsum_init(&arc_sums.arcstat_l2_rw_clash, 0);
wmsum_init(&arc_sums.arcstat_l2_read_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_write_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_sent, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_done, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_error, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_reading, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_l1cached, 0);
wmsum_init(&arc_sums.arcstat_l2_free_on_write, 0);
wmsum_init(&arc_sums.arcstat_l2_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_cksum_bad, 0);
wmsum_init(&arc_sums.arcstat_l2_io_error, 0);
wmsum_init(&arc_sums.arcstat_l2_lsize, 0);
wmsum_init(&arc_sums.arcstat_l2_psize, 0);
aggsum_init(&arc_sums.arcstat_l2_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_writes, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_count, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_success, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_unsupported, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_io_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_dh_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_size, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs_precached, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_log_blks, 0);
wmsum_init(&arc_sums.arcstat_memory_throttle_count, 0);
wmsum_init(&arc_sums.arcstat_memory_direct_count, 0);
wmsum_init(&arc_sums.arcstat_memory_indirect_count, 0);
wmsum_init(&arc_sums.arcstat_prune, 0);
wmsum_init(&arc_sums.arcstat_meta_used, 0);
wmsum_init(&arc_sums.arcstat_async_upgrade_sync, 0);
wmsum_init(&arc_sums.arcstat_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_iohit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_iohit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_raw_size, 0);
wmsum_init(&arc_sums.arcstat_cached_only_in_progress, 0);
wmsum_init(&arc_sums.arcstat_abd_chunk_waste_size, 0);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
arc_mfu->arcs_state = ARC_STATE_MFU;
arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
arc_uncached->arcs_state = ARC_STATE_UNCACHED;
}
static void
arc_state_fini(void)
{
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_uncached->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_size[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_uncached->arcs_size[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_uncached->arcs_list[ARC_BUFC_DATA]);
wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_DATA]);
wmsum_fini(&arc_mru_ghost->arcs_hits[ARC_BUFC_METADATA]);
wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_DATA]);
wmsum_fini(&arc_mfu_ghost->arcs_hits[ARC_BUFC_METADATA]);
wmsum_fini(&arc_sums.arcstat_hits);
wmsum_fini(&arc_sums.arcstat_iohits);
wmsum_fini(&arc_sums.arcstat_misses);
wmsum_fini(&arc_sums.arcstat_demand_data_hits);
wmsum_fini(&arc_sums.arcstat_demand_data_iohits);
wmsum_fini(&arc_sums.arcstat_demand_data_misses);
wmsum_fini(&arc_sums.arcstat_demand_metadata_hits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_iohits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_data_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_iohits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_iohits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_misses);
wmsum_fini(&arc_sums.arcstat_mru_hits);
wmsum_fini(&arc_sums.arcstat_mru_ghost_hits);
wmsum_fini(&arc_sums.arcstat_mfu_hits);
wmsum_fini(&arc_sums.arcstat_mfu_ghost_hits);
wmsum_fini(&arc_sums.arcstat_uncached_hits);
wmsum_fini(&arc_sums.arcstat_deleted);
wmsum_fini(&arc_sums.arcstat_mutex_miss);
wmsum_fini(&arc_sums.arcstat_access_skip);
wmsum_fini(&arc_sums.arcstat_evict_skip);
wmsum_fini(&arc_sums.arcstat_evict_not_enough);
wmsum_fini(&arc_sums.arcstat_evict_l2_cached);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mfu);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mru);
wmsum_fini(&arc_sums.arcstat_evict_l2_ineligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_skip);
wmsum_fini(&arc_sums.arcstat_hash_collisions);
wmsum_fini(&arc_sums.arcstat_hash_chains);
aggsum_fini(&arc_sums.arcstat_size);
wmsum_fini(&arc_sums.arcstat_compressed_size);
wmsum_fini(&arc_sums.arcstat_uncompressed_size);
wmsum_fini(&arc_sums.arcstat_overhead_size);
wmsum_fini(&arc_sums.arcstat_hdr_size);
wmsum_fini(&arc_sums.arcstat_data_size);
wmsum_fini(&arc_sums.arcstat_metadata_size);
wmsum_fini(&arc_sums.arcstat_dbuf_size);
wmsum_fini(&arc_sums.arcstat_dnode_size);
wmsum_fini(&arc_sums.arcstat_bonus_size);
wmsum_fini(&arc_sums.arcstat_l2_hits);
wmsum_fini(&arc_sums.arcstat_l2_misses);
wmsum_fini(&arc_sums.arcstat_l2_prefetch_asize);
wmsum_fini(&arc_sums.arcstat_l2_mru_asize);
wmsum_fini(&arc_sums.arcstat_l2_mfu_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_data_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_metadata_asize);
wmsum_fini(&arc_sums.arcstat_l2_feeds);
wmsum_fini(&arc_sums.arcstat_l2_rw_clash);
wmsum_fini(&arc_sums.arcstat_l2_read_bytes);
wmsum_fini(&arc_sums.arcstat_l2_write_bytes);
wmsum_fini(&arc_sums.arcstat_l2_writes_sent);
wmsum_fini(&arc_sums.arcstat_l2_writes_done);
wmsum_fini(&arc_sums.arcstat_l2_writes_error);
wmsum_fini(&arc_sums.arcstat_l2_writes_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_reading);
wmsum_fini(&arc_sums.arcstat_l2_evict_l1cached);
wmsum_fini(&arc_sums.arcstat_l2_free_on_write);
wmsum_fini(&arc_sums.arcstat_l2_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_cksum_bad);
wmsum_fini(&arc_sums.arcstat_l2_io_error);
wmsum_fini(&arc_sums.arcstat_l2_lsize);
wmsum_fini(&arc_sums.arcstat_l2_psize);
aggsum_fini(&arc_sums.arcstat_l2_hdr_size);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_writes);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_asize);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_count);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_success);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_size);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_asize);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs_precached);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_log_blks);
wmsum_fini(&arc_sums.arcstat_memory_throttle_count);
wmsum_fini(&arc_sums.arcstat_memory_direct_count);
wmsum_fini(&arc_sums.arcstat_memory_indirect_count);
wmsum_fini(&arc_sums.arcstat_prune);
wmsum_fini(&arc_sums.arcstat_meta_used);
wmsum_fini(&arc_sums.arcstat_async_upgrade_sync);
wmsum_fini(&arc_sums.arcstat_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_iohit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_iohit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_raw_size);
wmsum_fini(&arc_sums.arcstat_cached_only_in_progress);
wmsum_fini(&arc_sums.arcstat_abd_chunk_waste_size);
}
uint64_t
arc_target_bytes(void)
{
return (arc_c);
}
void
arc_set_limits(uint64_t allmem)
{
/* Set min cache to 1/32 of all memory, or 32MB, whichever is more. */
arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
/* How to set default max varies by platform. */
arc_c_max = arc_default_max(arc_c_min, allmem);
}
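/*
* Illustrative sketch, not part of the ARC code (helper name hypothetical):
* assuming SPA_MAXBLOCKSHIFT is 24 (a 16 MB maximum block size), the floor
* used above is 2ULL << 24 = 32 MB, so a host with 2 GB of memory ends up
* with arc_c_min = MAX(2 GB / 32, 32 MB) = 64 MB.
*/
static inline uint64_t
example_arc_c_min(uint64_t allmem)
{
return (MAX(allmem / 32, 2ULL << 24));
}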
void
arc_init(void)
{
uint64_t percent, allmem = arc_all_memory();
mutex_init(&arc_evict_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&arc_evict_waiters, sizeof (arc_evict_waiter_t),
offsetof(arc_evict_waiter_t, aew_node));
arc_min_prefetch_ms = 1000;
arc_min_prescient_prefetch_ms = 6000;
#if defined(_KERNEL)
arc_lowmem_init();
#endif
arc_set_limits(allmem);
#ifdef _KERNEL
/*
* If zfs_arc_max is non-zero at init, meaning it was set in the kernel
* environment before the module was loaded, don't block setting the
* maximum just because it is less than arc_c_min; instead, reset
* arc_c_min to a lower value.
* zfs_arc_min will be handled by arc_tuning_update().
*/
if (zfs_arc_max != 0 && zfs_arc_max >= MIN_ARC_MAX &&
zfs_arc_max < allmem) {
arc_c_max = zfs_arc_max;
if (arc_c_min >= arc_c_max) {
arc_c_min = MAX(zfs_arc_max / 2,
2ULL << SPA_MAXBLOCKSHIFT);
}
}
#else
/*
* In userland, there's only the memory pressure that we artificially
* create (see arc_available_memory()). Don't let arc_c get too
* small, because it can cause transactions to be larger than
* arc_c, causing arc_tempreserve_space() to fail.
*/
arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
#endif
arc_c = arc_c_min;
/*
* 32-bit fixed point fractions of metadata from total ARC size,
* MRU data from all data and MRU metadata from all metadata.
*/
arc_meta = (1ULL << 32) / 4; /* Metadata is 25% of arc_c. */
arc_pd = (1ULL << 32) / 2; /* Data MRU is 50% of data. */
arc_pm = (1ULL << 32) / 2; /* Metadata MRU is 50% of metadata. */
percent = MIN(zfs_arc_dnode_limit_percent, 100);
arc_dnode_limit = arc_c_max * percent / 100;
/* Apply user specified tunings */
arc_tuning_update(B_TRUE);
/* if kmem_flags are set, let's try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
if (arc_c < arc_c_min)
arc_c = arc_c_min;
arc_register_hotplug();
arc_state_init();
buf_init();
list_create(&arc_prune_list, sizeof (arc_prune_t),
offsetof(arc_prune_t, p_node));
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
arc_prune_taskq = taskq_create("arc_prune", zfs_arc_prune_task_threads,
defclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (arc_ksp != NULL) {
arc_ksp->ks_data = &arc_stats;
arc_ksp->ks_update = arc_kstat_update;
kstat_install(arc_ksp);
}
arc_state_evict_markers =
arc_state_alloc_markers(arc_state_evict_marker_count);
arc_evict_zthr = zthr_create_timer("arc_evict",
arc_evict_cb_check, arc_evict_cb, NULL, SEC2NSEC(1), defclsyspri);
arc_reap_zthr = zthr_create_timer("arc_reap",
arc_reap_cb_check, arc_reap_cb, NULL, SEC2NSEC(1), minclsyspri);
arc_warm = B_FALSE;
/*
* Calculate maximum amount of dirty data per pool.
*
* If it has been set by a module parameter, take that.
* Otherwise, use a percentage of physical memory defined by
* zfs_dirty_data_max_percent (default 10%) with a cap at
* zfs_dirty_data_max_max (default 4G or 25% of physical memory).
*/
#ifdef __LP64__
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#else
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#endif
if (zfs_dirty_data_max == 0) {
zfs_dirty_data_max = allmem *
zfs_dirty_data_max_percent / 100;
zfs_dirty_data_max = MIN(zfs_dirty_data_max,
zfs_dirty_data_max_max);
}
if (zfs_wrlog_data_max == 0) {
/*
* dp_wrlog_total is reduced for each txg at the end of
* spa_sync(). However, dp_dirty_total is reduced every time
* a block is written out. Thus under normal operation,
* dp_wrlog_total could grow 2 times as big as
* zfs_dirty_data_max.
*/
zfs_wrlog_data_max = zfs_dirty_data_max * 2;
}
}
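/*
* Illustrative sketch, not module code (helper name hypothetical): with the
* defaults described above (zfs_dirty_data_max_percent = 10,
* zfs_dirty_data_max_max_percent = 25, 4 GB absolute cap on 64-bit), a host
* with 16 GB of memory gets a cap of MIN(4 GB, 4 GB) = 4 GB and a dirty-data
* limit of 10% of 16 GB = 1.6 GB.
*/
static inline uint64_t
example_dirty_data_max(uint64_t allmem)
{
uint64_t cap = MIN(4ULL * 1024 * 1024 * 1024, allmem * 25 / 100);
return (MIN(allmem * 10 / 100, cap));
}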
void
arc_fini(void)
{
arc_prune_t *p;
#ifdef _KERNEL
arc_lowmem_fini();
#endif /* _KERNEL */
/* Use B_TRUE to ensure *all* buffers are evicted */
arc_flush(NULL, B_TRUE);
if (arc_ksp != NULL) {
kstat_delete(arc_ksp);
arc_ksp = NULL;
}
taskq_wait(arc_prune_taskq);
taskq_destroy(arc_prune_taskq);
mutex_enter(&arc_prune_mtx);
- while ((p = list_head(&arc_prune_list)) != NULL) {
- list_remove(&arc_prune_list, p);
+ while ((p = list_remove_head(&arc_prune_list)) != NULL) {
zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
mutex_exit(&arc_prune_mtx);
list_destroy(&arc_prune_list);
mutex_destroy(&arc_prune_mtx);
(void) zthr_cancel(arc_evict_zthr);
(void) zthr_cancel(arc_reap_zthr);
arc_state_free_markers(arc_state_evict_markers,
arc_state_evict_marker_count);
mutex_destroy(&arc_evict_lock);
list_destroy(&arc_evict_waiters);
/*
* Free any buffers that were tagged for destruction. This needs
* to occur before arc_state_fini() runs and destroys the aggsum
* values which are updated when freeing scatter ABDs.
*/
l2arc_do_free_on_write();
/*
* buf_fini() must precede arc_state_fini() because buf_fini() may
* trigger the release of kmem magazines, which can call back into
* arc_space_return(), which accesses aggsums freed in arc_state_fini().
*/
buf_fini();
arc_state_fini();
arc_unregister_hotplug();
/*
* We destroy the zthrs after all the ARC state has been
* torn down to avoid the case of them receiving any
* wakeup() signals after they are destroyed.
*/
zthr_destroy(arc_evict_zthr);
zthr_destroy(arc_reap_zthr);
ASSERT0(arc_loaned_bytes);
}
/*
* Level 2 ARC
*
* The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
* It uses dedicated storage devices to hold cached data, which are populated
* using large infrequent writes. The main role of this cache is to boost
* the performance of random read workloads. The intended L2ARC devices
* include short-stroked disks, solid state disks, and other media with
* substantially faster read latency than disk.
*
* +-----------------------+
* | ARC |
* +-----------------------+
* | ^ ^
* | | |
* l2arc_feed_thread() arc_read()
* | | |
* | l2arc read |
* V | |
* +---------------+ |
* | L2ARC | |
* +---------------+ |
* | ^ |
* l2arc_write() | |
* | | |
* V | |
* +-------+ +-------+
* | vdev | | vdev |
* | cache | | cache |
* +-------+ +-------+
* +=========+ .-----.
* : L2ARC : |-_____-|
* : devices : | Disks |
* +=========+ `-_____-'
*
* Read requests are satisfied from the following sources, in order:
*
* 1) ARC
* 2) vdev cache of L2ARC devices
* 3) L2ARC devices
* 4) vdev cache of disks
* 5) disks
*
* Some L2ARC device types exhibit extremely slow write performance.
* To accommodate this, there are some significant differences between
* the L2ARC and traditional cache design:
*
* 1. There is no eviction path from the ARC to the L2ARC. Evictions from
* the ARC behave as usual, freeing buffers and placing headers on ghost
* lists. The ARC does not send buffers to the L2ARC during eviction as
* this would add inflated write latencies for all ARC memory pressure.
*
* 2. The L2ARC attempts to cache data from the ARC before it is evicted.
* It does this by periodically scanning buffers from the eviction-end of
* the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
* not already there. It scans until a headroom of buffers is satisfied,
* which itself is a buffer for ARC eviction. If a compressible buffer is
* found during scanning and selected for writing to an L2ARC device, we
* temporarily boost scanning headroom during the next scan cycle to make
* sure we adapt to compression effects (which might significantly reduce
* the data volume we write to L2ARC). The thread that does this is
* l2arc_feed_thread(), illustrated below; example sizes are included to
* provide a better sense of ratio than this diagram:
*
* head --> tail
* +---------------------+----------+
* ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
* +---------------------+----------+ | o L2ARC eligible
* ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
* +---------------------+----------+ |
* 15.9 Gbytes ^ 32 Mbytes |
* headroom |
* l2arc_feed_thread()
* |
* l2arc write hand <--[oooo]--'
* | 8 Mbyte
* | write max
* V
* +==============================+
* L2ARC dev |####|#|###|###| |####| ... |
* +==============================+
* 32 Gbytes
*
* 3. If an ARC buffer is copied to the L2ARC but then hit instead of
* evicted, then the L2ARC has cached a buffer much sooner than it probably
* needed to, potentially wasting L2ARC device bandwidth and storage. It is
* safe to say that this is an uncommon case, since buffers at the end of
* the ARC lists have moved there due to inactivity.
*
* 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
* then the L2ARC simply misses copying some buffers. This serves as a
* pressure valve to prevent heavy read workloads from both stalling the ARC
* with waits and clogging the L2ARC with writes. This also helps prevent
* the potential for the L2ARC to churn if it attempts to cache content too
* quickly, such as during backups of the entire pool.
*
* 5. After system boot and before the ARC has filled main memory, there are
* no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
* lists can remain mostly static. Instead of searching from the tail of these
* lists as pictured, the l2arc_feed_thread() will search from the list heads
* for eligible buffers, greatly increasing its chance of finding them.
*
* The L2ARC device write speed is also boosted during this time so that
* the L2ARC warms up faster. Since there have been no ARC evictions yet,
* there are no L2ARC reads, and no fear of degrading read performance
* through increased writes.
*
* 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
* the vdev queue can aggregate them into larger and fewer writes. Each
* device is written to in a rotor fashion, sweeping writes through
* available space then repeating.
*
* 7. The L2ARC does not store dirty content. It never needs to flush
* write buffers back to disk-based storage.
*
* 8. If an ARC buffer is written (and dirtied) which also exists in the
* L2ARC, the now stale L2ARC buffer is immediately dropped.
*
* The performance of the L2ARC can be tweaked by a number of tunables, which
* may be necessary for different workloads:
*
* l2arc_write_max max write bytes per interval
* l2arc_write_boost extra write bytes during device warmup
* l2arc_noprefetch skip caching prefetched buffers
* l2arc_headroom number of max device writes to precache
* l2arc_headroom_boost when we find compressed buffers during ARC
* scanning, we multiply headroom by this
* percentage factor for the next scan cycle,
* since more compressed buffers are likely to
* be present
* l2arc_feed_secs seconds between L2ARC writing
*
* Tunables may be removed or added as future performance improvements are
* integrated, and also may become zpool properties.
*
* There are three key functions that control how the L2ARC warms up:
*
* l2arc_write_eligible() check if a buffer is eligible to cache
* l2arc_write_size() calculate how much to write
* l2arc_write_interval() calculate sleep delay between writes
*
* These three functions determine what to write, how much, and how quickly
* to send writes.
*
* L2ARC persistence:
*
* When writing buffers to L2ARC, we periodically add some metadata to
* make sure we can pick them up after reboot, thus dramatically reducing
* the impact that any downtime has on the performance of storage systems
* with large caches.
*
* The implementation works fairly simply by integrating the following two
* modifications:
*
* *) When writing to the L2ARC, we occasionally write a "l2arc log block",
* which is an additional piece of metadata which describes what's been
* written. This allows us to rebuild the arc_buf_hdr_t structures of the
* main ARC buffers. There are 2 linked lists of log blocks headed by
* dh_start_lbps[2]. We alternate which chain we append to, so they are
* time-wise and offset-wise interleaved; this is an optimization rather
* than a correctness requirement. The log block also includes a pointer
* to the previous block in its chain.
*
* *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device
* for our header bookkeeping purposes. This contains a device header,
* which contains our top-level reference structures. We update it each
* time we write a new log block, so that we're able to locate it in the
* L2ARC device. If this write results in an inconsistent device header
* (e.g. due to power failure), we detect this by verifying the header's
* checksum and simply fail to reconstruct the L2ARC after reboot.
*
* Implementation diagram:
*
* +=== L2ARC device (not to scale) ======================================+
* | ___two newest log block pointers__.__________ |
* | / \dh_start_lbps[1] |
* | / \ \dh_start_lbps[0]|
* |.___/__. V V |
* ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
* || hdr| ^ /^ /^ / / |
* |+------+ ...--\-------/ \-----/--\------/ / |
* | \--------------/ \--------------/ |
* +======================================================================+
*
* As can be seen in the diagram, rather than using a simple linked list,
* we use a pair of linked lists with alternating elements. With a single
* list we would only learn the address of the next log block once the
* current block had been completely read in, keeping the device's I/O
* queue only one operation deep and incurring a large amount of I/O
* round-trip latency. Having two lists allows us to fetch two log blocks
* ahead of where we are currently rebuilding L2ARC buffers.
*
* On-device data structures:
*
* L2ARC device header: l2arc_dev_hdr_phys_t
* L2ARC log block: l2arc_log_blk_phys_t
*
* L2ARC reconstruction:
*
* When writing data, we simply write in the standard rotary fashion,
* evicting buffers as we go and writing new data over them (writing
* a new log block every now and then). This obviously means that once we
* loop around the end of the device, we will start cutting into an already
* committed log block (and its referenced data buffers), like so:
*
* current write head__ __old tail
* \ /
* V V
* <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
* ^ ^^^^^^^^^___________________________________
* | \
* <<nextwrite>> may overwrite this blk and/or its bufs --'
*
* When importing the pool, we detect this situation and use it to stop
* our scanning process (see l2arc_rebuild).
*
* There is one significant caveat to consider when rebuilding ARC contents
* from an L2ARC device: what about invalidated buffers? Given the above
* construction, we cannot update blocks which we've already written to amend
* them to remove buffers which were invalidated. Thus, during reconstruction,
* we might be populating the cache with buffers for data that's not on the
* main pool anymore, or may have been overwritten!
*
* As it turns out, this isn't a problem. Every arc_read request includes
* both the DVA and, crucially, the birth TXG of the BP the caller is
* looking for. So even if the cache were populated by completely rotten
* blocks for data that had been long deleted and/or overwritten, we'll
* never actually return bad data from the cache, since the DVA and the
* birth TXG together uniquely identify a block in space and time - once
* created, a block is immutable on disk. The worst that happens is that
* we waste some time and memory during l2arc rebuild reconstructing
* outdated ARC entries that will get dropped from the l2arc as it is
* being updated with new blocks.
*
* L2ARC buffers that have been evicted by l2arc_evict() ahead of the write
* hand are not restored. This is done by saving the offset (in bytes)
* l2arc_evict() has evicted to in the L2ARC device header and taking it
* into account when restoring buffers.
*/
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
/*
* A buffer is *not* eligible for the L2ARC if it:
* 1. belongs to a different spa.
* 2. is already cached on the L2ARC.
* 3. has an I/O in progress (it may be an incomplete read).
* 4. is flagged not eligible (zfs property).
*/
if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
return (B_FALSE);
return (B_TRUE);
}
static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
- uint64_t size, dev_size, tsize;
+ uint64_t size;
/*
* Make sure our globals have meaningful values in case the user
* altered them.
*/
size = l2arc_write_max;
if (size == 0) {
cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
"be greater than zero, resetting it to the default (%d)",
L2ARC_WRITE_SIZE);
size = l2arc_write_max = L2ARC_WRITE_SIZE;
}
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
- /*
- * Make sure the write size does not exceed the size of the cache
- * device. This is important in l2arc_evict(), otherwise infinite
- * iteration can occur.
- */
- dev_size = dev->l2ad_end - dev->l2ad_start;
-
/* We need to add in the worst case scenario of log block overhead. */
- tsize = size + l2arc_log_blk_overhead(size, dev);
+ size += l2arc_log_blk_overhead(size, dev);
if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0) {
/*
* Trim ahead of the write size by 64MB or (l2arc_trim_ahead/100)
* times the write size, whichever is greater.
*/
- tsize += MAX(64 * 1024 * 1024,
- (tsize * l2arc_trim_ahead) / 100);
+ size += MAX(64 * 1024 * 1024,
+ (size * l2arc_trim_ahead) / 100);
}
- if (tsize >= dev_size) {
+ /*
+ * Make sure the write size does not exceed the size of the cache
+ * device. This is important in l2arc_evict(), otherwise infinite
+ * iteration can occur.
+ */
+ if (size > dev->l2ad_end - dev->l2ad_start) {
cmn_err(CE_NOTE, "l2arc_write_max or l2arc_write_boost "
"plus the overhead of log blocks (persistent L2ARC, "
"%llu bytes) exceeds the size of the cache device "
"(guid %llu), resetting them to the default (%d)",
(u_longlong_t)l2arc_log_blk_overhead(size, dev),
(u_longlong_t)dev->l2ad_vdev->vdev_guid, L2ARC_WRITE_SIZE);
+
size = l2arc_write_max = l2arc_write_boost = L2ARC_WRITE_SIZE;
+ if (l2arc_trim_ahead > 1) {
+ cmn_err(CE_NOTE, "l2arc_trim_ahead set to 1");
+ l2arc_trim_ahead = 1;
+ }
+
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
+
+ size += l2arc_log_blk_overhead(size, dev);
+ if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0) {
+ size += MAX(64 * 1024 * 1024,
+ (size * l2arc_trim_ahead) / 100);
+ }
}
return (size);
}
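/*
* Simplified sketch of the sizing above (hypothetical helper, with the
* tunables passed in as plain parameters): base write size plus the
* cold-cache boost, plus persistent-L2ARC log block overhead, plus the
* TRIM-ahead allowance. Unlike the real function, which resets the tunables
* to their defaults when the result does not fit, this sketch merely clamps
* to the device size.
*/
static inline uint64_t
example_write_size(uint64_t write_max, uint64_t write_boost,
uint64_t log_overhead, uint64_t trim_ahead_pct, uint64_t dev_size,
boolean_t warm)
{
uint64_t size = write_max;
if (!warm)
size += write_boost;
size += log_overhead;
if (trim_ahead_pct > 0)
size += MAX(64ULL * 1024 * 1024, (size * trim_ahead_pct) / 100);
return (MIN(size, dev_size));
}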
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
clock_t interval, next, now;
/*
* If the ARC lists are busy, increase our write rate; if the
* lists are stale, idle back. This is achieved by checking
* how much we previously wrote - if it was more than half of
* what we wanted, schedule the next write much sooner.
*/
if (l2arc_feed_again && wrote > (wanted / 2))
interval = (hz * l2arc_feed_min_ms) / 1000;
else
interval = hz * l2arc_feed_secs;
now = ddi_get_lbolt();
next = MAX(now, MIN(now + interval, began + interval));
return (next);
}
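/*
* Illustrative sketch with assumed defaults (hz = 1000, l2arc_feed_secs = 1,
* l2arc_feed_min_ms = 200; the actual values are tunable, helper name
* hypothetical): a feed that wrote more than half of what it wanted (with
* l2arc_feed_again set) is rescheduled 200 ticks after it began, while an
* idle feed waits the full 1000 ticks.
*/
static inline clock_t
example_write_interval(clock_t began, clock_t now, boolean_t busy)
{
clock_t interval = busy ? 200 : 1000;
/* Never schedule in the past, never further out than one interval. */
return (MAX(now, MIN(now + interval, began + interval)));
}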
/*
* Cycle through L2ARC devices. This is how L2ARC load balances.
* If a device is returned, this also returns holding the spa config lock.
*/
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
l2arc_dev_t *first, *next = NULL;
/*
* Lock out the removal of spas (spa_namespace_lock), then removal
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
mutex_enter(&spa_namespace_lock);
mutex_enter(&l2arc_dev_mtx);
/* if there are no vdevs, there is nothing to do */
if (l2arc_ndev == 0)
goto out;
first = NULL;
next = l2arc_dev_last;
do {
/* loop around the list looking for a non-faulted vdev */
if (next == NULL) {
next = list_head(l2arc_dev_list);
} else {
next = list_next(l2arc_dev_list, next);
if (next == NULL)
next = list_head(l2arc_dev_list);
}
/* if we have come back to the start, bail out */
if (first == NULL)
first = next;
else if (next == first)
break;
ASSERT3P(next, !=, NULL);
} while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all);
/* if we were unable to find any usable vdevs, return NULL */
if (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all)
next = NULL;
l2arc_dev_last = next;
out:
mutex_exit(&l2arc_dev_mtx);
/*
* Grab the config lock to prevent the 'next' device from being
* removed while we are writing to it.
*/
if (next != NULL)
spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
mutex_exit(&spa_namespace_lock);
return (next);
}
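/*
* Illustrative sketch of the rotor above, using a plain array instead of the
* kernel list (hypothetical helper; assumes ndev > 0 and last in
* [-1, ndev - 1]): starting after the most recently used slot, walk the
* table once with wrap-around and return the first usable entry, or -1 if
* every entry is dead, rebuilding or still trimming.
*/
static inline int
example_next_usable_dev(const boolean_t *usable, int ndev, int last)
{
for (int i = 1; i <= ndev; i++) {
int idx = (last + i) % ndev;
if (usable[idx])
return (idx);
}
return (-1);
}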
/*
* Free buffers that were tagged for destruction.
*/
static void
l2arc_do_free_on_write(void)
{
- list_t *buflist;
- l2arc_data_free_t *df, *df_prev;
+ l2arc_data_free_t *df;
mutex_enter(&l2arc_free_on_write_mtx);
- buflist = l2arc_free_on_write;
-
- for (df = list_tail(buflist); df; df = df_prev) {
- df_prev = list_prev(buflist, df);
+ while ((df = list_remove_head(l2arc_free_on_write)) != NULL) {
ASSERT3P(df->l2df_abd, !=, NULL);
abd_free(df->l2df_abd);
- list_remove(buflist, df);
kmem_free(df, sizeof (l2arc_data_free_t));
}
-
mutex_exit(&l2arc_free_on_write_mtx);
}
/*
* A write to a cache device has completed. Update all headers to allow
* reads from these buffers to begin.
*/
static void
l2arc_write_done(zio_t *zio)
{
l2arc_write_callback_t *cb;
l2arc_lb_abd_buf_t *abd_buf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
l2arc_dev_t *dev;
l2arc_dev_hdr_phys_t *l2dhdr;
list_t *buflist;
arc_buf_hdr_t *head, *hdr, *hdr_prev;
kmutex_t *hash_lock;
int64_t bytes_dropped = 0;
cb = zio->io_private;
ASSERT3P(cb, !=, NULL);
dev = cb->l2wcb_dev;
l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev, !=, NULL);
head = cb->l2wcb_head;
ASSERT3P(head, !=, NULL);
buflist = &dev->l2ad_buflist;
ASSERT3P(buflist, !=, NULL);
DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
l2arc_write_callback_t *, cb);
/*
* All writes completed, or an error was hit.
*/
top:
mutex_enter(&dev->l2ad_mtx);
for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order in
* which the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. We must retry so we
* don't leave the ARC_FLAG_L2_WRITING bit set.
*/
ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
/*
* We don't want to rescan the headers we've
* already marked as having been written out, so
* we reinsert the head node so we can pick up
* where we left off.
*/
list_remove(buflist, head);
list_insert_after(buflist, hdr, head);
mutex_exit(&dev->l2ad_mtx);
/*
* We wait for the hash lock to become available
* to try and prevent busy waiting, and increase
* the chance we'll be able to acquire the lock
* the next time around.
*/
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* We could not have been moved into the arc_l2c_only
* state while in-flight due to our ARC_FLAG_L2_WRITING
* bit being set. Let's just ensure that's being enforced.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Skipped - drop L2ARC entry and mark the header as no
* longer L2 eligible.
*/
if (zio->io_error != 0) {
/*
* Error - drop L2ARC entry.
*/
list_remove(buflist, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
uint64_t psize = HDR_GET_PSIZE(hdr);
l2arc_hdr_arcstats_decrement(hdr);
bytes_dropped +=
vdev_psize_to_asize(dev->l2ad_vdev, psize);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
}
/*
* Allow ARC to begin reads and ghost list evictions to
* this L2ARC entry.
*/
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
}
/*
* Free the allocated abd buffers for writing the log blocks.
* If the zio failed reclaim the allocated space and remove the
* pointers to these log blocks from the log block pointer list
* of the L2ARC device.
*/
while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) {
abd_free(abd_buf->abd);
zio_buf_free(abd_buf, sizeof (*abd_buf));
if (zio->io_error != 0) {
lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list);
/*
* L2BLK_GET_PSIZE returns aligned size for log
* blocks.
*/
uint64_t asize =
L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop);
bytes_dropped += asize;
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
list_destroy(&cb->l2wcb_abd_list);
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_writes_error);
/*
* Restore the lbps array in the header to its previous state.
* If the list of log block pointers is empty, zero out the
* log block pointers in the device header.
*/
lb_ptr_buf = list_head(&dev->l2ad_lbptr_list);
for (int i = 0; i < 2; i++) {
if (lb_ptr_buf == NULL) {
/*
* If the list is empty zero out the device
* header. Otherwise zero out the second log
* block pointer in the header.
*/
if (i == 0) {
memset(l2dhdr, 0,
dev->l2ad_dev_hdr_asize);
} else {
memset(&l2dhdr->dh_start_lbps[i], 0,
sizeof (l2arc_log_blkptr_t));
}
break;
}
memcpy(&l2dhdr->dh_start_lbps[i], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
lb_ptr_buf);
}
}
ARCSTAT_BUMP(arcstat_l2_writes_done);
list_remove(buflist, head);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
mutex_exit(&dev->l2ad_mtx);
ASSERT(dev->l2ad_vdev != NULL);
vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
l2arc_do_free_on_write();
kmem_free(cb, sizeof (l2arc_write_callback_t));
}
static int
l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
{
int ret;
spa_t *spa = zio->io_spa;
arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
blkptr_t *bp = zio->io_bp;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/*
* ZIL data is never written to the L2ARC, so we don't need
* special handling for its unique MAC storage.
*/
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* If the data was encrypted, decrypt it now. Note that
* we must check the bp here and not the hdr, since the
* hdr does not have its encryption parameters updated
* until arc_read_done().
*/
if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_USE_RESERVE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, HDR_GET_PSIZE(hdr), eabd,
hdr->b_l1hdr.b_pabd, &no_crypt);
if (ret != 0) {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
goto error;
}
/*
* If we actually performed decryption, replace b_pabd
* with the decrypted data. Otherwise we can just throw
* our decryption buffer away.
*/
if (!no_crypt) {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = eabd;
zio->io_abd = eabd;
} else {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
}
}
/*
* If the L2ARC block was compressed, but ARC compression
* is disabled, we decompress the data into a new buffer and
* replace the existing data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_USE_RESERVE);
void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr);
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
zio->io_abd = cabd;
zio->io_size = HDR_GET_LSIZE(hdr);
}
return (0);
error:
return (ret);
}
/*
* A read to a cache device completed. Validate buffer contents before
* handing over to the regular ARC routines.
*/
static void
l2arc_read_done(zio_t *zio)
{
int tfm_error = 0;
l2arc_read_callback_t *cb = zio->io_private;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
boolean_t valid_cksum;
boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) &&
(cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT));
ASSERT3P(zio->io_vd, !=, NULL);
ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
ASSERT3P(cb, !=, NULL);
hdr = cb->l2rcb_hdr;
ASSERT3P(hdr, !=, NULL);
hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
/*
* If the data was read into a temporary buffer,
* move it and free the buffer.
*/
if (cb->l2rcb_abd != NULL) {
ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
if (zio->io_error == 0) {
if (using_rdata) {
abd_copy(hdr->b_crypt_hdr.b_rabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
} else {
abd_copy(hdr->b_l1hdr.b_pabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
}
}
/*
* The following must be done regardless of whether
* there was an error:
* - free the temporary buffer
* - point zio to the real ARC buffer
* - set zio size accordingly
* These are required because the zio is either re-used
* to re-issue the I/O for the block in the error case,
* or passed to arc_read_done(), which needs real data.
*/
abd_free(cb->l2rcb_abd);
zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
if (using_rdata) {
ASSERT(HDR_HAS_RABD(hdr));
zio->io_abd = zio->io_orig_abd =
hdr->b_crypt_hdr.b_rabd;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
}
}
ASSERT3P(zio->io_abd, !=, NULL);
/*
* Check this survived the L2ARC journey.
*/
ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd ||
(HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd));
zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
zio->io_prop.zp_complevel = hdr->b_complevel;
valid_cksum = arc_cksum_is_equal(hdr, zio);
/*
* b_rabd will always match the data as it exists on disk if it is
* being used. Therefore if we are reading into b_rabd we do not
* attempt to untransform the data.
*/
if (valid_cksum && !using_rdata)
tfm_error = l2arc_untransform(zio, cb);
if (valid_cksum && tfm_error == 0 && zio->io_error == 0 &&
!HDR_L2_EVICTED(hdr)) {
mutex_exit(hash_lock);
zio->io_private = hdr;
arc_read_done(zio);
} else {
/*
* Buffer didn't survive caching. Increment stats and
* reissue to the original storage device.
*/
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = SET_ERROR(EIO);
}
if (!valid_cksum || tfm_error != 0)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);
/*
* If there's no waiter, issue an async i/o to the primary
* storage now. If there *is* a waiter, the caller must
* issue the i/o in a context where it's OK to block.
*/
if (zio->io_waiter == NULL) {
zio_t *pio = zio_unique_parent(zio);
void *abd = (using_rdata) ?
hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd;
ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
zio = zio_read(pio, zio->io_spa, zio->io_bp,
abd, zio->io_size, arc_read_done,
hdr, zio->io_priority, cb->l2rcb_flags,
&cb->l2rcb_zb);
/*
* Original ZIO will be freed, so we need to update
* ARC header with the new ZIO pointer to be used
* by zio_change_priority() in arc_read().
*/
for (struct arc_callback *acb = hdr->b_l1hdr.b_acb;
acb != NULL; acb = acb->acb_next)
acb->acb_zio_head = zio;
mutex_exit(hash_lock);
zio_nowait(zio);
} else {
mutex_exit(hash_lock);
}
}
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* This is the list priority order in which the L2ARC searches for buffers to
* cache. It is used within loops (0..3) to cycle through the lists in the
* desired order. This order can have a significant effect on cache
* performance.
*
* Currently the metadata lists are hit first, MFU then MRU, followed by
* the data lists. This function returns a locked list, and also returns
* the lock pointer.
*/
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
multilist_t *ml = NULL;
unsigned int idx;
ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);
switch (list_num) {
case 0:
ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
break;
case 1:
ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
break;
case 2:
ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
break;
case 3:
ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
break;
default:
return (NULL);
}
/*
* Return a randomly-selected sublist. This is acceptable
* because the caller feeds only a little bit of data for each
* call (8MB). Subsequent calls will result in different
* sublists being selected.
*/
idx = multilist_get_random_index(ml);
return (multilist_sublist_lock(ml, idx));
}
/*
* Calculates the maximum overhead of L2ARC metadata log blocks for a given
* L2ARC write size. l2arc_evict and l2arc_write_size need to include this
* overhead in processing to make sure there is enough headroom available
* when writing buffers.
*/
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev)
{
if (dev->l2ad_log_entries == 0) {
return (0);
} else {
uint64_t log_entries = write_sz >> SPA_MINBLOCKSHIFT;
uint64_t log_blocks = (log_entries +
dev->l2ad_log_entries - 1) /
dev->l2ad_log_entries;
return (vdev_psize_to_asize(dev->l2ad_vdev,
sizeof (l2arc_log_blk_phys_t)) * log_blocks);
}
}
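/*
* Worked example for the ceiling division above (hypothetical helper,
* numbers assumed for illustration; entries_per_blk must be non-zero, the
* real function returns 0 when log entries are disabled): an 8 MB write is
* 8 MB >> SPA_MINBLOCKSHIFT (9) = 16384 potential 512-byte entries, so a
* device packing 1022 entries per log block needs ceil(16384 / 1022) = 17
* log blocks, and the overhead is 17 times the allocated size of one
* l2arc_log_blk_phys_t.
*/
static inline uint64_t
example_log_blocks(uint64_t write_sz, uint64_t entries_per_blk)
{
uint64_t entries = write_sz >> 9;
return ((entries + entries_per_blk - 1) / entries_per_blk);
}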
/*
* Evict buffers from the device write hand to the distance specified in
* bytes. This distance may span populated buffers, or it may span nothing.
* This is clearing a region on the L2ARC device ready for writing.
* If the 'all' boolean is set, every buffer is evicted.
*/
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
list_t *buflist;
arc_buf_hdr_t *hdr, *hdr_prev;
kmutex_t *hash_lock;
uint64_t taddr;
l2arc_lb_ptr_buf_t *lb_ptr_buf, *lb_ptr_buf_prev;
vdev_t *vd = dev->l2ad_vdev;
boolean_t rerun;
buflist = &dev->l2ad_buflist;
top:
rerun = B_FALSE;
- if (dev->l2ad_hand >= (dev->l2ad_end - distance)) {
+ if (dev->l2ad_hand + distance > dev->l2ad_end) {
/*
* When there is no space to accommodate upcoming writes,
* evict to the end. Then bump the write and evict hands
* to the start and iterate. This iteration does not
* happen indefinitely as we make sure in
* l2arc_write_size() that when the write hand is reset,
* the write size does not exceed the end of the device.
*/
rerun = B_TRUE;
taddr = dev->l2ad_end;
} else {
taddr = dev->l2ad_hand + distance;
}
DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
uint64_t, taddr, boolean_t, all);
if (!all) {
/*
* This check has to be placed after deciding whether to
* iterate (rerun).
*/
if (dev->l2ad_first) {
/*
* This is the first sweep through the device. There is
* nothing to evict. We have already trimmed the
* whole device.
*/
goto out;
} else {
/*
* Trim the space to be evicted.
*/
if (vd->vdev_has_trim && dev->l2ad_evict < taddr &&
l2arc_trim_ahead > 0) {
/*
* We have to drop the spa_config lock because
* vdev_trim_range() will acquire it.
* l2ad_evict already accounts for the label
* size. To prevent vdev_trim_ranges() from
* adding it again, we subtract it from
* l2ad_evict.
*/
spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
vdev_trim_simple(vd,
dev->l2ad_evict - VDEV_LABEL_START_SIZE,
taddr - dev->l2ad_evict);
spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev,
RW_READER);
}
/*
* When rebuilding L2ARC we retrieve the evict hand
* from the header of the device. Of note, l2arc_evict()
* does not actually delete buffers from the cache
* device, but trimming may do so depending on the
* hardware implementation. Thus keeping track of the
* evict hand is useful.
*/
dev->l2ad_evict = MAX(dev->l2ad_evict, taddr);
}
}
retry:
mutex_enter(&dev->l2ad_mtx);
/*
* We have to account for evicted log blocks. Run vdev_space_update()
* on log blocks whose offset (in bytes) is before the evicted offset
* (in bytes) by searching in the list of pointers to log blocks
* present in the L2ARC device.
*/
for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf;
lb_ptr_buf = lb_ptr_buf_prev) {
lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf);
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE(
(lb_ptr_buf->lb_ptr)->lbp_prop);
/*
* We don't worry about log blocks left behind (i.e.
* lbp_payload_start < l2ad_hand) because l2arc_write_buffers()
* will never write more than l2arc_evict() evicts.
*/
if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) {
break;
} else {
vdev_space_update(vd, -asize, 0, 0);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
ASSERT(!HDR_EMPTY(hdr));
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order in
* which the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
*/
ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
mutex_exit(&dev->l2ad_mtx);
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto retry;
}
/*
* A header can't be on this list if it doesn't have an L2 header.
*/
ASSERT(HDR_HAS_L2HDR(hdr));
/* Ensure this header has finished being written. */
ASSERT(!HDR_L2_WRITING(hdr));
ASSERT(!HDR_L2_WRITE_HEAD(hdr));
if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict ||
hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
/*
* We've evicted to the target address,
* or the end of the device.
*/
mutex_exit(hash_lock);
break;
}
if (!HDR_HAS_L1HDR(hdr)) {
ASSERT(!HDR_L2_READING(hdr));
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_lsize.
*/
arc_change_state(arc_anon, hdr);
arc_hdr_destroy(hdr);
} else {
ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
if (HDR_L2_READING(hdr)) {
ARCSTAT_BUMP(arcstat_l2_evict_reading);
arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
}
arc_hdr_l2hdr_destroy(hdr);
}
mutex_exit(hash_lock);
}
mutex_exit(&dev->l2ad_mtx);
out:
/*
* We need to check whether we are evicting all buffers; otherwise we
* may iterate unnecessarily.
*/
if (!all && rerun) {
/*
* Bump device hand to the device start if it is approaching the
* end. l2arc_evict() has already evicted ahead for this case.
*/
dev->l2ad_hand = dev->l2ad_start;
dev->l2ad_evict = dev->l2ad_start;
dev->l2ad_first = B_FALSE;
goto top;
}
if (!all) {
/*
* In case of cache device removal (all) the following
* assertions may be violated without functional consequences
* as the device is about to be removed.
*/
ASSERT3U(dev->l2ad_hand + distance, <, dev->l2ad_end);
if (!dev->l2ad_first)
- ASSERT3U(dev->l2ad_hand, <, dev->l2ad_evict);
+ ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
}
}
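/*
* Minimal sketch of the decision at the top of l2arc_evict() (hypothetical
* helper): if the requested distance does not fit before l2ad_end, evict to
* the end now and flag the rerun that restarts both hands at l2ad_start.
*/
static inline uint64_t
example_evict_target(uint64_t hand, uint64_t end, uint64_t distance,
boolean_t *rerun)
{
if (hand + distance > end) {
*rerun = B_TRUE;
return (end);
}
*rerun = B_FALSE;
return (hand + distance);
}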
/*
* Handle any abd transforms that might be required for writing to the L2ARC.
* If successful, this function will always return an abd with the data
* transformed as it is on disk in a new abd of asize bytes.
*/
static int
l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
abd_t **abd_out)
{
int ret;
void *tmp = NULL;
abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
enum zio_compress compress = HDR_GET_COMPRESS(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t size = arc_hdr_size(hdr);
boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
dsl_crypto_key_t *dck = NULL;
uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
boolean_t no_crypt = B_FALSE;
ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) ||
HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
ASSERT3U(psize, <=, asize);
/*
* If this data simply needs its own buffer, we allocate it
* and copy the data. This may be done to eliminate a dependency on a
* shared buffer or to reallocate the buffer to match asize.
*/
if (HDR_HAS_RABD(hdr) && asize != psize) {
ASSERT3U(asize, >=, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto out;
}
if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
!HDR_ENCRYPTED(hdr)) {
ASSERT3U(size, ==, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto out;
}
if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
/*
* In some cases, we can wind up with size > asize, so
* we need to opt for the larger allocation here.
*
* (We also need abd_return_buf_copy() in all cases because
* modifying the borrowed buffer before returning it with
* abd_return_buf() would trip an ASSERT(), and nearly all
* the compressors write to the buffer before deciding to
* fail compression.)
*/
cabd = abd_alloc_for_io(size, ismd);
tmp = abd_borrow_buf(cabd, size);
psize = zio_compress_data(compress, to_write, &tmp, size,
hdr->b_complevel);
if (psize >= asize) {
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, size);
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
to_write = cabd;
abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto encrypt;
}
ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
if (psize < asize)
memset((char *)tmp + psize, 0, asize - psize);
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, size);
to_write = cabd;
}
encrypt:
if (HDR_ENCRYPTED(hdr)) {
eabd = abd_alloc_for_io(asize, ismd);
/*
* If the dataset was disowned before the buffer
* made it to this point, the key to re-encrypt
* it won't be available. In this case we simply
* won't write the buffer to the L2ARC.
*/
ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd,
&no_crypt);
if (ret != 0)
goto error;
if (no_crypt)
abd_copy(eabd, to_write, psize);
if (psize != asize)
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
ASSERT0(memcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
abd_free(cabd);
to_write = eabd;
}
out:
ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
*abd_out = to_write;
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (cabd != NULL)
abd_free(cabd);
if (eabd != NULL)
abd_free(eabd);
*abd_out = NULL;
return (ret);
}
static void
l2arc_blk_fetch_done(zio_t *zio)
{
l2arc_read_callback_t *cb;
cb = zio->io_private;
if (cb->l2rcb_abd != NULL)
abd_free(cb->l2rcb_abd);
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* Find and write ARC buffers to the L2ARC device.
*
* An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
* The scan headroom is derived from the target write size and the
* l2arc_headroom and l2arc_headroom_boost tunables.
*
* Returns the number of bytes actually written (which may be smaller than
* the delta by which the device hand has changed due to alignment and the
* writing of log blocks).
*/
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
arc_buf_hdr_t *hdr, *hdr_prev, *head;
uint64_t write_asize, write_psize, write_lsize, headroom;
boolean_t full;
l2arc_write_callback_t *cb = NULL;
zio_t *pio, *wzio;
uint64_t guid = spa_load_guid(spa);
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev->l2ad_vdev, !=, NULL);
pio = NULL;
write_lsize = write_asize = write_psize = 0;
full = B_FALSE;
head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
/*
* Copy buffers for L2ARC writing.
*/
for (int pass = 0; pass < L2ARC_FEED_TYPES; pass++) {
/*
* If pass == 1 or 3, we cache MRU metadata and data
* respectively.
*/
if (l2arc_mfuonly) {
if (pass == 1 || pass == 3)
continue;
}
multilist_sublist_t *mls = l2arc_sublist_lock(pass);
uint64_t passed_sz = 0;
VERIFY3P(mls, !=, NULL);
/*
* L2ARC fast warmup.
*
* Until the ARC is warm and starts to evict, read from the
* head of the ARC lists rather than the tail.
*/
if (arc_warm == B_FALSE)
hdr = multilist_sublist_head(mls);
else
hdr = multilist_sublist_tail(mls);
headroom = target_sz * l2arc_headroom;
if (zfs_compressed_arc_enabled)
headroom = (headroom * l2arc_headroom_boost) / 100;
for (; hdr; hdr = hdr_prev) {
kmutex_t *hash_lock;
abd_t *to_write = NULL;
if (arc_warm == B_FALSE)
hdr_prev = multilist_sublist_next(mls, hdr);
else
hdr_prev = multilist_sublist_prev(mls, hdr);
hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
/*
* Skip this buffer rather than waiting.
*/
continue;
}
passed_sz += HDR_GET_LSIZE(hdr);
if (l2arc_headroom != 0 && passed_sz > headroom) {
/*
* Searched too far.
*/
mutex_exit(hash_lock);
break;
}
if (!l2arc_write_eligible(guid, hdr)) {
mutex_exit(hash_lock);
continue;
}
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT3U(arc_hdr_size(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
psize);
- if ((write_asize + asize) > target_sz) {
+ /*
+ * If the allocated size of this buffer plus the max
+ * size for the pending log block exceeds the evicted
+ * target size, terminate writing buffers for this run.
+ */
+ if (write_asize + asize +
+ sizeof (l2arc_log_blk_phys_t) > target_sz) {
full = B_TRUE;
mutex_exit(hash_lock);
break;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);
/*
* If this header has b_rabd, we can use this since it
* must always match the data exactly as it exists on
* disk. Otherwise, the L2ARC can normally use the
* hdr's data, but if we're sharing data between the
* hdr and one of its bufs, L2ARC needs its own copy of
* the data so that the ZIO below can't race with the
* buf consumer. To ensure that this copy will be
* available for the lifetime of the ZIO and be cleaned
* up afterwards, we add it to the l2arc_free_on_write
* queue. If we need to apply any transforms to the
* data (compression, encryption) we will also need the
* extra buffer.
*/
if (HDR_HAS_RABD(hdr) && psize == asize) {
to_write = hdr->b_crypt_hdr.b_rabd;
} else if ((HDR_COMPRESSION_ENABLED(hdr) ||
HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
!HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
psize == asize) {
to_write = hdr->b_l1hdr.b_pabd;
} else {
int ret;
arc_buf_contents_t type = arc_buf_type(hdr);
ret = l2arc_apply_transforms(spa, hdr, asize,
&to_write);
if (ret != 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
continue;
}
l2arc_free_abd_on_write(to_write, asize, type);
}
if (pio == NULL) {
/*
* Insert a dummy header on the buflist so
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, head);
mutex_exit(&dev->l2ad_mtx);
cb = kmem_alloc(
sizeof (l2arc_write_callback_t), KM_SLEEP);
cb->l2wcb_dev = dev;
cb->l2wcb_head = head;
/*
* Create a list to save allocated abd buffers
* for l2arc_log_blk_commit().
*/
list_create(&cb->l2wcb_abd_list,
sizeof (l2arc_lb_abd_buf_t),
offsetof(l2arc_lb_abd_buf_t, node));
pio = zio_root(spa, l2arc_write_done, cb,
ZIO_FLAG_CANFAIL);
}
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_hits = 0;
hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
hdr->b_l2hdr.b_arcs_state =
hdr->b_l1hdr.b_state->arcs_state;
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR);
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
wzio = zio_write_phys(pio, dev->l2ad_vdev,
hdr->b_l2hdr.b_daddr, asize, to_write,
ZIO_CHECKSUM_OFF, NULL, hdr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_CANFAIL, B_FALSE);
write_lsize += HDR_GET_LSIZE(hdr);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
zio_t *, wzio);
write_psize += psize;
write_asize += asize;
dev->l2ad_hand += asize;
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_exit(hash_lock);
/*
* Append buf info to current log and commit if full.
* arcstat_l2_{size,asize} kstats are updated
* internally.
*/
- if (l2arc_log_blk_insert(dev, hdr))
- l2arc_log_blk_commit(dev, pio, cb);
+ if (l2arc_log_blk_insert(dev, hdr)) {
+ /*
+ * l2ad_hand will be adjusted in
+ * l2arc_log_blk_commit().
+ */
+ write_asize +=
+ l2arc_log_blk_commit(dev, pio, cb);
+ }
zio_nowait(wzio);
}
multilist_sublist_unlock(mls);
if (full == B_TRUE)
break;
}
/* No buffers selected for writing? */
if (pio == NULL) {
ASSERT0(write_lsize);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
/*
* Although we did not write any buffers, l2ad_evict may
* have advanced.
*/
if (dev->l2ad_evict != l2dhdr->dh_evict)
l2arc_dev_hdr_update(dev);
return (0);
}
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
ASSERT3U(write_asize, <=, target_sz);
ARCSTAT_BUMP(arcstat_l2_writes_sent);
ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
dev->l2ad_writing = B_TRUE;
(void) zio_wait(pio);
dev->l2ad_writing = B_FALSE;
/*
* Update the device header after the zio completes as
* l2arc_write_done() may have updated the memory holding the log block
* pointers in the device header.
*/
l2arc_dev_hdr_update(dev);
return (write_asize);
}
static boolean_t
l2arc_hdr_limit_reached(void)
{
int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size);
return (arc_reclaim_needed() ||
(s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
}
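/*
 * Illustrative arithmetic for the limit above (assumed figures, not part
 * of this change): with arc_c_max = 16 GiB, arc_warm == B_FALSE and
 * l2arc_meta_percent = 33, L2ARC-only headers may grow to roughly
 * 16 GiB * 33 / 100, about 5.3 GiB, before feeding is throttled.
 */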
/*
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
static __attribute__((noreturn)) void
l2arc_feed_thread(void *unused)
{
(void) unused;
callb_cpr_t cpr;
l2arc_dev_t *dev;
spa_t *spa;
uint64_t size, wrote;
clock_t begin, next = ddi_get_lbolt();
fstrans_cookie_t cookie;
CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
mutex_enter(&l2arc_feed_thr_lock);
cookie = spl_fstrans_mark();
while (l2arc_thread_exit == 0) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle(&l2arc_feed_thr_cv,
&l2arc_feed_thr_lock, next);
CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
next = ddi_get_lbolt() + hz;
/*
* Quick check for L2ARC devices.
*/
mutex_enter(&l2arc_dev_mtx);
if (l2arc_ndev == 0) {
mutex_exit(&l2arc_dev_mtx);
continue;
}
mutex_exit(&l2arc_dev_mtx);
begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
* doing so the next spa to feed from: dev->l2ad_spa. This
* will return NULL if there are now no l2arc devices or if
* they are all faulted.
*
* If a device is returned, its spa's config lock is also
* held to prevent device removal. l2arc_dev_get_next()
* will grab and release l2arc_dev_mtx.
*/
if ((dev = l2arc_dev_get_next()) == NULL)
continue;
spa = dev->l2ad_spa;
ASSERT3P(spa, !=, NULL);
/*
* If the pool is read-only then force the feed thread to
* sleep a little longer.
*/
if (!spa_writeable(spa)) {
next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
/*
* Avoid contributing to memory pressure.
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
ARCSTAT_BUMP(arcstat_l2_feeds);
size = l2arc_write_size(dev);
/*
* Evict L2ARC buffers that will be overwritten.
*/
l2arc_evict(dev, size, B_FALSE);
/*
* Write ARC buffers.
*/
wrote = l2arc_write_buffers(spa, dev, size);
/*
* Calculate interval between writes.
*/
next = l2arc_write_interval(begin, size, wrote);
spa_config_exit(spa, SCL_L2ARC, dev);
}
spl_fstrans_unmark(cookie);
l2arc_thread_exit = 0;
cv_broadcast(&l2arc_feed_thr_cv);
CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
thread_exit();
}
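/*
 * A condensed sketch of one feed cycle above (same calls as in the loop,
 * shown only to highlight the ordering):
 *
 *	size  = l2arc_write_size(dev);
 *	l2arc_evict(dev, size, B_FALSE);	(make room ahead of the hand)
 *	wrote = l2arc_write_buffers(spa, dev, size);
 *	next  = l2arc_write_interval(begin, size, wrote);
 *
 * i.e. the region about to be overwritten is always evicted before new
 * writes are issued.
 */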
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
return (l2arc_vdev_get(vd) != NULL);
}
/*
* Returns the l2arc_dev_t associated with a particular vdev_t or NULL if
* the vdev_t isn't an L2ARC device.
*/
l2arc_dev_t *
l2arc_vdev_get(vdev_t *vd)
{
l2arc_dev_t *dev;
mutex_enter(&l2arc_dev_mtx);
for (dev = list_head(l2arc_dev_list); dev != NULL;
dev = list_next(l2arc_dev_list, dev)) {
if (dev->l2ad_vdev == vd)
break;
}
mutex_exit(&l2arc_dev_mtx);
return (dev);
}
static void
l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
spa_t *spa = dev->l2ad_spa;
/*
* The L2ARC has to hold at least the payload of one log block for
* log blocks to be restored (persistent L2ARC). The payload of a log
* block depends on the number of its log entries. We always write log
* blocks with 1022 entries. How many of them are committed or restored
* depends on the size of the L2ARC device. Thus the maximum payload of
* one log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device
* is smaller than that, we reduce the number of committed and restored
* log entries per block so as to enable persistence.
*/
if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) {
dev->l2ad_log_entries = 0;
} else {
dev->l2ad_log_entries = MIN((dev->l2ad_end -
dev->l2ad_start) >> SPA_MAXBLOCKSHIFT,
L2ARC_LOG_BLK_MAX_ENTRIES);
}
/*
* Read the device header. If an error is returned, do not rebuild L2ARC.
*/
if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) {
/*
* If we are onlining a cache device (vdev_reopen) that was
* still present (l2arc_vdev_present()) and rebuild is enabled,
* we should evict all ARC buffers and pointers to log blocks
* and reclaim their space before restoring its contents to
* L2ARC.
*/
if (reopen) {
if (!l2arc_rebuild_enabled) {
return;
} else {
l2arc_evict(dev, 0, B_TRUE);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
}
/*
* Just mark the device as pending for a rebuild. We won't
* start a rebuild inline here, as that would block pool
* import. Instead, spa_load_impl() will hand that off to an
* async task which will call l2arc_spa_rebuild_start().
*/
dev->l2ad_rebuild = B_TRUE;
} else if (spa_writeable(spa)) {
/*
* In this case TRIM the whole device if l2arc_trim_ahead > 0,
* otherwise create a new header. We zero out the memory holding
* the header to reset dh_start_lbps. If we TRIM the whole
* device the new header will be written by
* vdev_trim_l2arc_thread() at the end of the TRIM to update the
* trim_state in the header too. When reading the header, if
* trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0
* we opt to TRIM the whole device again.
*/
if (l2arc_trim_ahead > 0) {
dev->l2ad_trim_all = B_TRUE;
} else {
memset(l2dhdr, 0, l2dhdr_asize);
l2arc_dev_hdr_update(dev);
}
}
}
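/*
 * Illustrative numbers for the l2ad_log_entries calculation above (assumed
 * device sizes, not part of this change): with SPA_MAXBLOCKSHIFT = 24
 * (16 MiB), a 4 GiB cache device gets 4 GiB / 16 MiB = 256 entries per log
 * block, while a 400 GiB device computes 25600 and is clamped to
 * L2ARC_LOG_BLK_MAX_ENTRIES (1022).
 */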
/*
* Add a vdev for use by the L2ARC. By this point the spa has already
* validated the vdev and opened it.
*/
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
l2arc_dev_t *adddev;
uint64_t l2dhdr_asize;
ASSERT(!l2arc_vdev_present(vd));
/*
* Create a new l2arc device entry.
*/
adddev = vmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
adddev->l2ad_spa = spa;
adddev->l2ad_vdev = vd;
/* leave extra size for an l2arc device header */
l2dhdr_asize = adddev->l2ad_dev_hdr_asize =
MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift);
adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize;
adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end);
adddev->l2ad_hand = adddev->l2ad_start;
adddev->l2ad_evict = adddev->l2ad_start;
adddev->l2ad_first = B_TRUE;
adddev->l2ad_writing = B_FALSE;
adddev->l2ad_trim_all = B_FALSE;
list_link_init(&adddev->l2ad_node);
adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP);
mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
/*
* This is a list of all ARC buffers that are still valid on the
* device.
*/
list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
/*
* This is a list of pointers to log blocks that are still present
* on the device.
*/
list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t),
offsetof(l2arc_lb_ptr_buf_t, node));
vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
zfs_refcount_create(&adddev->l2ad_alloc);
zfs_refcount_create(&adddev->l2ad_lb_asize);
zfs_refcount_create(&adddev->l2ad_lb_count);
/*
* Decide if dev is eligible for L2ARC rebuild or whole device
* trimming. This has to happen before the device is added to the
* cache device list and l2arc_dev_mtx is released. Otherwise
* l2arc_feed_thread() might already start writing on the
* device.
*/
l2arc_rebuild_dev(adddev, B_FALSE);
/*
* Add device to global list
*/
mutex_enter(&l2arc_dev_mtx);
list_insert_head(l2arc_dev_list, adddev);
atomic_inc_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
}
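/*
 * A worked example of the header sizing above (assumed ashift, not part of
 * this change): with vdev_ashift = 12, l2ad_dev_hdr_asize is rounded up to
 * 1 << 12 = 4 KiB (assuming the physical header struct fits in that), so
 * l2ad_start begins 4 KiB past VDEV_LABEL_START_SIZE.
 */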
/*
* Decide if a vdev is eligible for L2ARC rebuild, called from vdev_reopen()
* in case of onlining a cache device.
*/
void
l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen)
{
l2arc_dev_t *dev = NULL;
dev = l2arc_vdev_get(vd);
ASSERT3P(dev, !=, NULL);
/*
* In contrast to l2arc_add_vdev() we do not have to worry about
* l2arc_feed_thread() invalidating previous content when onlining a
* cache device. The device parameters (l2ad*) are not cleared when
* offlining the device, and writing new buffers will not invalidate
* all previous content. In the worst case, only buffers that have not
* had their log block written to the device will be lost.
* When onlining the cache device (i.e. offline->online without exporting
* the pool in between) this happens:
* vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev()
* | |
* vdev_is_dead() = B_FALSE l2ad_rebuild = B_TRUE
* During the time where vdev_is_dead = B_FALSE and until l2ad_rebuild
* is set to B_TRUE we might write additional buffers to the device.
*/
l2arc_rebuild_dev(dev, reopen);
}
/*
* Remove a vdev from the L2ARC.
*/
void
l2arc_remove_vdev(vdev_t *vd)
{
l2arc_dev_t *remdev = NULL;
/*
* Find the device by vdev
*/
remdev = l2arc_vdev_get(vd);
ASSERT3P(remdev, !=, NULL);
/*
* Cancel any ongoing or scheduled rebuild.
*/
mutex_enter(&l2arc_rebuild_thr_lock);
if (remdev->l2ad_rebuild_began == B_TRUE) {
remdev->l2ad_rebuild_cancel = B_TRUE;
while (remdev->l2ad_rebuild == B_TRUE)
cv_wait(&l2arc_rebuild_thr_cv, &l2arc_rebuild_thr_lock);
}
mutex_exit(&l2arc_rebuild_thr_lock);
/*
* Remove device from global list
*/
mutex_enter(&l2arc_dev_mtx);
list_remove(l2arc_dev_list, remdev);
l2arc_dev_last = NULL; /* may have been invalidated */
atomic_dec_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
/*
* Clear all buflists and ARC references. L2ARC device flush.
*/
l2arc_evict(remdev, 0, B_TRUE);
list_destroy(&remdev->l2ad_buflist);
ASSERT(list_is_empty(&remdev->l2ad_lbptr_list));
list_destroy(&remdev->l2ad_lbptr_list);
mutex_destroy(&remdev->l2ad_mtx);
zfs_refcount_destroy(&remdev->l2ad_alloc);
zfs_refcount_destroy(&remdev->l2ad_lb_asize);
zfs_refcount_destroy(&remdev->l2ad_lb_count);
kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize);
vmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_rebuild_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_rebuild_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
l2arc_dev_list = &L2ARC_dev_list;
l2arc_free_on_write = &L2ARC_free_on_write;
list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
offsetof(l2arc_dev_t, l2ad_node));
list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
mutex_destroy(&l2arc_feed_thr_lock);
cv_destroy(&l2arc_feed_thr_cv);
mutex_destroy(&l2arc_rebuild_thr_lock);
cv_destroy(&l2arc_rebuild_thr_cv);
mutex_destroy(&l2arc_dev_mtx);
mutex_destroy(&l2arc_free_on_write_mtx);
list_destroy(l2arc_dev_list);
list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
TS_RUN, defclsyspri);
}
void
l2arc_stop(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
mutex_enter(&l2arc_feed_thr_lock);
cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
l2arc_thread_exit = 1;
while (l2arc_thread_exit != 0)
cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
mutex_exit(&l2arc_feed_thr_lock);
}
/*
* Punches out rebuild threads for the L2ARC devices in a spa. This should
* be called after pool import from the spa async thread, since starting
* these threads directly from spa_import() will make them part of the
* "zpool import" context and delay process exit (and thus pool import).
*/
void
l2arc_spa_rebuild_start(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off rebuild threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
l2arc_dev_t *dev =
l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
if (dev == NULL) {
/* Don't attempt a rebuild if the vdev is UNAVAIL */
continue;
}
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild_began = B_TRUE;
(void) thread_create(NULL, 0, l2arc_dev_rebuild_thread,
dev, 0, &p0, TS_RUN, minclsyspri);
}
mutex_exit(&l2arc_rebuild_thr_lock);
}
}
/*
* Main entry point for L2ARC rebuilding.
*/
static __attribute__((noreturn)) void
l2arc_dev_rebuild_thread(void *arg)
{
l2arc_dev_t *dev = arg;
VERIFY(!dev->l2ad_rebuild_cancel);
VERIFY(dev->l2ad_rebuild);
(void) l2arc_rebuild(dev);
mutex_enter(&l2arc_rebuild_thr_lock);
dev->l2ad_rebuild_began = B_FALSE;
dev->l2ad_rebuild = B_FALSE;
mutex_exit(&l2arc_rebuild_thr_lock);
thread_exit();
}
/*
* This function implements the actual L2ARC metadata rebuild. It:
* starts reading the log block chain and restores each block's contents
* to memory (reconstructing arc_buf_hdr_t's).
*
* Operation stops under any of the following conditions:
*
* 1) We reach the end of the log block chain.
* 2) We encounter *any* error condition (cksum errors, io errors)
*/
static int
l2arc_rebuild(l2arc_dev_t *dev)
{
vdev_t *vd = dev->l2ad_vdev;
spa_t *spa = vd->vdev_spa;
int err = 0;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
l2arc_log_blk_phys_t *this_lb, *next_lb;
zio_t *this_io = NULL, *next_io = NULL;
l2arc_log_blkptr_t lbps[2];
l2arc_lb_ptr_buf_t *lb_ptr_buf;
boolean_t lock_held;
this_lb = vmem_zalloc(sizeof (*this_lb), KM_SLEEP);
next_lb = vmem_zalloc(sizeof (*next_lb), KM_SLEEP);
/*
* We prevent device removal while issuing reads to the device,
* then during the rebuilding phases we drop this lock again so
* that a spa_unload or device remove can be initiated - this is
* safe, because the spa will signal us to stop before removing
* our device and wait for us to stop.
*/
spa_config_enter(spa, SCL_L2ARC, vd, RW_READER);
lock_held = B_TRUE;
/*
* Retrieve the persistent L2ARC device state.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start);
dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop),
dev->l2ad_start);
dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time;
vd->vdev_trim_state = l2dhdr->dh_trim_state;
/*
* In case the zfs module parameter l2arc_rebuild_enabled is false
* we do not start the rebuild process.
*/
if (!l2arc_rebuild_enabled)
goto out;
/* Prepare the rebuild process */
memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));
/* Start the rebuild process */
for (;;) {
if (!l2arc_log_blkptr_valid(dev, &lbps[0]))
break;
if ((err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
this_lb, next_lb, this_io, &next_io)) != 0)
goto out;
/*
* Our memory pressure valve. If the system is running low
* on memory, rather than swamping memory with new ARC buf
* hdrs, we opt not to rebuild the L2ARC. At this point,
* however, we have already set up our L2ARC dev to chain in
* new metadata log blocks, so the user may choose to offline/
* online the L2ARC dev at a later time (or re-import the pool)
* to reconstruct it (when there's less memory pressure).
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
cmn_err(CE_NOTE, "System running low on memory, "
"aborting L2ARC rebuild.");
err = SET_ERROR(ENOMEM);
goto out;
}
spa_config_exit(spa, SCL_L2ARC, vd);
lock_held = B_FALSE;
/*
* Now that we know that the next_lb checks out alright, we
* can start reconstruction from this log block.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
l2arc_log_blk_restore(dev, this_lb, asize);
/*
* log block restored, include its pointer in the list of
* pointers to log blocks present in the L2ARC device.
*/
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
KM_SLEEP);
memcpy(lb_ptr_buf->lb_ptr, &lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(vd, asize, 0, 0);
/*
* Protection against loops of log blocks:
*
* l2ad_hand l2ad_evict
* V V
* l2ad_start |=======================================| l2ad_end
* -----|||----|||---|||----|||
* (3) (2) (1) (0)
* ---|||---|||----|||---|||
* (7) (6) (5) (4)
*
* In this situation the pointer of log block (4) passes
* l2arc_log_blkptr_valid() but the log block should not be
* restored as it is overwritten by the payload of log block
* (0). Only log blocks (0)-(3) should be restored. We check
* whether l2ad_evict lies in between the payload starting
* offset of the next log block (lbps[1].lbp_payload_start)
* and the payload starting offset of the present log block
* (lbps[0].lbp_payload_start). If true and this isn't the
* first pass, we are looping from the beginning and we should
* stop.
*/
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev->l2ad_evict) &&
!dev->l2ad_first)
goto out;
kpreempt(KPREEMPT_SYNC);
for (;;) {
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild = B_FALSE;
cv_signal(&l2arc_rebuild_thr_cv);
mutex_exit(&l2arc_rebuild_thr_lock);
err = SET_ERROR(ECANCELED);
goto out;
}
mutex_exit(&l2arc_rebuild_thr_lock);
if (spa_config_tryenter(spa, SCL_L2ARC, vd,
RW_READER)) {
lock_held = B_TRUE;
break;
}
/*
* The L2ARC config lock is held by somebody as writer,
* possibly because they are trying to remove us. They'll
* likely want us to shut down, so after a little
* delay we check l2ad_rebuild_cancel and retry
* the lock.
*/
delay(1);
}
/*
* Continue with the next log block.
*/
lbps[0] = lbps[1];
lbps[1] = this_lb->lb_prev_lbp;
PTR_SWAP(this_lb, next_lb);
this_io = next_io;
next_io = NULL;
}
if (this_io != NULL)
l2arc_log_blk_fetch_abort(this_io);
out:
if (next_io != NULL)
l2arc_log_blk_fetch_abort(next_io);
vmem_free(this_lb, sizeof (*this_lb));
vmem_free(next_lb, sizeof (*next_lb));
if (!l2arc_rebuild_enabled) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"disabled");
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_success);
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"successful, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) {
/*
* No error but also nothing restored, meaning the lbps array
* in the device header points to invalid/non-present log
* blocks. Reset the header.
*/
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"no valid log blocks");
memset(l2dhdr, 0, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
} else if (err == ECANCELED) {
/*
* In case the rebuild was canceled do not log to spa history
* log as the pool may be in the process of being removed.
*/
zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err != 0) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
}
if (lock_held)
spa_config_exit(spa, SCL_L2ARC, vd);
return (err);
}
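/*
 * The traversal above restores log blocks newest-first: lbps[] starts from
 * dh_start_lbps[0] and dh_start_lbps[1] in the device header, then follows
 * each block's lb_prev_lbp backwards in write order. This is also why
 * l2arc_log_blk_restore() walks a block's entries in reverse, keeping
 * l2ad_buflist in temporal order.
 */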
/*
* Attempts to read the device header on the provided L2ARC device and writes
* it to dev->l2ad_dev_hdr. On success, this function returns 0; otherwise
* the appropriate error code is returned.
*/
static int
l2arc_dev_hdr_read(l2arc_dev_t *dev)
{
int err;
uint64_t guid;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
guid = spa_guid(dev->l2ad_vdev->vdev_spa);
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd,
ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
- ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
- ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_SPECULATIVE, B_FALSE));
abd_free(abd);
if (err != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
return (err);
}
if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(l2dhdr, sizeof (*l2dhdr));
if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC ||
l2dhdr->dh_spa_guid != guid ||
l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid ||
l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION ||
l2dhdr->dh_log_entries != dev->l2ad_log_entries ||
l2dhdr->dh_end != dev->l2ad_end ||
!l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end,
l2dhdr->dh_evict) ||
(l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE &&
l2arc_trim_ahead > 0)) {
/*
* Attempt to rebuild a device containing no actual dev hdr
* or containing a header from some other pool or from another
* version of persistent L2ARC.
*/
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported);
return (SET_ERROR(ENOTSUP));
}
return (0);
}
/*
* Reads L2ARC log blocks from storage and validates their contents.
*
* This function implements a simple fetcher to make sure that while
* we're processing one buffer the L2ARC is already fetching the next
* one in the chain.
*
* The arguments this_lbp and next_lbp point to the current and next log block
* address in the block chain. Similarly, this_lb and next_lb hold the
* l2arc_log_blk_phys_t's of the current and next L2ARC blk.
*
* The `this_io' and `next_io' arguments are used for block fetching.
* When issuing the first blk IO during rebuild, you should pass NULL for
* `this_io'. This function will then issue a sync IO to read the block and
* also issue an async IO to fetch the next block in the block chain. The
* fetched IO is returned in `next_io'. On subsequent calls to this
* function, pass the value returned in `next_io' from the previous call
* as `this_io' and a fresh `next_io' pointer to hold the next fetch IO.
* Prior to the call, you should initialize your `next_io' pointer to be
* NULL. If no fetch IO was issued, the pointer is left set at NULL.
*
* On success, this function returns 0, otherwise it returns an appropriate
* error code. On error the fetching IO is aborted and cleared before
* returning from this function. Therefore, if we return `success', the
* caller can assume that we have taken care of cleanup of fetch IOs.
*/
static int
l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io)
{
int err = 0;
zio_cksum_t cksum;
abd_t *abd = NULL;
uint64_t asize;
ASSERT(this_lbp != NULL && next_lbp != NULL);
ASSERT(this_lb != NULL && next_lb != NULL);
ASSERT(next_io != NULL && *next_io == NULL);
ASSERT(l2arc_log_blkptr_valid(dev, this_lbp));
/*
* Check to see if we have issued the IO for this log block in a
* previous run. If not, this is the first call, so issue it now.
*/
if (this_io == NULL) {
this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp,
this_lb);
}
/*
* Peek to see if we can start issuing the next IO immediately.
*/
if (l2arc_log_blkptr_valid(dev, next_lbp)) {
/*
* Start issuing IO for the next log block early - this
* should help keep the L2ARC device busy while we
* decompress and restore this log block.
*/
*next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp,
next_lb);
}
/* Wait for the IO to read this log block to complete */
if ((err = zio_wait(this_io)) != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading log block, "
"offset: %llu, vdev guid: %llu", err,
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
goto cleanup;
}
/*
* Make sure the buffer checks out.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop);
fletcher_4_native(this_lb, asize, NULL, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors);
zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, "
"vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu",
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid,
(u_longlong_t)dev->l2ad_hand,
(u_longlong_t)dev->l2ad_evict);
err = SET_ERROR(ECKSUM);
goto cleanup;
}
/* Now we can take our time decoding this buffer */
switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
case ZIO_COMPRESS_LZ4:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, this_lb, 0, asize);
if ((err = zio_decompress_data(
L2BLK_GET_COMPRESS((this_lbp)->lbp_prop),
abd, this_lb, asize, sizeof (*this_lb), NULL)) != 0) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
break;
default:
err = SET_ERROR(EINVAL);
goto cleanup;
}
if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(this_lb, sizeof (*this_lb));
if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
cleanup:
/* Abort an in-flight fetch I/O in case of error */
if (err != 0 && *next_io != NULL) {
l2arc_log_blk_fetch_abort(*next_io);
*next_io = NULL;
}
if (abd != NULL)
abd_free(abd);
return (err);
}
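/*
 * A minimal caller-side sketch of the fetch protocol described above
 * (hypothetical caller, mirroring the loop in l2arc_rebuild()):
 *
 *	zio_t *this_io = NULL, *next_io = NULL;
 *	for (;;) {
 *		if (l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
 *		    this_lb, next_lb, this_io, &next_io) != 0)
 *			break;		(fetch IOs already cleaned up)
 *		... restore this_lb ...
 *		this_io = next_io;	(hand back the prefetched IO)
 *		next_io = NULL;
 *	}
 */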
/*
* Restores the payload of a log block to ARC. This creates empty ARC hdr
* entries which only contain an l2arc hdr, essentially restoring the
* buffers to their L2ARC evicted state. This function also updates space
* usage on the L2ARC vdev to make sure it tracks restored buffers.
*/
static void
l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb,
uint64_t lb_asize)
{
uint64_t size = 0, asize = 0;
uint64_t log_entries = dev->l2ad_log_entries;
/*
* Usually arc_adapt() is called only for data, not headers, but
* since we may allocate a significant amount of memory here, let ARC
* grow its arc_c.
*/
arc_adapt(log_entries * HDR_L2ONLY_SIZE);
for (int i = log_entries - 1; i >= 0; i--) {
/*
* Restore goes in the reverse temporal direction to preserve
* correct temporal ordering of buffers in the l2ad_buflist.
* l2arc_hdr_restore also does a list_insert_tail instead of
* list_insert_head on the l2ad_buflist:
*
* LIST l2ad_buflist LIST
* HEAD <------ (time) ------ TAIL
* direction +-----+-----+-----+-----+-----+ direction
* of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild
* fill +-----+-----+-----+-----+-----+
* ^ ^
* | |
* | |
* l2arc_feed_thread l2arc_rebuild
* will place new bufs here restores bufs here
*
* During l2arc_rebuild() the device is not used by
* l2arc_feed_thread() as dev->l2ad_rebuild is set to true.
*/
size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop);
asize += vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop));
l2arc_hdr_restore(&lb->lb_entries[i], dev);
}
/*
* Record rebuild stats:
* size Logical size of restored buffers in the L2ARC
* asize Aligned size of restored buffers in the L2ARC
*/
ARCSTAT_INCR(arcstat_l2_rebuild_size, size);
ARCSTAT_INCR(arcstat_l2_rebuild_asize, asize);
ARCSTAT_INCR(arcstat_l2_rebuild_bufs, log_entries);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, lb_asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, asize / lb_asize);
ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks);
}
/*
* Restores a single ARC buf hdr from a log entry. The ARC buffer is put
* into a state indicating that it has been evicted to L2ARC.
*/
static void
l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
{
arc_buf_hdr_t *hdr, *exists;
kmutex_t *hash_lock;
arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop);
uint64_t asize;
/*
* Do all the allocation before grabbing any locks; this lets us
* sleep if memory is full and we don't have to deal with failed
* allocations.
*/
hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type,
dev, le->le_dva, le->le_daddr,
L2BLK_GET_PSIZE((le)->le_prop), le->le_birth,
L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel,
L2BLK_GET_PROTECTED((le)->le_prop),
L2BLK_GET_PREFETCH((le)->le_prop),
L2BLK_GET_STATE((le)->le_prop));
asize = vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((le)->le_prop));
/*
* vdev_space_update() has to be called before arc_hdr_destroy() to
* avoid underflow since the latter also calls vdev_space_update().
*/
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
mutex_exit(&dev->l2ad_mtx);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists) {
/* Buffer was already cached, no need to restore it. */
arc_hdr_destroy(hdr);
/*
* If the buffer is already cached, check whether it has
* L2ARC metadata. If not, fill it in and update the flag.
* This is important in the case of onlining a cache device, since
* we previously evicted all L2ARC metadata from ARC.
*/
if (!HDR_HAS_L2HDR(exists)) {
arc_hdr_set_flags(exists, ARC_FLAG_HAS_L2HDR);
exists->b_l2hdr.b_dev = dev;
exists->b_l2hdr.b_daddr = le->le_daddr;
exists->b_l2hdr.b_arcs_state =
L2BLK_GET_STATE((le)->le_prop);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, exists);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(exists), exists);
mutex_exit(&dev->l2ad_mtx);
l2arc_hdr_arcstats_increment(exists);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
}
ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached);
}
mutex_exit(hash_lock);
}
/*
* Starts an asynchronous read IO to read a log block. This is used in log
* block reconstruction to start reading the next block before we are done
* decoding and reconstructing the current block, to keep the l2arc device
* nice and hot with read IO to process.
* The returned zio will contain a newly allocated memory buffer for the IO
* data, which should then be freed by the caller once the zio is no longer
* needed (i.e. due to it having completed). If you wish to abort this
* zio, you should do so using l2arc_log_blk_fetch_abort, which takes
* care of disposing of the allocated buffers correctly.
*/
static zio_t *
l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp,
l2arc_log_blk_phys_t *lb)
{
uint32_t asize;
zio_t *pio;
l2arc_read_callback_t *cb;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
ASSERT(asize <= sizeof (l2arc_log_blk_phys_t));
cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP);
cb->l2rcb_abd = abd_get_from_buf(lb, asize);
pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb,
- ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
- ZIO_FLAG_DONT_RETRY);
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY);
(void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize,
cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL,
- ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
+ ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));
return (pio);
}
/*
* Aborts a zio returned from l2arc_log_blk_fetch and frees the data
* buffers allocated for it.
*/
static void
l2arc_log_blk_fetch_abort(zio_t *zio)
{
(void) zio_wait(zio);
}
/*
* Creates a zio to update the device header on an l2arc device.
*/
void
l2arc_dev_hdr_update(l2arc_dev_t *dev)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
int err;
VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER));
l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC;
l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION;
l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa);
l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid;
l2dhdr->dh_log_entries = dev->l2ad_log_entries;
l2dhdr->dh_evict = dev->l2ad_evict;
l2dhdr->dh_start = dev->l2ad_start;
l2dhdr->dh_end = dev->l2ad_end;
l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize);
l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count);
l2dhdr->dh_flags = 0;
l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time;
l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state;
if (dev->l2ad_first)
l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST;
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL,
NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE));
abd_free(abd);
if (err != 0) {
zfs_dbgmsg("L2ARC IO error (%d) while writing device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
}
}
/*
* Commits a log block to the L2ARC device. This routine is invoked from
* l2arc_write_buffers when the log block fills up.
* This function allocates some memory to temporarily hold the serialized
* buffer to be written. This is then released in l2arc_write_done.
*/
-static void
+static uint64_t
l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t psize, asize;
zio_t *wzio;
l2arc_lb_abd_buf_t *abd_buf;
uint8_t *tmpbuf = NULL;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries);
abd_buf = zio_buf_alloc(sizeof (*abd_buf));
abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb));
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP);
/* link the buffer into the block chain */
lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1];
lb->lb_magic = L2ARC_LOG_BLK_MAGIC;
/*
* l2arc_log_blk_commit() may be called multiple times during a single
* l2arc_write_buffers() call. Save the allocated abd buffers in a list
* so we can free them in l2arc_write_done() later on.
*/
list_insert_tail(&cb->l2wcb_abd_list, abd_buf);
/* try to compress the buffer */
psize = zio_compress_data(ZIO_COMPRESS_LZ4,
abd_buf->abd, (void **) &tmpbuf, sizeof (*lb), 0);
/* a log block is never entirely zero */
ASSERT(psize != 0);
asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(asize <= sizeof (*lb));
/*
* Update the start log block pointer in the device header to point
* to the log block we're about to write.
*/
l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0];
l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
l2dhdr->dh_start_lbps[0].lbp_payload_asize =
dev->l2ad_log_blk_payload_asize;
l2dhdr->dh_start_lbps[0].lbp_payload_start =
dev->l2ad_log_blk_payload_start;
L2BLK_SET_LSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb));
L2BLK_SET_PSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, asize);
L2BLK_SET_CHECKSUM(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_CHECKSUM_FLETCHER_4);
if (asize < sizeof (*lb)) {
/* compression succeeded */
memset(tmpbuf + psize, 0, asize - psize);
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_LZ4);
} else {
/* compression failed */
memcpy(tmpbuf, lb, sizeof (*lb));
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_OFF);
}
/* checksum what we're about to write */
fletcher_4_native(tmpbuf, asize, NULL,
&l2dhdr->dh_start_lbps[0].lbp_cksum);
abd_free(abd_buf->abd);
/* perform the write itself */
abd_buf->abd = abd_get_from_buf(tmpbuf, sizeof (*lb));
abd_take_ownership_of_buf(abd_buf->abd, B_TRUE);
wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
(void) zio_nowait(wzio);
dev->l2ad_hand += asize;
/*
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
*/
memcpy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[0],
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
/* bump the kstats */
ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
dev->l2ad_log_blk_payload_asize / asize);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
+
+ return (asize);
}
/*
* Validates an L2ARC log block address to make sure that it can be read
* from the provided L2ARC device.
*/
boolean_t
l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
{
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
uint64_t end = lbp->lbp_daddr + asize - 1;
uint64_t start = lbp->lbp_payload_start;
boolean_t evicted = B_FALSE;
/*
* A log block is valid if all of the following conditions are true:
* - it fits entirely (including its payload) between l2ad_start and
* l2ad_end
* - it has a valid size
* - neither the log block itself nor part of its payload was evicted
* by l2arc_evict():
*
* l2ad_hand l2ad_evict
* | | lbp_daddr
* | start | | end
* | | | | |
* V V V V V
* l2ad_start ============================================ l2ad_end
* --------------------------||||
* ^ ^
* | log block
* payload
*/
evicted =
l2arc_range_check_overlap(start, end, dev->l2ad_hand) ||
l2arc_range_check_overlap(start, end, dev->l2ad_evict) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end);
return (start >= dev->l2ad_start && end <= dev->l2ad_end &&
asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) &&
(!evicted || dev->l2ad_first));
}
/*
* Inserts ARC buffer header `hdr' into the current L2ARC log block on
* the device. The buffer being inserted must be present in L2ARC.
* Returns B_TRUE if the L2ARC log block is full and needs to be committed
* to L2ARC, or B_FALSE if it still has room for more ARC buffers.
*/
static boolean_t
l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_log_ent_phys_t *le;
if (dev->l2ad_log_entries == 0)
return (B_FALSE);
int index = dev->l2ad_log_ent_idx++;
ASSERT3S(index, <, dev->l2ad_log_entries);
ASSERT(HDR_HAS_L2HDR(hdr));
le = &lb->lb_entries[index];
memset(le, 0, sizeof (*le));
le->le_dva = hdr->b_dva;
le->le_birth = hdr->b_birth;
le->le_daddr = hdr->b_l2hdr.b_daddr;
if (index == 0)
dev->l2ad_log_blk_payload_start = le->le_daddr;
L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr));
L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr));
L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr));
le->le_complevel = hdr->b_complevel;
L2BLK_SET_TYPE((le)->le_prop, hdr->b_type);
L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr)));
L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr)));
L2BLK_SET_STATE((le)->le_prop, hdr->b_l1hdr.b_state->arcs_state);
dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev,
HDR_GET_PSIZE(hdr));
return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries);
}
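/*
 * Illustrative flow for the insert above (assumed defaults, not part of
 * this change): with l2ad_log_entries = 1022, the first 1021 calls return
 * B_FALSE; the 1022nd fills the block and returns B_TRUE, at which point
 * l2arc_write_buffers() commits it via l2arc_log_blk_commit().
 */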
/*
* Checks whether a given L2ARC device address sits in a time-sequential
* range. The trick here is that the L2ARC is a rotary buffer, so we can't
* just do a range comparison; we need to handle the situation in which the
* range wraps around the end of the L2ARC device. Arguments:
* bottom -- Lower end of the range to check (written to earlier).
* top -- Upper end of the range to check (written to later).
* check -- The address for which we want to determine if it sits in
* between the top and bottom.
*
* The 3-way conditional below represents the following cases:
*
* bottom < top : Sequentially ordered case:
* <check>--------+-------------------+
* | (overlap here?) |
* L2ARC dev V V
* |---------------<bottom>============<top>--------------|
*
* bottom > top: Looped-around case:
* <check>--------+------------------+
* | (overlap here?) |
* L2ARC dev V V
* |===============<top>---------------<bottom>===========|
* ^ ^
* | (or here?) |
* +---------------+---------<check>
*
* top == bottom : Just a single address comparison.
*/
boolean_t
l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
{
if (bottom < top)
return (bottom <= check && check <= top);
else if (bottom > top)
return (check <= top || bottom <= check);
else
return (check == top);
}
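/*
 * Worked examples for the wrap-around check above (assumed addresses):
 * with bottom = 900 and top = 100 (the looped-around case),
 * l2arc_range_check_overlap(900, 100, 950) returns B_TRUE since 900 <= 950,
 * while l2arc_range_check_overlap(900, 100, 500) returns B_FALSE, since
 * 500 is neither <= 100 nor >= 900.
 */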
EXPORT_SYMBOL(arc_buf_size);
EXPORT_SYMBOL(arc_write);
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_info);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min,
spl_param_get_u64, ZMOD_RW, "Minimum ARC size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max,
spl_param_get_u64, ZMOD_RW, "Maximum ARC size in bytes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_balance, UINT, ZMOD_RW,
"Balance between metadata and data on ghost hits.");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
param_get_uint, ZMOD_RW, "Seconds before growing ARC size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
param_get_uint, ZMOD_RW, "log2(fraction of ARC to reclaim)");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
"Percent of pagecache to reclaim ARC to");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, UINT, ZMOD_RD,
"Target average block size");
ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
"Disable compressed ARC buffers");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
param_get_uint, ZMOD_RW, "Min life of prefetch block in ms");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
param_set_arc_int, param_get_uint, ZMOD_RW,
"Min life of prescient prefetched block in ms");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, U64, ZMOD_RW,
"Max write bytes per interval");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_boost, U64, ZMOD_RW,
"Extra write bytes during device warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom, U64, ZMOD_RW,
"Number of max device writes to precache");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom_boost, U64, ZMOD_RW,
"Compressed l2arc_headroom multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, trim_ahead, U64, ZMOD_RW,
"TRIM ahead L2ARC write size multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_secs, U64, ZMOD_RW,
"Seconds between L2ARC writing");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_min_ms, U64, ZMOD_RW,
"Min feed interval in milliseconds");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, noprefetch, INT, ZMOD_RW,
"Skip caching prefetched buffers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
"Turbo L2ARC warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
"No reads during writes");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, UINT, ZMOD_RW,
"Percent of ARC size allowed for L2ARC-only headers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
"Rebuild the L2ARC when importing a pool");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_blocks_min_l2size, U64, ZMOD_RW,
"Min size in bytes to write rebuild log blocks in L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, mfuonly, INT, ZMOD_RW,
"Cache only MFU data from ARC into L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, exclude_special, INT, ZMOD_RW,
"Exclude dbufs on special vdevs from being cached to L2ARC if set.");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
param_get_uint, ZMOD_RW, "System free memory I/O throttle in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_u64,
spl_param_get_u64, ZMOD_RW, "System free memory target size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_u64,
spl_param_get_u64, ZMOD_RW, "Minimum bytes of dnodes in ARC");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
param_set_arc_int, param_get_uint, ZMOD_RW,
"Percent of ARC meta buffers for dnodes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, UINT, ZMOD_RW,
"Percentage of excess dnodes to try to unpin");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, UINT, ZMOD_RW,
"When full, ARC allocation waits for eviction of this % of alloc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, UINT, ZMOD_RW,
"The number of headers to evict per sublist before moving to the next");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, prune_task_threads, INT, ZMOD_RW,
"Number of arc_prune threads");
diff --git a/sys/contrib/openzfs/module/zfs/bplist.c b/sys/contrib/openzfs/module/zfs/bplist.c
index 1c1f7892bb7d..da7360f8ce10 100644
--- a/sys/contrib/openzfs/module/zfs/bplist.c
+++ b/sys/contrib/openzfs/module/zfs/bplist.c
@@ -1,91 +1,87 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
#include <sys/bplist.h>
#include <sys/zfs_context.h>
void
bplist_create(bplist_t *bpl)
{
mutex_init(&bpl->bpl_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&bpl->bpl_list, sizeof (bplist_entry_t),
offsetof(bplist_entry_t, bpe_node));
}
void
bplist_destroy(bplist_t *bpl)
{
list_destroy(&bpl->bpl_list);
mutex_destroy(&bpl->bpl_lock);
}
void
bplist_append(bplist_t *bpl, const blkptr_t *bp)
{
bplist_entry_t *bpe = kmem_alloc(sizeof (*bpe), KM_SLEEP);
mutex_enter(&bpl->bpl_lock);
bpe->bpe_blk = *bp;
list_insert_tail(&bpl->bpl_list, bpe);
mutex_exit(&bpl->bpl_lock);
}
/*
* To aid debugging, we keep the most recently removed entry. This way if
* we are in the callback, we can easily locate the entry.
*/
static bplist_entry_t *bplist_iterate_last_removed;
void
bplist_iterate(bplist_t *bpl, bplist_itor_t *func, void *arg, dmu_tx_t *tx)
{
bplist_entry_t *bpe;
mutex_enter(&bpl->bpl_lock);
- while ((bpe = list_head(&bpl->bpl_list))) {
+ while ((bpe = list_remove_head(&bpl->bpl_list))) {
bplist_iterate_last_removed = bpe;
- list_remove(&bpl->bpl_list, bpe);
mutex_exit(&bpl->bpl_lock);
func(arg, &bpe->bpe_blk, tx);
kmem_free(bpe, sizeof (*bpe));
mutex_enter(&bpl->bpl_lock);
}
mutex_exit(&bpl->bpl_lock);
}
void
bplist_clear(bplist_t *bpl)
{
bplist_entry_t *bpe;
mutex_enter(&bpl->bpl_lock);
- while ((bpe = list_head(&bpl->bpl_list))) {
- bplist_iterate_last_removed = bpe;
- list_remove(&bpl->bpl_list, bpe);
+ while ((bpe = list_remove_head(&bpl->bpl_list)))
kmem_free(bpe, sizeof (*bpe));
- }
mutex_exit(&bpl->bpl_lock);
}
diff --git a/sys/contrib/openzfs/module/zfs/btree.c b/sys/contrib/openzfs/module/zfs/btree.c
index 4c25afaa8199..af2b94a850be 100644
--- a/sys/contrib/openzfs/module/zfs/btree.c
+++ b/sys/contrib/openzfs/module/zfs/btree.c
@@ -1,2207 +1,2215 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
#include <sys/btree.h>
#include <sys/bitops.h>
#include <sys/zfs_context.h>
kmem_cache_t *zfs_btree_leaf_cache;
/*
* Control the extent of the verification that occurs when zfs_btree_verify is
* called. Primarily used for debugging when extending the btree logic and
* functionality. As the intensity is increased, new verification steps are
* added. These steps are cumulative; intensity = 3 includes the intensity = 1
* and intensity = 2 steps as well.
*
* Intensity 1: Verify that the tree's height is consistent throughout.
* Intensity 2: Verify that a core node's children's parent pointers point
* to the core node.
* Intensity 3: Verify that the total number of elements in the tree matches the
* sum of the number of elements in each node. Also verifies that each node's
* count obeys the invariants (less than or equal to maximum value, greater than
* or equal to half the maximum minus one).
* Intensity 4: Verify that each element compares less than the element
* immediately after it and greater than the one immediately before it using the
* comparator function. For core nodes, also checks that each element is greater
* than the last element in the first of the two nodes it separates, and less
* than the first element in the second of the two nodes.
* Intensity 5: Verifies, if ZFS_DEBUG is defined, that all unused memory inside
* of each node is poisoned appropriately. Note that poisoning always occurs if
* ZFS_DEBUG is set, so it is safe to set the intensity to 5 during normal
* operation.
*
* Intensity 4 and 5 are particularly expensive to perform; the previous levels
* are a few memory operations per node, while these levels require multiple
* operations per element. In addition, when creating large btrees, these
* operations are called at every step, resulting in extremely slow operation
* (while the asymptotic complexity of the other steps is the same, the
* importance of the constant factors cannot be denied).
*/
uint_t zfs_btree_verify_intensity = 0;
/*
* Convenience functions to silence warnings from memcpy/memmove's
* return values and change argument order to src, dest.
*/
static void
bcpy(const void *src, void *dest, size_t size)
{
(void) memcpy(dest, src, size);
}
static void
bmov(const void *src, void *dest, size_t size)
{
(void) memmove(dest, src, size);
}
static boolean_t
zfs_btree_is_core(struct zfs_btree_hdr *hdr)
{
return (hdr->bth_first == -1);
}
#ifdef _ILP32
#define BTREE_POISON 0xabadb10c
#else
#define BTREE_POISON 0xabadb10cdeadbeef
#endif
static void
zfs_btree_poison_node(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
#ifdef ZFS_DEBUG
size_t size = tree->bt_elem_size;
if (zfs_btree_is_core(hdr)) {
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (uint32_t i = hdr->bth_count + 1; i <= BTREE_CORE_ELEMS;
i++) {
node->btc_children[i] =
(zfs_btree_hdr_t *)BTREE_POISON;
}
(void) memset(node->btc_elems + hdr->bth_count * size, 0x0f,
(BTREE_CORE_ELEMS - hdr->bth_count) * size);
} else {
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
(void) memset(leaf->btl_elems, 0x0f, hdr->bth_first * size);
(void) memset(leaf->btl_elems +
(hdr->bth_first + hdr->bth_count) * size, 0x0f,
tree->bt_leaf_size - offsetof(zfs_btree_leaf_t, btl_elems) -
(hdr->bth_first + hdr->bth_count) * size);
}
#endif
}
static inline void
zfs_btree_poison_node_at(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
uint32_t idx, uint32_t count)
{
#ifdef ZFS_DEBUG
size_t size = tree->bt_elem_size;
if (zfs_btree_is_core(hdr)) {
ASSERT3U(idx, >=, hdr->bth_count);
ASSERT3U(idx, <=, BTREE_CORE_ELEMS);
ASSERT3U(idx + count, <=, BTREE_CORE_ELEMS);
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (uint32_t i = 1; i <= count; i++) {
node->btc_children[idx + i] =
(zfs_btree_hdr_t *)BTREE_POISON;
}
(void) memset(node->btc_elems + idx * size, 0x0f, count * size);
} else {
ASSERT3U(idx, <=, tree->bt_leaf_cap);
ASSERT3U(idx + count, <=, tree->bt_leaf_cap);
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
(void) memset(leaf->btl_elems +
(hdr->bth_first + idx) * size, 0x0f, count * size);
}
#endif
}
static inline void
zfs_btree_verify_poison_at(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
uint32_t idx)
{
#ifdef ZFS_DEBUG
size_t size = tree->bt_elem_size;
if (zfs_btree_is_core(hdr)) {
ASSERT3U(idx, <, BTREE_CORE_ELEMS);
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
zfs_btree_hdr_t *cval = (zfs_btree_hdr_t *)BTREE_POISON;
VERIFY3P(node->btc_children[idx + 1], ==, cval);
for (size_t i = 0; i < size; i++)
VERIFY3U(node->btc_elems[idx * size + i], ==, 0x0f);
} else {
ASSERT3U(idx, <, tree->bt_leaf_cap);
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
if (idx >= tree->bt_leaf_cap - hdr->bth_first)
return;
for (size_t i = 0; i < size; i++) {
VERIFY3U(leaf->btl_elems[(hdr->bth_first + idx)
* size + i], ==, 0x0f);
}
}
#endif
}
void
zfs_btree_init(void)
{
zfs_btree_leaf_cache = kmem_cache_create("zfs_btree_leaf_cache",
BTREE_LEAF_SIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
zfs_btree_fini(void)
{
kmem_cache_destroy(zfs_btree_leaf_cache);
}
static void *
zfs_btree_leaf_alloc(zfs_btree_t *tree)
{
if (tree->bt_leaf_size == BTREE_LEAF_SIZE)
return (kmem_cache_alloc(zfs_btree_leaf_cache, KM_SLEEP));
else
return (kmem_alloc(tree->bt_leaf_size, KM_SLEEP));
}
static void
zfs_btree_leaf_free(zfs_btree_t *tree, void *ptr)
{
if (tree->bt_leaf_size == BTREE_LEAF_SIZE)
return (kmem_cache_free(zfs_btree_leaf_cache, ptr));
else
return (kmem_free(ptr, tree->bt_leaf_size));
}
void
zfs_btree_create(zfs_btree_t *tree, int (*compar) (const void *, const void *),
- size_t size)
+ bt_find_in_buf_f bt_find_in_buf, size_t size)
{
- zfs_btree_create_custom(tree, compar, size, BTREE_LEAF_SIZE);
+ zfs_btree_create_custom(tree, compar, bt_find_in_buf, size,
+ BTREE_LEAF_SIZE);
}
+static void *
+zfs_btree_find_in_buf(zfs_btree_t *tree, uint8_t *buf, uint32_t nelems,
+ const void *value, zfs_btree_index_t *where);
+
void
zfs_btree_create_custom(zfs_btree_t *tree,
int (*compar) (const void *, const void *),
+ bt_find_in_buf_f bt_find_in_buf,
size_t size, size_t lsize)
{
size_t esize = lsize - offsetof(zfs_btree_leaf_t, btl_elems);
ASSERT3U(size, <=, esize / 2);
memset(tree, 0, sizeof (*tree));
tree->bt_compar = compar;
+ tree->bt_find_in_buf = (bt_find_in_buf == NULL) ?
+ zfs_btree_find_in_buf : bt_find_in_buf;
tree->bt_elem_size = size;
tree->bt_leaf_size = lsize;
tree->bt_leaf_cap = P2ALIGN(esize / size, 2);
tree->bt_height = -1;
tree->bt_bulk = NULL;
}
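/*
 * A minimal usage sketch for the new bt_find_in_buf argument (hypothetical
 * caller names): existing callers can pass NULL and keep the generic binary
 * search, e.g.
 *
 *	zfs_btree_create(&tree, my_compar, NULL, sizeof (my_elem_t));
 *
 * while a caller supplying its own bt_find_in_buf_f has it used for every
 * in-node lookup performed by zfs_btree_find().
 */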
/*
* Find value in the array of elements provided. Uses a simple binary search.
*/
static void *
zfs_btree_find_in_buf(zfs_btree_t *tree, uint8_t *buf, uint32_t nelems,
const void *value, zfs_btree_index_t *where)
{
uint32_t max = nelems;
uint32_t min = 0;
while (max > min) {
uint32_t idx = (min + max) / 2;
uint8_t *cur = buf + idx * tree->bt_elem_size;
int comp = tree->bt_compar(cur, value);
if (comp < 0) {
min = idx + 1;
} else if (comp > 0) {
max = idx;
} else {
where->bti_offset = idx;
where->bti_before = B_FALSE;
return (cur);
}
}
where->bti_offset = max;
where->bti_before = B_TRUE;
return (NULL);
}
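/*
 * A worked trace of the binary search above (assumed inputs): with
 * nelems = 7 and the target equal to the element at index 5, the probes
 * visit index 3 and then index 5 and return a hit with bti_before = B_FALSE.
 * If the target instead falls between indices 4 and 5, the loop ends with
 * min == max == 5, so bti_offset = 5 and bti_before = B_TRUE.
 */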
/*
* Find the given value in the tree. `where' may be passed as NULL when the
* find is used as a membership test or when the btree is being used as a map.
*/
void *
zfs_btree_find(zfs_btree_t *tree, const void *value, zfs_btree_index_t *where)
{
if (tree->bt_height == -1) {
if (where != NULL) {
where->bti_node = NULL;
where->bti_offset = 0;
}
ASSERT0(tree->bt_num_elems);
return (NULL);
}
/*
* If we're in bulk-insert mode, we check the last spot in the tree
* and the last leaf in the tree before doing the normal search,
* because for most workloads the vast majority of finds in
* bulk-insert mode are to insert new elements.
*/
zfs_btree_index_t idx;
size_t size = tree->bt_elem_size;
if (tree->bt_bulk != NULL) {
zfs_btree_leaf_t *last_leaf = tree->bt_bulk;
int comp = tree->bt_compar(last_leaf->btl_elems +
(last_leaf->btl_hdr.bth_first +
last_leaf->btl_hdr.bth_count - 1) * size, value);
if (comp < 0) {
/*
* If what they're looking for is after the last
* element, it's not in the tree.
*/
if (where != NULL) {
where->bti_node = (zfs_btree_hdr_t *)last_leaf;
where->bti_offset =
last_leaf->btl_hdr.bth_count;
where->bti_before = B_TRUE;
}
return (NULL);
} else if (comp == 0) {
if (where != NULL) {
where->bti_node = (zfs_btree_hdr_t *)last_leaf;
where->bti_offset =
last_leaf->btl_hdr.bth_count - 1;
where->bti_before = B_FALSE;
}
return (last_leaf->btl_elems +
(last_leaf->btl_hdr.bth_first +
last_leaf->btl_hdr.bth_count - 1) * size);
}
if (tree->bt_compar(last_leaf->btl_elems +
last_leaf->btl_hdr.bth_first * size, value) <= 0) {
/*
* If what they're looking for is after the first
* element in the last leaf, it's in the last leaf or
* it's not in the tree.
*/
- void *d = zfs_btree_find_in_buf(tree,
+ void *d = tree->bt_find_in_buf(tree,
last_leaf->btl_elems +
last_leaf->btl_hdr.bth_first * size,
last_leaf->btl_hdr.bth_count, value, &idx);
if (where != NULL) {
idx.bti_node = (zfs_btree_hdr_t *)last_leaf;
*where = idx;
}
return (d);
}
}
zfs_btree_core_t *node = NULL;
uint32_t child = 0;
uint32_t depth = 0;
/*
* Iterate down the tree, finding which child the value should be in
* by comparing with the separators.
*/
for (node = (zfs_btree_core_t *)tree->bt_root; depth < tree->bt_height;
node = (zfs_btree_core_t *)node->btc_children[child], depth++) {
ASSERT3P(node, !=, NULL);
- void *d = zfs_btree_find_in_buf(tree, node->btc_elems,
+ void *d = tree->bt_find_in_buf(tree, node->btc_elems,
node->btc_hdr.bth_count, value, &idx);
EQUIV(d != NULL, !idx.bti_before);
if (d != NULL) {
if (where != NULL) {
idx.bti_node = (zfs_btree_hdr_t *)node;
*where = idx;
}
return (d);
}
ASSERT(idx.bti_before);
child = idx.bti_offset;
}
/*
* The value is in this leaf, or it would be if it were in the
* tree. Find its proper location and return it.
*/
zfs_btree_leaf_t *leaf = (depth == 0 ?
(zfs_btree_leaf_t *)tree->bt_root : (zfs_btree_leaf_t *)node);
- void *d = zfs_btree_find_in_buf(tree, leaf->btl_elems +
+ void *d = tree->bt_find_in_buf(tree, leaf->btl_elems +
leaf->btl_hdr.bth_first * size,
leaf->btl_hdr.bth_count, value, &idx);
if (where != NULL) {
idx.bti_node = (zfs_btree_hdr_t *)leaf;
*where = idx;
}
return (d);
}
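/*
 * Illustrative usage sketch (not part of the original source): a NULL return
 * with a populated index is the usual find-or-insert pattern. The names bt
 * and my_elem_t are the hypothetical ones from the earlier sketch.
 *
 *	zfs_btree_index_t where;
 *	my_elem_t srch = { .key = 42 };
 *
 *	if (zfs_btree_find(&bt, &srch, &where) == NULL)
 *		zfs_btree_add_idx(&bt, &srch, &where);
 */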
/*
* To explain the following functions, it is useful to understand the four
* kinds of shifts used in btree operation. First, a shift is a movement of
* elements within a node. It is used to create gaps for inserting new
* elements and children, or cover gaps created when things are removed. A
* shift has two fundamental properties, each of which can be one of two
* values, making four types of shifts. There is the direction of the shift
* (left or right) and the shape of the shift (parallelogram or isosceles
* trapezoid (shortened to trapezoid hereafter)). The shape distinction only
* applies to shifts of core nodes.
*
* The names derive from the following imagining of the layout of a node:
*
* Elements: * * * * * * * ... * * *
* Children: * * * * * * * * ... * * *
*
* This layout follows from the fact that the elements act as separators
* between pairs of children, and that children root subtrees "below" the
* current node. A left and right shift are fairly self-explanatory; a left
* shift moves things to the left, while a right shift moves things to the
* right. A parallelogram shift is a shift with the same number of elements
* and children being moved, while a trapezoid shift is a shift that moves one
* more child than it does elements. An example follows:
*
* A parallelogram shift could contain the following:
* _______________
* \* * * * \ * * * ... * * *
* * \ * * * *\ * * * ... * * *
* ---------------
* A trapezoid shift could contain the following:
* ___________
* * / * * * \ * * * ... * * *
* * / * * * *\ * * * ... * * *
* ---------------
*
* Note that a parallelogram shift is always shaped like a "left-leaning"
* parallelogram, where the starting index of the children being moved is
* always one higher than the starting index of the elements being moved. No
* "right-leaning" parallelogram shifts are needed (shifts where the starting
* element index and starting child index being moved are the same) to achieve
* any btree operations, so we ignore them.
*/
enum bt_shift_shape {
BSS_TRAPEZOID,
BSS_PARALLELOGRAM
};
enum bt_shift_direction {
BSD_LEFT,
BSD_RIGHT
};
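/*
 * Worked example (illustrative, not part of the original source): in a core
 * node with elements e0..e3 and children c0..c4, a right shift with idx = 1,
 * count = 2 and off = 1 moves e1,e2 into element slots 2,3. With
 * BSS_PARALLELOGRAM the children moved are c2,c3 (into slots 3,4); with
 * BSS_TRAPEZOID they are c1,c2,c3 (into slots 2,3,4), one more child than
 * elements.
 */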
/*
* Shift elements and children in the provided core node by off spots. The
* first element moved is idx, and count elements are moved. The shape of the
* shift is determined by shape. The direction is determined by dir.
*/
static inline void
bt_shift_core(zfs_btree_t *tree, zfs_btree_core_t *node, uint32_t idx,
uint32_t count, uint32_t off, enum bt_shift_shape shape,
enum bt_shift_direction dir)
{
size_t size = tree->bt_elem_size;
ASSERT(zfs_btree_is_core(&node->btc_hdr));
uint8_t *e_start = node->btc_elems + idx * size;
uint8_t *e_out = (dir == BSD_LEFT ? e_start - off * size :
e_start + off * size);
bmov(e_start, e_out, count * size);
zfs_btree_hdr_t **c_start = node->btc_children + idx +
(shape == BSS_TRAPEZOID ? 0 : 1);
zfs_btree_hdr_t **c_out = (dir == BSD_LEFT ? c_start - off :
c_start + off);
uint32_t c_count = count + (shape == BSS_TRAPEZOID ? 1 : 0);
bmov(c_start, c_out, c_count * sizeof (*c_start));
}
/*
* Shift elements and children in the provided core node left by one spot.
* The first element moved is idx, and count elements are moved. The shape
* of the shift (trapezoid or parallelogram) is determined by shape.
*/
static inline void
bt_shift_core_left(zfs_btree_t *tree, zfs_btree_core_t *node, uint32_t idx,
uint32_t count, enum bt_shift_shape shape)
{
bt_shift_core(tree, node, idx, count, 1, shape, BSD_LEFT);
}
/*
* Shift elements and children in the provided core node right by one spot.
* The first element moved is idx, and count elements are moved. The shape
* of the shift (trapezoid or parallelogram) is determined by shape.
*/
static inline void
bt_shift_core_right(zfs_btree_t *tree, zfs_btree_core_t *node, uint32_t idx,
uint32_t count, enum bt_shift_shape shape)
{
bt_shift_core(tree, node, idx, count, 1, shape, BSD_RIGHT);
}
/*
* Shift elements and children in the provided leaf node by off spots.
* The first element moved is idx, and count elements are moved. The direction
* is determined by dir.
*/
static inline void
bt_shift_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *node, uint32_t idx,
uint32_t count, uint32_t off, enum bt_shift_direction dir)
{
size_t size = tree->bt_elem_size;
zfs_btree_hdr_t *hdr = &node->btl_hdr;
ASSERT(!zfs_btree_is_core(hdr));
if (count == 0)
return;
uint8_t *start = node->btl_elems + (hdr->bth_first + idx) * size;
uint8_t *out = (dir == BSD_LEFT ? start - off * size :
start + off * size);
bmov(start, out, count * size);
}
/*
* Grow leaf for n new elements before idx.
*/
static void
bt_grow_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *leaf, uint32_t idx,
uint32_t n)
{
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
ASSERT(!zfs_btree_is_core(hdr));
ASSERT3U(idx, <=, hdr->bth_count);
uint32_t capacity = tree->bt_leaf_cap;
ASSERT3U(hdr->bth_count + n, <=, capacity);
boolean_t cl = (hdr->bth_first >= n);
boolean_t cr = (hdr->bth_first + hdr->bth_count + n <= capacity);
if (cl && (!cr || idx <= hdr->bth_count / 2)) {
/* Grow left. */
hdr->bth_first -= n;
bt_shift_leaf(tree, leaf, n, idx, n, BSD_LEFT);
} else if (cr) {
/* Grow right. */
bt_shift_leaf(tree, leaf, idx, hdr->bth_count - idx, n,
BSD_RIGHT);
} else {
/* Grow both ways. */
uint32_t fn = hdr->bth_first -
(capacity - (hdr->bth_count + n)) / 2;
hdr->bth_first -= fn;
bt_shift_leaf(tree, leaf, fn, idx, fn, BSD_LEFT);
bt_shift_leaf(tree, leaf, fn + idx, hdr->bth_count - idx,
n - fn, BSD_RIGHT);
}
hdr->bth_count += n;
}
/*
* Shrink leaf by n elements starting from idx.
*/
static void
bt_shrink_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *leaf, uint32_t idx,
uint32_t n)
{
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
ASSERT(!zfs_btree_is_core(hdr));
ASSERT3U(idx, <=, hdr->bth_count);
ASSERT3U(idx + n, <=, hdr->bth_count);
if (idx <= (hdr->bth_count - n) / 2) {
bt_shift_leaf(tree, leaf, 0, idx, n, BSD_RIGHT);
zfs_btree_poison_node_at(tree, hdr, 0, n);
hdr->bth_first += n;
} else {
bt_shift_leaf(tree, leaf, idx + n, hdr->bth_count - idx - n, n,
BSD_LEFT);
zfs_btree_poison_node_at(tree, hdr, hdr->bth_count - n, n);
}
hdr->bth_count -= n;
}
/*
* Move children and elements from one core node to another. The shape
* parameter behaves the same as it does in the shift logic.
*/
static inline void
bt_transfer_core(zfs_btree_t *tree, zfs_btree_core_t *source, uint32_t sidx,
uint32_t count, zfs_btree_core_t *dest, uint32_t didx,
enum bt_shift_shape shape)
{
size_t size = tree->bt_elem_size;
ASSERT(zfs_btree_is_core(&source->btc_hdr));
ASSERT(zfs_btree_is_core(&dest->btc_hdr));
bcpy(source->btc_elems + sidx * size, dest->btc_elems + didx * size,
count * size);
uint32_t c_count = count + (shape == BSS_TRAPEZOID ? 1 : 0);
bcpy(source->btc_children + sidx + (shape == BSS_TRAPEZOID ? 0 : 1),
dest->btc_children + didx + (shape == BSS_TRAPEZOID ? 0 : 1),
c_count * sizeof (*source->btc_children));
}
static inline void
bt_transfer_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *source, uint32_t sidx,
uint32_t count, zfs_btree_leaf_t *dest, uint32_t didx)
{
size_t size = tree->bt_elem_size;
ASSERT(!zfs_btree_is_core(&source->btl_hdr));
ASSERT(!zfs_btree_is_core(&dest->btl_hdr));
bcpy(source->btl_elems + (source->btl_hdr.bth_first + sidx) * size,
dest->btl_elems + (dest->btl_hdr.bth_first + didx) * size,
count * size);
}
/*
* Find the first element in the subtree rooted at hdr, return its value and
* put its location in where if non-null.
*/
static void *
zfs_btree_first_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
zfs_btree_index_t *where)
{
zfs_btree_hdr_t *node;
for (node = hdr; zfs_btree_is_core(node);
node = ((zfs_btree_core_t *)node)->btc_children[0])
;
ASSERT(!zfs_btree_is_core(node));
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)node;
if (where != NULL) {
where->bti_node = node;
where->bti_offset = 0;
where->bti_before = B_FALSE;
}
return (&leaf->btl_elems[node->bth_first * tree->bt_elem_size]);
}
/* Insert an element and a child into a core node at the given offset. */
static void
zfs_btree_insert_core_impl(zfs_btree_t *tree, zfs_btree_core_t *parent,
uint32_t offset, zfs_btree_hdr_t *new_node, void *buf)
{
size_t size = tree->bt_elem_size;
zfs_btree_hdr_t *par_hdr = &parent->btc_hdr;
ASSERT3P(par_hdr, ==, new_node->bth_parent);
ASSERT3U(par_hdr->bth_count, <, BTREE_CORE_ELEMS);
if (zfs_btree_verify_intensity >= 5) {
zfs_btree_verify_poison_at(tree, par_hdr,
par_hdr->bth_count);
}
/* Shift existing elements and children */
uint32_t count = par_hdr->bth_count - offset;
bt_shift_core_right(tree, parent, offset, count,
BSS_PARALLELOGRAM);
/* Insert new values */
parent->btc_children[offset + 1] = new_node;
bcpy(buf, parent->btc_elems + offset * size, size);
par_hdr->bth_count++;
}
/*
* Insert new_node into the parent of old_node directly after old_node, with
* buf as the dividing element between the two.
*/
static void
zfs_btree_insert_into_parent(zfs_btree_t *tree, zfs_btree_hdr_t *old_node,
zfs_btree_hdr_t *new_node, void *buf)
{
ASSERT3P(old_node->bth_parent, ==, new_node->bth_parent);
size_t size = tree->bt_elem_size;
zfs_btree_core_t *parent = old_node->bth_parent;
/*
* If this is the root node we were splitting, we create a new root
* and increase the height of the tree.
*/
if (parent == NULL) {
ASSERT3P(old_node, ==, tree->bt_root);
tree->bt_num_nodes++;
zfs_btree_core_t *new_root =
kmem_alloc(sizeof (zfs_btree_core_t) + BTREE_CORE_ELEMS *
size, KM_SLEEP);
zfs_btree_hdr_t *new_root_hdr = &new_root->btc_hdr;
new_root_hdr->bth_parent = NULL;
new_root_hdr->bth_first = -1;
new_root_hdr->bth_count = 1;
old_node->bth_parent = new_node->bth_parent = new_root;
new_root->btc_children[0] = old_node;
new_root->btc_children[1] = new_node;
bcpy(buf, new_root->btc_elems, size);
tree->bt_height++;
tree->bt_root = new_root_hdr;
zfs_btree_poison_node(tree, new_root_hdr);
return;
}
/*
* Since we have the new separator, binary search for where to put
* new_node.
*/
zfs_btree_hdr_t *par_hdr = &parent->btc_hdr;
zfs_btree_index_t idx;
ASSERT(zfs_btree_is_core(par_hdr));
- VERIFY3P(zfs_btree_find_in_buf(tree, parent->btc_elems,
+ VERIFY3P(tree->bt_find_in_buf(tree, parent->btc_elems,
par_hdr->bth_count, buf, &idx), ==, NULL);
ASSERT(idx.bti_before);
uint32_t offset = idx.bti_offset;
ASSERT3U(offset, <=, par_hdr->bth_count);
ASSERT3P(parent->btc_children[offset], ==, old_node);
/*
* If the parent isn't full, shift things to accommodate our insertions
* and return.
*/
if (par_hdr->bth_count != BTREE_CORE_ELEMS) {
zfs_btree_insert_core_impl(tree, parent, offset, new_node, buf);
return;
}
/*
* We need to split this core node into two. Currently there are
* BTREE_CORE_ELEMS + 1 child nodes, and we are adding one for
* BTREE_CORE_ELEMS + 2. Some of the children will be part of the
* current node, and the others will be moved to the new core node.
* There are BTREE_CORE_ELEMS + 1 elements including the new one. One
* will be used as the new separator in our parent, and the others
* will be split among the two core nodes.
*
* Usually we will split the node in half evenly, with
* BTREE_CORE_ELEMS/2 elements in each node. If we're bulk loading, we
* instead move only about a quarter of the elements (and children) to
* the new node. Since the average state after a long time is a 3/4
* full node, shortcutting directly to that state improves efficiency.
*
* We do this in two stages: first we split into two nodes, and then we
* reuse our existing logic to insert the new element and child.
*/
uint32_t move_count = MAX((BTREE_CORE_ELEMS / (tree->bt_bulk == NULL ?
2 : 4)) - 1, 2);
uint32_t keep_count = BTREE_CORE_ELEMS - move_count - 1;
ASSERT3U(BTREE_CORE_ELEMS - move_count, >=, 2);
tree->bt_num_nodes++;
zfs_btree_core_t *new_parent = kmem_alloc(sizeof (zfs_btree_core_t) +
BTREE_CORE_ELEMS * size, KM_SLEEP);
zfs_btree_hdr_t *new_par_hdr = &new_parent->btc_hdr;
new_par_hdr->bth_parent = par_hdr->bth_parent;
new_par_hdr->bth_first = -1;
new_par_hdr->bth_count = move_count;
zfs_btree_poison_node(tree, new_par_hdr);
par_hdr->bth_count = keep_count;
bt_transfer_core(tree, parent, keep_count + 1, move_count, new_parent,
0, BSS_TRAPEZOID);
/* Store the new separator in a buffer. */
uint8_t *tmp_buf = kmem_alloc(size, KM_SLEEP);
bcpy(parent->btc_elems + keep_count * size, tmp_buf,
size);
zfs_btree_poison_node(tree, par_hdr);
if (offset < keep_count) {
/* Insert the new node into the left half */
zfs_btree_insert_core_impl(tree, parent, offset, new_node,
buf);
/*
* Move the new separator to the existing buffer.
*/
bcpy(tmp_buf, buf, size);
} else if (offset > keep_count) {
/* Insert the new node into the right half */
new_node->bth_parent = new_parent;
zfs_btree_insert_core_impl(tree, new_parent,
offset - keep_count - 1, new_node, buf);
/*
* Move the new separator to the existing buffer.
*/
bcpy(tmp_buf, buf, size);
} else {
/*
* Move the new separator into the right half, and replace it
* with buf. We also need to shift back the elements in the
* right half to accommodate new_node.
*/
bt_shift_core_right(tree, new_parent, 0, move_count,
BSS_TRAPEZOID);
new_parent->btc_children[0] = new_node;
bcpy(tmp_buf, new_parent->btc_elems, size);
new_par_hdr->bth_count++;
}
kmem_free(tmp_buf, size);
zfs_btree_poison_node(tree, par_hdr);
for (uint32_t i = 0; i <= new_parent->btc_hdr.bth_count; i++)
new_parent->btc_children[i]->bth_parent = new_parent;
for (uint32_t i = 0; i <= parent->btc_hdr.bth_count; i++)
ASSERT3P(parent->btc_children[i]->bth_parent, ==, parent);
/*
* Now that the node is split, we need to insert the new node into its
* parent. This may cause further splitting.
*/
zfs_btree_insert_into_parent(tree, &parent->btc_hdr,
&new_parent->btc_hdr, buf);
}
/* Insert an element into a leaf node at the given offset. */
static void
zfs_btree_insert_leaf_impl(zfs_btree_t *tree, zfs_btree_leaf_t *leaf,
uint32_t idx, const void *value)
{
size_t size = tree->bt_elem_size;
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
ASSERT3U(leaf->btl_hdr.bth_count, <, tree->bt_leaf_cap);
if (zfs_btree_verify_intensity >= 5) {
zfs_btree_verify_poison_at(tree, &leaf->btl_hdr,
leaf->btl_hdr.bth_count);
}
bt_grow_leaf(tree, leaf, idx, 1);
uint8_t *start = leaf->btl_elems + (hdr->bth_first + idx) * size;
bcpy(value, start, size);
}
static void
zfs_btree_verify_order_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr);
/* Helper function for inserting a new value into leaf at the given index. */
static void
zfs_btree_insert_into_leaf(zfs_btree_t *tree, zfs_btree_leaf_t *leaf,
const void *value, uint32_t idx)
{
size_t size = tree->bt_elem_size;
uint32_t capacity = tree->bt_leaf_cap;
/*
* If the leaf isn't full, shift the elements after idx and insert
* value.
*/
if (leaf->btl_hdr.bth_count != capacity) {
zfs_btree_insert_leaf_impl(tree, leaf, idx, value);
return;
}
/*
* Otherwise, we split the leaf node into two nodes. If we're not bulk
* inserting, each is of size (capacity / 2). If we are bulk
* inserting, we move a quarter of the elements to the new node so
* inserts into the old node don't cause immediate splitting but the
* tree stays relatively dense. Since the average state after a long
* time is a 3/4 full node, shortcutting directly to that state
* improves efficiency. At the end of the bulk insertion process
* we'll need to go through and fix up any nodes (the last leaf and
* its ancestors, potentially) that are below the minimum.
*
* In either case, we're left with one extra element. The leftover
* element will become the new dividing element between the two nodes.
*/
uint32_t move_count = MAX(capacity / (tree->bt_bulk ? 4 : 2), 1) - 1;
uint32_t keep_count = capacity - move_count - 1;
ASSERT3U(keep_count, >=, 1);
/* If we insert on the left, move one more to keep leaves balanced. */
if (idx < keep_count) {
keep_count--;
move_count++;
}
tree->bt_num_nodes++;
zfs_btree_leaf_t *new_leaf = zfs_btree_leaf_alloc(tree);
zfs_btree_hdr_t *new_hdr = &new_leaf->btl_hdr;
new_hdr->bth_parent = leaf->btl_hdr.bth_parent;
new_hdr->bth_first = (tree->bt_bulk ? 0 : capacity / 4) +
(idx >= keep_count && idx <= keep_count + move_count / 2);
new_hdr->bth_count = move_count;
zfs_btree_poison_node(tree, new_hdr);
if (tree->bt_bulk != NULL && leaf == tree->bt_bulk)
tree->bt_bulk = new_leaf;
/* Copy the back part to the new leaf. */
bt_transfer_leaf(tree, leaf, keep_count + 1, move_count, new_leaf, 0);
/* We store the new separator in a buffer we control for simplicity. */
uint8_t *buf = kmem_alloc(size, KM_SLEEP);
bcpy(leaf->btl_elems + (leaf->btl_hdr.bth_first + keep_count) * size,
buf, size);
bt_shrink_leaf(tree, leaf, keep_count, 1 + move_count);
if (idx < keep_count) {
/* Insert into the existing leaf. */
zfs_btree_insert_leaf_impl(tree, leaf, idx, value);
} else if (idx > keep_count) {
/* Insert into the new leaf. */
zfs_btree_insert_leaf_impl(tree, new_leaf, idx - keep_count -
1, value);
} else {
/*
* Insert planned separator into the new leaf, and use
* the new value as the new separator.
*/
zfs_btree_insert_leaf_impl(tree, new_leaf, 0, buf);
bcpy(value, buf, size);
}
/*
* Now that the node is split, we need to insert the new node into its
* parent. This may cause further splitting, but only of core nodes.
*/
zfs_btree_insert_into_parent(tree, &leaf->btl_hdr, &new_leaf->btl_hdr,
buf);
kmem_free(buf, size);
}
static uint32_t
zfs_btree_find_parent_idx(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
void *buf;
if (zfs_btree_is_core(hdr)) {
buf = ((zfs_btree_core_t *)hdr)->btc_elems;
} else {
buf = ((zfs_btree_leaf_t *)hdr)->btl_elems +
hdr->bth_first * tree->bt_elem_size;
}
zfs_btree_index_t idx;
zfs_btree_core_t *parent = hdr->bth_parent;
- VERIFY3P(zfs_btree_find_in_buf(tree, parent->btc_elems,
+ VERIFY3P(tree->bt_find_in_buf(tree, parent->btc_elems,
parent->btc_hdr.bth_count, buf, &idx), ==, NULL);
ASSERT(idx.bti_before);
ASSERT3U(idx.bti_offset, <=, parent->btc_hdr.bth_count);
ASSERT3P(parent->btc_children[idx.bti_offset], ==, hdr);
return (idx.bti_offset);
}
/*
* Take the b-tree out of bulk insert mode. During bulk-insert mode, some
* nodes may violate the invariant that non-root nodes must be at least half
* full. All nodes violating this invariant should be the last node in their
* particular level. To correct the invariant, we take values from their left
* neighbor until they are half full. They must have a left neighbor at their
* level because the last node at a level is not the first node unless it's
* the root.
*/
static void
zfs_btree_bulk_finish(zfs_btree_t *tree)
{
ASSERT3P(tree->bt_bulk, !=, NULL);
ASSERT3P(tree->bt_root, !=, NULL);
zfs_btree_leaf_t *leaf = tree->bt_bulk;
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
zfs_btree_core_t *parent = hdr->bth_parent;
size_t size = tree->bt_elem_size;
uint32_t capacity = tree->bt_leaf_cap;
/*
* The invariant doesn't apply to the root node; if that's the only
* node in the tree, we're done.
*/
if (parent == NULL) {
tree->bt_bulk = NULL;
return;
}
/* First, take elements to rebalance the leaf node. */
if (hdr->bth_count < capacity / 2) {
/*
* First, find the left neighbor. The simplest way to do this
* is to call zfs_btree_prev twice; the first time finds some
* ancestor of this node, and the second time finds the left
* neighbor. The ancestor found is the lowest common ancestor
* of leaf and the neighbor.
*/
zfs_btree_index_t idx = {
.bti_node = hdr,
.bti_offset = 0
};
VERIFY3P(zfs_btree_prev(tree, &idx, &idx), !=, NULL);
ASSERT(zfs_btree_is_core(idx.bti_node));
zfs_btree_core_t *common = (zfs_btree_core_t *)idx.bti_node;
uint32_t common_idx = idx.bti_offset;
VERIFY3P(zfs_btree_prev(tree, &idx, &idx), !=, NULL);
ASSERT(!zfs_btree_is_core(idx.bti_node));
zfs_btree_leaf_t *l_neighbor = (zfs_btree_leaf_t *)idx.bti_node;
zfs_btree_hdr_t *l_hdr = idx.bti_node;
uint32_t move_count = (capacity / 2) - hdr->bth_count;
ASSERT3U(l_neighbor->btl_hdr.bth_count - move_count, >=,
capacity / 2);
if (zfs_btree_verify_intensity >= 5) {
for (uint32_t i = 0; i < move_count; i++) {
zfs_btree_verify_poison_at(tree, hdr,
leaf->btl_hdr.bth_count + i);
}
}
/* First, shift elements in leaf back. */
bt_grow_leaf(tree, leaf, 0, move_count);
/* Next, move the separator from the common ancestor to leaf. */
uint8_t *separator = common->btc_elems + common_idx * size;
uint8_t *out = leaf->btl_elems +
(hdr->bth_first + move_count - 1) * size;
bcpy(separator, out, size);
/*
* Now we move elements from the tail of the left neighbor to
* fill the remaining spots in leaf.
*/
bt_transfer_leaf(tree, l_neighbor, l_hdr->bth_count -
(move_count - 1), move_count - 1, leaf, 0);
/*
* Finally, move the new last element in the left neighbor to
* the separator.
*/
bcpy(l_neighbor->btl_elems + (l_hdr->bth_first +
l_hdr->bth_count - move_count) * size, separator, size);
/* Adjust the node's counts, and we're done. */
bt_shrink_leaf(tree, l_neighbor, l_hdr->bth_count - move_count,
move_count);
ASSERT3U(l_hdr->bth_count, >=, capacity / 2);
ASSERT3U(hdr->bth_count, >=, capacity / 2);
}
/*
* Now we have to rebalance any ancestors of leaf that may also
* violate the invariant.
*/
capacity = BTREE_CORE_ELEMS;
while (parent->btc_hdr.bth_parent != NULL) {
zfs_btree_core_t *cur = parent;
zfs_btree_hdr_t *hdr = &cur->btc_hdr;
parent = hdr->bth_parent;
/*
* If the invariant isn't violated, move on to the next
* ancestor.
*/
if (hdr->bth_count >= capacity / 2)
continue;
/*
* Because the smallest number of nodes we can move when
* splitting is 2, we never need to worry about not having a
* left sibling (a sibling is a neighbor with the same parent).
*/
uint32_t parent_idx = zfs_btree_find_parent_idx(tree, hdr);
ASSERT3U(parent_idx, >, 0);
zfs_btree_core_t *l_neighbor =
(zfs_btree_core_t *)parent->btc_children[parent_idx - 1];
uint32_t move_count = (capacity / 2) - hdr->bth_count;
ASSERT3U(l_neighbor->btc_hdr.bth_count - move_count, >=,
capacity / 2);
if (zfs_btree_verify_intensity >= 5) {
for (uint32_t i = 0; i < move_count; i++) {
zfs_btree_verify_poison_at(tree, hdr,
hdr->bth_count + i);
}
}
/* First, shift things in the right node back. */
bt_shift_core(tree, cur, 0, hdr->bth_count, move_count,
BSS_TRAPEZOID, BSD_RIGHT);
/* Next, move the separator to the right node. */
uint8_t *separator = parent->btc_elems + ((parent_idx - 1) *
size);
uint8_t *e_out = cur->btc_elems + ((move_count - 1) * size);
bcpy(separator, e_out, size);
/*
* Now, move elements and children from the left node to the
* right. We move one more child than elements.
*/
move_count--;
uint32_t move_idx = l_neighbor->btc_hdr.bth_count - move_count;
bt_transfer_core(tree, l_neighbor, move_idx, move_count, cur, 0,
BSS_TRAPEZOID);
/*
* Finally, move the last element in the left node to the
* separator's position.
*/
move_idx--;
bcpy(l_neighbor->btc_elems + move_idx * size, separator, size);
l_neighbor->btc_hdr.bth_count -= move_count + 1;
hdr->bth_count += move_count + 1;
ASSERT3U(l_neighbor->btc_hdr.bth_count, >=, capacity / 2);
ASSERT3U(hdr->bth_count, >=, capacity / 2);
zfs_btree_poison_node(tree, &l_neighbor->btc_hdr);
for (uint32_t i = 0; i <= hdr->bth_count; i++)
cur->btc_children[i]->bth_parent = cur;
}
tree->bt_bulk = NULL;
zfs_btree_verify(tree);
}
/*
* Insert value into tree at the location specified by where.
*/
void
zfs_btree_add_idx(zfs_btree_t *tree, const void *value,
const zfs_btree_index_t *where)
{
zfs_btree_index_t idx = {0};
/* If we're not inserting in the last leaf, end bulk insert mode. */
if (tree->bt_bulk != NULL) {
if (where->bti_node != &tree->bt_bulk->btl_hdr) {
zfs_btree_bulk_finish(tree);
VERIFY3P(zfs_btree_find(tree, value, &idx), ==, NULL);
where = &idx;
}
}
tree->bt_num_elems++;
/*
* If this is the first element in the tree, create a leaf root node
* and add the value to it.
*/
if (where->bti_node == NULL) {
ASSERT3U(tree->bt_num_elems, ==, 1);
ASSERT3S(tree->bt_height, ==, -1);
ASSERT3P(tree->bt_root, ==, NULL);
ASSERT0(where->bti_offset);
tree->bt_num_nodes++;
zfs_btree_leaf_t *leaf = zfs_btree_leaf_alloc(tree);
tree->bt_root = &leaf->btl_hdr;
tree->bt_height++;
zfs_btree_hdr_t *hdr = &leaf->btl_hdr;
hdr->bth_parent = NULL;
hdr->bth_first = 0;
hdr->bth_count = 0;
zfs_btree_poison_node(tree, hdr);
zfs_btree_insert_into_leaf(tree, leaf, value, 0);
tree->bt_bulk = leaf;
} else if (!zfs_btree_is_core(where->bti_node)) {
/*
* If we're inserting into a leaf, go directly to the helper
* function.
*/
zfs_btree_insert_into_leaf(tree,
(zfs_btree_leaf_t *)where->bti_node, value,
where->bti_offset);
} else {
/*
* If we're inserting into a core node, we can't just shift
* the existing element in that slot in the same node without
* breaking our ordering invariants. Instead we place the new
* value in the node at that spot and then insert the old
* separator into the first slot in the subtree to the right.
*/
zfs_btree_core_t *node = (zfs_btree_core_t *)where->bti_node;
/*
* We can ignore bti_before, because either way the value
* should end up in bti_offset.
*/
uint32_t off = where->bti_offset;
zfs_btree_hdr_t *subtree = node->btc_children[off + 1];
size_t size = tree->bt_elem_size;
uint8_t *buf = kmem_alloc(size, KM_SLEEP);
bcpy(node->btc_elems + off * size, buf, size);
bcpy(value, node->btc_elems + off * size, size);
/*
* Find the first slot in the subtree to the right, insert
* there.
*/
zfs_btree_index_t new_idx;
VERIFY3P(zfs_btree_first_helper(tree, subtree, &new_idx), !=,
NULL);
ASSERT0(new_idx.bti_offset);
ASSERT(!zfs_btree_is_core(new_idx.bti_node));
zfs_btree_insert_into_leaf(tree,
(zfs_btree_leaf_t *)new_idx.bti_node, buf, 0);
kmem_free(buf, size);
}
zfs_btree_verify(tree);
}
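/*
 * Illustrative usage sketch (not part of the original source): appending
 * values in ascending order keeps the tree in bulk-insert mode (each insert
 * lands in bt_bulk, the last leaf), so loading a pre-sorted data set takes
 * the cheaper bulk path above. The sorted array input[] and its length n
 * are hypothetical.
 *
 *	for (size_t i = 0; i < n; i++)
 *		zfs_btree_add(&bt, &input[i]);
 */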
/*
* Return the first element in the tree, and put its location in where if
* non-null.
*/
void *
zfs_btree_first(zfs_btree_t *tree, zfs_btree_index_t *where)
{
if (tree->bt_height == -1) {
ASSERT0(tree->bt_num_elems);
return (NULL);
}
return (zfs_btree_first_helper(tree, tree->bt_root, where));
}
/*
* Find the last element in the subtree rooted at hdr, return its value and
* put its location in where if non-null.
*/
static void *
zfs_btree_last_helper(zfs_btree_t *btree, zfs_btree_hdr_t *hdr,
zfs_btree_index_t *where)
{
zfs_btree_hdr_t *node;
for (node = hdr; zfs_btree_is_core(node); node =
((zfs_btree_core_t *)node)->btc_children[node->bth_count])
;
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)node;
if (where != NULL) {
where->bti_node = node;
where->bti_offset = node->bth_count - 1;
where->bti_before = B_FALSE;
}
return (leaf->btl_elems + (node->bth_first + node->bth_count - 1) *
btree->bt_elem_size);
}
/*
* Return the last element in the tree, and put its location in where if
* non-null.
*/
void *
zfs_btree_last(zfs_btree_t *tree, zfs_btree_index_t *where)
{
if (tree->bt_height == -1) {
ASSERT0(tree->bt_num_elems);
return (NULL);
}
return (zfs_btree_last_helper(tree, tree->bt_root, where));
}
/*
* This function contains the logic to find the next node in the tree. A
* helper function is used because there are multiple internal consumers of
* this logic. The done_func is used by zfs_btree_destroy_nodes to clean up each
* node after we've finished with it.
*/
static void *
zfs_btree_next_helper(zfs_btree_t *tree, const zfs_btree_index_t *idx,
zfs_btree_index_t *out_idx,
void (*done_func)(zfs_btree_t *, zfs_btree_hdr_t *))
{
if (idx->bti_node == NULL) {
ASSERT3S(tree->bt_height, ==, -1);
return (NULL);
}
uint32_t offset = idx->bti_offset;
if (!zfs_btree_is_core(idx->bti_node)) {
/*
* When finding the next element of an element in a leaf,
* there are two cases. If the element isn't the last one in
* the leaf, we just return the next element in the leaf.
* Otherwise, we need to traverse up our parents
* until we find one where our ancestor isn't the last child
* of its parent. Once we do, the next element is the
* separator after our ancestor in its parent.
*/
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)idx->bti_node;
uint32_t new_off = offset + (idx->bti_before ? 0 : 1);
if (leaf->btl_hdr.bth_count > new_off) {
out_idx->bti_node = &leaf->btl_hdr;
out_idx->bti_offset = new_off;
out_idx->bti_before = B_FALSE;
return (leaf->btl_elems + (leaf->btl_hdr.bth_first +
new_off) * tree->bt_elem_size);
}
zfs_btree_hdr_t *prev = &leaf->btl_hdr;
for (zfs_btree_core_t *node = leaf->btl_hdr.bth_parent;
node != NULL; node = node->btc_hdr.bth_parent) {
zfs_btree_hdr_t *hdr = &node->btc_hdr;
ASSERT(zfs_btree_is_core(hdr));
uint32_t i = zfs_btree_find_parent_idx(tree, prev);
if (done_func != NULL)
done_func(tree, prev);
if (i == hdr->bth_count) {
prev = hdr;
continue;
}
out_idx->bti_node = hdr;
out_idx->bti_offset = i;
out_idx->bti_before = B_FALSE;
return (node->btc_elems + i * tree->bt_elem_size);
}
if (done_func != NULL)
done_func(tree, prev);
/*
* We've traversed all the way up and been at the end of the
* node every time, so this was the last element in the tree.
*/
return (NULL);
}
/* If we were before an element in a core node, return that element. */
ASSERT(zfs_btree_is_core(idx->bti_node));
zfs_btree_core_t *node = (zfs_btree_core_t *)idx->bti_node;
if (idx->bti_before) {
out_idx->bti_before = B_FALSE;
return (node->btc_elems + offset * tree->bt_elem_size);
}
/*
* The next element from one in a core node is the first element in
* the subtree just to the right of the separator.
*/
zfs_btree_hdr_t *child = node->btc_children[offset + 1];
return (zfs_btree_first_helper(tree, child, out_idx));
}
/*
* Return the next element in the tree. The same address can be safely
* passed for idx and out_idx.
*/
void *
zfs_btree_next(zfs_btree_t *tree, const zfs_btree_index_t *idx,
zfs_btree_index_t *out_idx)
{
return (zfs_btree_next_helper(tree, idx, out_idx, NULL));
}
/*
* Return the previous element in the tree. The same address can be safely
* passed for idx and out_idx.
*/
void *
zfs_btree_prev(zfs_btree_t *tree, const zfs_btree_index_t *idx,
zfs_btree_index_t *out_idx)
{
if (idx->bti_node == NULL) {
ASSERT3S(tree->bt_height, ==, -1);
return (NULL);
}
uint32_t offset = idx->bti_offset;
if (!zfs_btree_is_core(idx->bti_node)) {
/*
* When finding the previous element of an element in a leaf,
* there are two cases. If the element isn't the first one in
* the leaf, we just return the previous element in the leaf.
* Otherwise, we need to traverse up our parents
* until we find one where our previous ancestor isn't the
* first child. Once we do, the previous element is the
* separator after our previous ancestor.
*/
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)idx->bti_node;
if (offset != 0) {
out_idx->bti_node = &leaf->btl_hdr;
out_idx->bti_offset = offset - 1;
out_idx->bti_before = B_FALSE;
return (leaf->btl_elems + (leaf->btl_hdr.bth_first +
offset - 1) * tree->bt_elem_size);
}
zfs_btree_hdr_t *prev = &leaf->btl_hdr;
for (zfs_btree_core_t *node = leaf->btl_hdr.bth_parent;
node != NULL; node = node->btc_hdr.bth_parent) {
zfs_btree_hdr_t *hdr = &node->btc_hdr;
ASSERT(zfs_btree_is_core(hdr));
uint32_t i = zfs_btree_find_parent_idx(tree, prev);
if (i == 0) {
prev = hdr;
continue;
}
out_idx->bti_node = hdr;
out_idx->bti_offset = i - 1;
out_idx->bti_before = B_FALSE;
return (node->btc_elems + (i - 1) * tree->bt_elem_size);
}
/*
* We've traversed all the way up and been at the start of the
* node every time, so this was the first node in the tree.
*/
return (NULL);
}
/*
* The previous element from one in a core node is the last element in
* the subtree just to the left of the separator.
*/
ASSERT(zfs_btree_is_core(idx->bti_node));
zfs_btree_core_t *node = (zfs_btree_core_t *)idx->bti_node;
zfs_btree_hdr_t *child = node->btc_children[offset];
return (zfs_btree_last_helper(tree, child, out_idx));
}
/*
* Get the value at the provided index in the tree.
*
* Note that the value returned from this function can be mutated, but only
* if it will not change the ordering of the element with respect to any other
* elements that could be in the tree.
*/
void *
zfs_btree_get(zfs_btree_t *tree, zfs_btree_index_t *idx)
{
ASSERT(!idx->bti_before);
size_t size = tree->bt_elem_size;
if (!zfs_btree_is_core(idx->bti_node)) {
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)idx->bti_node;
return (leaf->btl_elems + (leaf->btl_hdr.bth_first +
idx->bti_offset) * size);
}
zfs_btree_core_t *node = (zfs_btree_core_t *)idx->bti_node;
return (node->btc_elems + idx->bti_offset * size);
}
/* Add the given value to the tree. Must not already be in the tree. */
void
zfs_btree_add(zfs_btree_t *tree, const void *node)
{
zfs_btree_index_t where = {0};
VERIFY3P(zfs_btree_find(tree, node, &where), ==, NULL);
zfs_btree_add_idx(tree, node, &where);
}
/* Helper function to free a tree node. */
static void
zfs_btree_node_destroy(zfs_btree_t *tree, zfs_btree_hdr_t *node)
{
tree->bt_num_nodes--;
if (!zfs_btree_is_core(node)) {
zfs_btree_leaf_free(tree, node);
} else {
kmem_free(node, sizeof (zfs_btree_core_t) +
BTREE_CORE_ELEMS * tree->bt_elem_size);
}
}
/*
* Remove the rm_hdr and the separator to its left from the parent node. The
* buffer that rm_hdr was stored in may already be freed, so its contents
* cannot be accessed.
*/
static void
zfs_btree_remove_from_node(zfs_btree_t *tree, zfs_btree_core_t *node,
zfs_btree_hdr_t *rm_hdr)
{
size_t size = tree->bt_elem_size;
uint32_t min_count = (BTREE_CORE_ELEMS / 2) - 1;
zfs_btree_hdr_t *hdr = &node->btc_hdr;
/*
* If the node is the root node and rm_hdr is one of two children,
* promote the other child to the root.
*/
if (hdr->bth_parent == NULL && hdr->bth_count <= 1) {
ASSERT3U(hdr->bth_count, ==, 1);
ASSERT3P(tree->bt_root, ==, node);
ASSERT3P(node->btc_children[1], ==, rm_hdr);
tree->bt_root = node->btc_children[0];
node->btc_children[0]->bth_parent = NULL;
zfs_btree_node_destroy(tree, hdr);
tree->bt_height--;
return;
}
uint32_t idx;
for (idx = 0; idx <= hdr->bth_count; idx++) {
if (node->btc_children[idx] == rm_hdr)
break;
}
ASSERT3U(idx, <=, hdr->bth_count);
/*
* If the node is the root or it has more than the minimum number of
* children, just remove the child and separator, and return.
*/
if (hdr->bth_parent == NULL ||
hdr->bth_count > min_count) {
/*
* Shift the element and children to the right of rm_hdr to
* the left by one spot.
*/
bt_shift_core_left(tree, node, idx, hdr->bth_count - idx,
BSS_PARALLELOGRAM);
hdr->bth_count--;
zfs_btree_poison_node_at(tree, hdr, hdr->bth_count, 1);
return;
}
ASSERT3U(hdr->bth_count, ==, min_count);
/*
* Now we try to take a node from a neighbor. We check left, then
* right. If the neighbor exists and has more than the minimum number
* of elements, we move the separator between us and them to our
* node, move their closest element (last for left, first for right)
* to the separator, and move their closest child to our node. Along
* the way we need to collapse the gap made by idx, and (for our right
* neighbor) the gap made by removing their first element and child.
*
* Note: this logic currently doesn't support taking from a neighbor
* that isn't a sibling (i.e. a neighbor with a different
* parent). This isn't critical functionality, but may be worth
* implementing in the future for completeness' sake.
*/
zfs_btree_core_t *parent = hdr->bth_parent;
uint32_t parent_idx = zfs_btree_find_parent_idx(tree, hdr);
zfs_btree_hdr_t *l_hdr = (parent_idx == 0 ? NULL :
parent->btc_children[parent_idx - 1]);
if (l_hdr != NULL && l_hdr->bth_count > min_count) {
/* We can take a node from the left neighbor. */
ASSERT(zfs_btree_is_core(l_hdr));
zfs_btree_core_t *neighbor = (zfs_btree_core_t *)l_hdr;
/*
* Start by shifting the elements and children in the current
* node to the right by one spot.
*/
bt_shift_core_right(tree, node, 0, idx - 1, BSS_TRAPEZOID);
/*
* Move the separator between node and neighbor to the first
* element slot in the current node.
*/
uint8_t *separator = parent->btc_elems + (parent_idx - 1) *
size;
bcpy(separator, node->btc_elems, size);
/* Move the last child of neighbor to our first child slot. */
node->btc_children[0] =
neighbor->btc_children[l_hdr->bth_count];
node->btc_children[0]->bth_parent = node;
/* Move the last element of neighbor to the separator spot. */
uint8_t *take_elem = neighbor->btc_elems +
(l_hdr->bth_count - 1) * size;
bcpy(take_elem, separator, size);
l_hdr->bth_count--;
zfs_btree_poison_node_at(tree, l_hdr, l_hdr->bth_count, 1);
return;
}
zfs_btree_hdr_t *r_hdr = (parent_idx == parent->btc_hdr.bth_count ?
NULL : parent->btc_children[parent_idx + 1]);
if (r_hdr != NULL && r_hdr->bth_count > min_count) {
/* We can take a node from the right neighbor. */
ASSERT(zfs_btree_is_core(r_hdr));
zfs_btree_core_t *neighbor = (zfs_btree_core_t *)r_hdr;
/*
* Shift elements in node left by one spot to overwrite rm_hdr
* and the separator before it.
*/
bt_shift_core_left(tree, node, idx, hdr->bth_count - idx,
BSS_PARALLELOGRAM);
/*
* Move the separator between node and neighbor to the last
* element spot in node.
*/
uint8_t *separator = parent->btc_elems + parent_idx * size;
bcpy(separator, node->btc_elems + (hdr->bth_count - 1) * size,
size);
/*
* Move the first child of neighbor to the last child spot in
* node.
*/
node->btc_children[hdr->bth_count] = neighbor->btc_children[0];
node->btc_children[hdr->bth_count]->bth_parent = node;
/* Move the first element of neighbor to the separator spot. */
uint8_t *take_elem = neighbor->btc_elems;
bcpy(take_elem, separator, size);
r_hdr->bth_count--;
/*
* Shift the elements and children of neighbor to cover the
* stolen elements.
*/
bt_shift_core_left(tree, neighbor, 1, r_hdr->bth_count,
BSS_TRAPEZOID);
zfs_btree_poison_node_at(tree, r_hdr, r_hdr->bth_count, 1);
return;
}
/*
* In this case, neither of our neighbors can spare an element, so we
* need to merge with one of them. We prefer the left one,
* arbitrarily. Move the separator into the leftmost merging node
* (which may be us or the left neighbor), and then move the right
* merging node's elements. Once that's done, we go back and delete
* the element we're removing. Finally, go into the parent and delete
* the right merging node and the separator. This may cause further
* merging.
*/
zfs_btree_hdr_t *new_rm_hdr, *keep_hdr;
uint32_t new_idx = idx;
if (l_hdr != NULL) {
keep_hdr = l_hdr;
new_rm_hdr = hdr;
new_idx += keep_hdr->bth_count + 1;
} else {
ASSERT3P(r_hdr, !=, NULL);
keep_hdr = hdr;
new_rm_hdr = r_hdr;
parent_idx++;
}
ASSERT(zfs_btree_is_core(keep_hdr));
ASSERT(zfs_btree_is_core(new_rm_hdr));
zfs_btree_core_t *keep = (zfs_btree_core_t *)keep_hdr;
zfs_btree_core_t *rm = (zfs_btree_core_t *)new_rm_hdr;
if (zfs_btree_verify_intensity >= 5) {
for (uint32_t i = 0; i < new_rm_hdr->bth_count + 1; i++) {
zfs_btree_verify_poison_at(tree, keep_hdr,
keep_hdr->bth_count + i);
}
}
/* Move the separator into the left node. */
uint8_t *e_out = keep->btc_elems + keep_hdr->bth_count * size;
uint8_t *separator = parent->btc_elems + (parent_idx - 1) *
size;
bcpy(separator, e_out, size);
keep_hdr->bth_count++;
/* Move all our elements and children into the left node. */
bt_transfer_core(tree, rm, 0, new_rm_hdr->bth_count, keep,
keep_hdr->bth_count, BSS_TRAPEZOID);
uint32_t old_count = keep_hdr->bth_count;
/* Update bookkeeping */
keep_hdr->bth_count += new_rm_hdr->bth_count;
ASSERT3U(keep_hdr->bth_count, ==, (min_count * 2) + 1);
/*
* Shift the element and children to the right of rm_hdr to
* the left by one spot.
*/
ASSERT3P(keep->btc_children[new_idx], ==, rm_hdr);
bt_shift_core_left(tree, keep, new_idx, keep_hdr->bth_count - new_idx,
BSS_PARALLELOGRAM);
keep_hdr->bth_count--;
/* Reparent all our children to point to the left node. */
zfs_btree_hdr_t **new_start = keep->btc_children +
old_count - 1;
for (uint32_t i = 0; i < new_rm_hdr->bth_count + 1; i++)
new_start[i]->bth_parent = keep;
for (uint32_t i = 0; i <= keep_hdr->bth_count; i++) {
ASSERT3P(keep->btc_children[i]->bth_parent, ==, keep);
ASSERT3P(keep->btc_children[i], !=, rm_hdr);
}
zfs_btree_poison_node_at(tree, keep_hdr, keep_hdr->bth_count, 1);
new_rm_hdr->bth_count = 0;
zfs_btree_remove_from_node(tree, parent, new_rm_hdr);
zfs_btree_node_destroy(tree, new_rm_hdr);
}
/* Remove the element at the specific location. */
void
zfs_btree_remove_idx(zfs_btree_t *tree, zfs_btree_index_t *where)
{
size_t size = tree->bt_elem_size;
zfs_btree_hdr_t *hdr = where->bti_node;
uint32_t idx = where->bti_offset;
ASSERT(!where->bti_before);
if (tree->bt_bulk != NULL) {
/*
* Leave bulk insert mode. Note that our index would be
* invalid after we correct the tree, so we copy the value
* we're planning to remove and find it again after
* bulk_finish.
*/
uint8_t *value = zfs_btree_get(tree, where);
uint8_t *tmp = kmem_alloc(size, KM_SLEEP);
bcpy(value, tmp, size);
zfs_btree_bulk_finish(tree);
VERIFY3P(zfs_btree_find(tree, tmp, where), !=, NULL);
kmem_free(tmp, size);
hdr = where->bti_node;
idx = where->bti_offset;
}
tree->bt_num_elems--;
/*
* If the element happens to be in a core node, we move a leaf node's
* element into its place and then remove the leaf node element. This
* way the rebalance logic does not need to recurse both upwards and
* downwards.
*/
if (zfs_btree_is_core(hdr)) {
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
zfs_btree_hdr_t *left_subtree = node->btc_children[idx];
void *new_value = zfs_btree_last_helper(tree, left_subtree,
where);
ASSERT3P(new_value, !=, NULL);
bcpy(new_value, node->btc_elems + idx * size, size);
hdr = where->bti_node;
idx = where->bti_offset;
ASSERT(!where->bti_before);
}
/*
* First, we'll update the leaf's metadata. Then, we shift any
* elements after the idx to the left. After that, we rebalance if
* needed.
*/
ASSERT(!zfs_btree_is_core(hdr));
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
ASSERT3U(hdr->bth_count, >, 0);
uint32_t min_count = (tree->bt_leaf_cap / 2) - 1;
/*
* If we're over the minimum size or this is the root, just overwrite
* the value and return.
*/
if (hdr->bth_count > min_count || hdr->bth_parent == NULL) {
bt_shrink_leaf(tree, leaf, idx, 1);
if (hdr->bth_parent == NULL) {
ASSERT0(tree->bt_height);
if (hdr->bth_count == 0) {
tree->bt_root = NULL;
tree->bt_height--;
zfs_btree_node_destroy(tree, &leaf->btl_hdr);
}
}
zfs_btree_verify(tree);
return;
}
ASSERT3U(hdr->bth_count, ==, min_count);
/*
* Now we try to take a node from a sibling. We check left, then
* right. If they exist and have more than the minimum number of
* elements, we move the separator between us and them to our node
* and move their closest element (last for left, first for right) to
* the separator. Along the way we need to collapse the gap made by
* idx, and (for our right neighbor) the gap made by removing their
* first element.
*
* Note: this logic currently doesn't support taking from a neighbor
* that isn't a sibling. This isn't critical functionality, but may be
* worth implementing in the future for completeness' sake.
*/
zfs_btree_core_t *parent = hdr->bth_parent;
uint32_t parent_idx = zfs_btree_find_parent_idx(tree, hdr);
zfs_btree_hdr_t *l_hdr = (parent_idx == 0 ? NULL :
parent->btc_children[parent_idx - 1]);
if (l_hdr != NULL && l_hdr->bth_count > min_count) {
/* We can take a node from the left neighbor. */
ASSERT(!zfs_btree_is_core(l_hdr));
zfs_btree_leaf_t *neighbor = (zfs_btree_leaf_t *)l_hdr;
/*
* Move our elements back by one spot to make room for the
* stolen element and overwrite the element being removed.
*/
bt_shift_leaf(tree, leaf, 0, idx, 1, BSD_RIGHT);
/* Move the separator to our first spot. */
uint8_t *separator = parent->btc_elems + (parent_idx - 1) *
size;
bcpy(separator, leaf->btl_elems + hdr->bth_first * size, size);
/* Move our neighbor's last element to the separator. */
uint8_t *take_elem = neighbor->btl_elems +
(l_hdr->bth_first + l_hdr->bth_count - 1) * size;
bcpy(take_elem, separator, size);
/* Delete our neighbor's last element. */
bt_shrink_leaf(tree, neighbor, l_hdr->bth_count - 1, 1);
zfs_btree_verify(tree);
return;
}
zfs_btree_hdr_t *r_hdr = (parent_idx == parent->btc_hdr.bth_count ?
NULL : parent->btc_children[parent_idx + 1]);
if (r_hdr != NULL && r_hdr->bth_count > min_count) {
/* We can take a node from the right neighbor. */
ASSERT(!zfs_btree_is_core(r_hdr));
zfs_btree_leaf_t *neighbor = (zfs_btree_leaf_t *)r_hdr;
/*
* Move our elements after the element being removed forwards
* by one spot to make room for the stolen element and
* overwrite the element being removed.
*/
bt_shift_leaf(tree, leaf, idx + 1, hdr->bth_count - idx - 1,
1, BSD_LEFT);
/* Move the separator between us to our last spot. */
uint8_t *separator = parent->btc_elems + parent_idx * size;
bcpy(separator, leaf->btl_elems + (hdr->bth_first +
hdr->bth_count - 1) * size, size);
/* Move our neighbor's first element to the separator. */
uint8_t *take_elem = neighbor->btl_elems +
r_hdr->bth_first * size;
bcpy(take_elem, separator, size);
/* Delete our neighbor's first element. */
bt_shrink_leaf(tree, neighbor, 0, 1);
zfs_btree_verify(tree);
return;
}
/*
* In this case, neither of our neighbors can spare an element, so we
* need to merge with one of them. We prefer the left one, arbitrarily.
* After the removal we move the separator into the leftmost merging node
* (which may be us or the left neighbor), and then move the right
* merging node's elements. Once that's done, we go back and delete
* the element we're removing. Finally, go into the parent and delete
* the right merging node and the separator. This may cause further
* merging.
*/
zfs_btree_hdr_t *rm_hdr, *k_hdr;
if (l_hdr != NULL) {
k_hdr = l_hdr;
rm_hdr = hdr;
} else {
ASSERT3P(r_hdr, !=, NULL);
k_hdr = hdr;
rm_hdr = r_hdr;
parent_idx++;
}
ASSERT(!zfs_btree_is_core(k_hdr));
ASSERT(!zfs_btree_is_core(rm_hdr));
ASSERT3U(k_hdr->bth_count, ==, min_count);
ASSERT3U(rm_hdr->bth_count, ==, min_count);
zfs_btree_leaf_t *keep = (zfs_btree_leaf_t *)k_hdr;
zfs_btree_leaf_t *rm = (zfs_btree_leaf_t *)rm_hdr;
if (zfs_btree_verify_intensity >= 5) {
for (uint32_t i = 0; i < rm_hdr->bth_count + 1; i++) {
zfs_btree_verify_poison_at(tree, k_hdr,
k_hdr->bth_count + i);
}
}
/*
* Remove the value from the node. It will go below the minimum,
* but we'll fix it in no time.
*/
bt_shrink_leaf(tree, leaf, idx, 1);
/* Prepare space for elements to be moved from the right. */
uint32_t k_count = k_hdr->bth_count;
bt_grow_leaf(tree, keep, k_count, 1 + rm_hdr->bth_count);
ASSERT3U(k_hdr->bth_count, ==, min_count * 2);
/* Move the separator into the first open spot. */
uint8_t *out = keep->btl_elems + (k_hdr->bth_first + k_count) * size;
uint8_t *separator = parent->btc_elems + (parent_idx - 1) * size;
bcpy(separator, out, size);
/* Move our elements to the left neighbor. */
bt_transfer_leaf(tree, rm, 0, rm_hdr->bth_count, keep, k_count + 1);
/* Remove the emptied node from the parent. */
zfs_btree_remove_from_node(tree, parent, rm_hdr);
zfs_btree_node_destroy(tree, rm_hdr);
zfs_btree_verify(tree);
}
/* Remove the given value from the tree. */
void
zfs_btree_remove(zfs_btree_t *tree, const void *value)
{
zfs_btree_index_t where = {0};
VERIFY3P(zfs_btree_find(tree, value, &where), !=, NULL);
zfs_btree_remove_idx(tree, &where);
}
/* Return the number of elements in the tree. */
ulong_t
zfs_btree_numnodes(zfs_btree_t *tree)
{
return (tree->bt_num_elems);
}
/*
* This function is used to visit all the elements in the tree before
* destroying the tree. This allows the calling code to perform any cleanup it
* needs to do. This is more efficient than just removing the first element
* over and over, because it removes all rebalancing. Once the destroy_nodes()
* function has been called, no other btree operations are valid until it
* returns NULL, at which point the only valid operation is zfs_btree_destroy().
*
* example:
*
* zfs_btree_index_t *cookie = NULL;
* my_data_t *node;
*
* while ((node = zfs_btree_destroy_nodes(tree, &cookie)) != NULL)
* free(node->ptr);
* zfs_btree_destroy(tree);
*
*/
void *
zfs_btree_destroy_nodes(zfs_btree_t *tree, zfs_btree_index_t **cookie)
{
if (*cookie == NULL) {
if (tree->bt_height == -1)
return (NULL);
*cookie = kmem_alloc(sizeof (**cookie), KM_SLEEP);
return (zfs_btree_first(tree, *cookie));
}
void *rval = zfs_btree_next_helper(tree, *cookie, *cookie,
zfs_btree_node_destroy);
if (rval == NULL) {
tree->bt_root = NULL;
tree->bt_height = -1;
tree->bt_num_elems = 0;
kmem_free(*cookie, sizeof (**cookie));
tree->bt_bulk = NULL;
}
return (rval);
}
static void
zfs_btree_clear_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
if (zfs_btree_is_core(hdr)) {
zfs_btree_core_t *btc = (zfs_btree_core_t *)hdr;
for (uint32_t i = 0; i <= hdr->bth_count; i++)
zfs_btree_clear_helper(tree, btc->btc_children[i]);
}
zfs_btree_node_destroy(tree, hdr);
}
void
zfs_btree_clear(zfs_btree_t *tree)
{
if (tree->bt_root == NULL) {
ASSERT0(tree->bt_num_elems);
return;
}
zfs_btree_clear_helper(tree, tree->bt_root);
tree->bt_num_elems = 0;
tree->bt_root = NULL;
tree->bt_num_nodes = 0;
tree->bt_height = -1;
tree->bt_bulk = NULL;
}
void
zfs_btree_destroy(zfs_btree_t *tree)
{
ASSERT0(tree->bt_num_elems);
ASSERT3P(tree->bt_root, ==, NULL);
}
/* Verify that every child of this node has the correct parent pointer. */
static void
zfs_btree_verify_pointers_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
if (!zfs_btree_is_core(hdr))
return;
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (uint32_t i = 0; i <= hdr->bth_count; i++) {
VERIFY3P(node->btc_children[i]->bth_parent, ==, hdr);
zfs_btree_verify_pointers_helper(tree, node->btc_children[i]);
}
}
/* Verify that every node has the correct parent pointer. */
static void
zfs_btree_verify_pointers(zfs_btree_t *tree)
{
if (tree->bt_height == -1) {
VERIFY3P(tree->bt_root, ==, NULL);
return;
}
VERIFY3P(tree->bt_root->bth_parent, ==, NULL);
zfs_btree_verify_pointers_helper(tree, tree->bt_root);
}
/*
* Verify that all the current node and its children satisfy the count
* invariants, and return the total count in the subtree rooted in this node.
*/
static uint64_t
zfs_btree_verify_counts_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
if (!zfs_btree_is_core(hdr)) {
if (tree->bt_root != hdr && tree->bt_bulk &&
hdr != &tree->bt_bulk->btl_hdr) {
VERIFY3U(hdr->bth_count, >=, tree->bt_leaf_cap / 2 - 1);
}
return (hdr->bth_count);
} else {
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
uint64_t ret = hdr->bth_count;
if (tree->bt_root != hdr && tree->bt_bulk == NULL)
VERIFY3P(hdr->bth_count, >=, BTREE_CORE_ELEMS / 2 - 1);
for (uint32_t i = 0; i <= hdr->bth_count; i++) {
ret += zfs_btree_verify_counts_helper(tree,
node->btc_children[i]);
}
return (ret);
}
}
/*
* Verify that all nodes satisfy the invariants and that the total number of
* elements is correct.
*/
static void
zfs_btree_verify_counts(zfs_btree_t *tree)
{
EQUIV(tree->bt_num_elems == 0, tree->bt_height == -1);
if (tree->bt_height == -1) {
return;
}
VERIFY3P(zfs_btree_verify_counts_helper(tree, tree->bt_root), ==,
tree->bt_num_elems);
}
/*
* Check that the subtree rooted at this node has a uniform height. Returns
* the number of nodes under this node, to help verify bt_num_nodes.
*/
static uint64_t
zfs_btree_verify_height_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr,
int32_t height)
{
if (!zfs_btree_is_core(hdr)) {
VERIFY0(height);
return (1);
}
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
uint64_t ret = 1;
for (uint32_t i = 0; i <= hdr->bth_count; i++) {
ret += zfs_btree_verify_height_helper(tree,
node->btc_children[i], height - 1);
}
return (ret);
}
/*
* Check that the tree rooted at this node has a uniform height, and that the
* bt_height in the tree is correct.
*/
static void
zfs_btree_verify_height(zfs_btree_t *tree)
{
EQUIV(tree->bt_height == -1, tree->bt_root == NULL);
if (tree->bt_height == -1) {
return;
}
VERIFY3U(zfs_btree_verify_height_helper(tree, tree->bt_root,
tree->bt_height), ==, tree->bt_num_nodes);
}
/*
* Check that the elements in this node are sorted, and that if this is a core
* node, the separators are properly between the subtrees they separate and
* that the children also satisfy this requirement.
*/
static void
zfs_btree_verify_order_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
size_t size = tree->bt_elem_size;
if (!zfs_btree_is_core(hdr)) {
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
for (uint32_t i = 1; i < hdr->bth_count; i++) {
VERIFY3S(tree->bt_compar(leaf->btl_elems +
(hdr->bth_first + i - 1) * size,
leaf->btl_elems +
(hdr->bth_first + i) * size), ==, -1);
}
return;
}
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (uint32_t i = 1; i < hdr->bth_count; i++) {
VERIFY3S(tree->bt_compar(node->btc_elems + (i - 1) * size,
node->btc_elems + i * size), ==, -1);
}
for (uint32_t i = 0; i < hdr->bth_count; i++) {
uint8_t *left_child_last = NULL;
zfs_btree_hdr_t *left_child_hdr = node->btc_children[i];
if (zfs_btree_is_core(left_child_hdr)) {
zfs_btree_core_t *left_child =
(zfs_btree_core_t *)left_child_hdr;
left_child_last = left_child->btc_elems +
(left_child_hdr->bth_count - 1) * size;
} else {
zfs_btree_leaf_t *left_child =
(zfs_btree_leaf_t *)left_child_hdr;
left_child_last = left_child->btl_elems +
(left_child_hdr->bth_first +
left_child_hdr->bth_count - 1) * size;
}
int comp = tree->bt_compar(node->btc_elems + i * size,
left_child_last);
if (comp <= 0) {
panic("btree: compar returned %d (expected 1) at "
"%px %d: compar(%px, %px)", comp, node, i,
node->btc_elems + i * size, left_child_last);
}
uint8_t *right_child_first = NULL;
zfs_btree_hdr_t *right_child_hdr = node->btc_children[i + 1];
if (zfs_btree_is_core(right_child_hdr)) {
zfs_btree_core_t *right_child =
(zfs_btree_core_t *)right_child_hdr;
right_child_first = right_child->btc_elems;
} else {
zfs_btree_leaf_t *right_child =
(zfs_btree_leaf_t *)right_child_hdr;
right_child_first = right_child->btl_elems +
right_child_hdr->bth_first * size;
}
comp = tree->bt_compar(node->btc_elems + i * size,
right_child_first);
if (comp >= 0) {
panic("btree: compar returned %d (expected -1) at "
"%px %d: compar(%px, %px)", comp, node, i,
node->btc_elems + i * size, right_child_first);
}
}
for (uint32_t i = 0; i <= hdr->bth_count; i++)
zfs_btree_verify_order_helper(tree, node->btc_children[i]);
}
/* Check that all elements in the tree are in sorted order. */
static void
zfs_btree_verify_order(zfs_btree_t *tree)
{
EQUIV(tree->bt_height == -1, tree->bt_root == NULL);
if (tree->bt_height == -1) {
return;
}
zfs_btree_verify_order_helper(tree, tree->bt_root);
}
#ifdef ZFS_DEBUG
/* Check that all unused memory is poisoned correctly. */
static void
zfs_btree_verify_poison_helper(zfs_btree_t *tree, zfs_btree_hdr_t *hdr)
{
size_t size = tree->bt_elem_size;
if (!zfs_btree_is_core(hdr)) {
zfs_btree_leaf_t *leaf = (zfs_btree_leaf_t *)hdr;
for (size_t i = 0; i < hdr->bth_first * size; i++)
VERIFY3U(leaf->btl_elems[i], ==, 0x0f);
size_t esize = tree->bt_leaf_size -
offsetof(zfs_btree_leaf_t, btl_elems);
for (size_t i = (hdr->bth_first + hdr->bth_count) * size;
i < esize; i++)
VERIFY3U(leaf->btl_elems[i], ==, 0x0f);
} else {
zfs_btree_core_t *node = (zfs_btree_core_t *)hdr;
for (size_t i = hdr->bth_count * size;
i < BTREE_CORE_ELEMS * size; i++)
VERIFY3U(node->btc_elems[i], ==, 0x0f);
for (uint32_t i = hdr->bth_count + 1; i <= BTREE_CORE_ELEMS;
i++) {
VERIFY3P(node->btc_children[i], ==,
(zfs_btree_hdr_t *)BTREE_POISON);
}
for (uint32_t i = 0; i <= hdr->bth_count; i++) {
zfs_btree_verify_poison_helper(tree,
node->btc_children[i]);
}
}
}
#endif
/* Check that unused memory in the tree is still poisoned. */
static void
zfs_btree_verify_poison(zfs_btree_t *tree)
{
#ifdef ZFS_DEBUG
if (tree->bt_height == -1)
return;
zfs_btree_verify_poison_helper(tree, tree->bt_root);
#endif
}
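/*
* Run the verification passes selected by zfs_btree_verify_intensity.
* Each level adds a more expensive check: 1 verifies the tree height,
* 2 the parent/child pointers, 3 the element counts, 4 the element
* ordering, and 5 the poisoning of unused memory.
*/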
void
zfs_btree_verify(zfs_btree_t *tree)
{
if (zfs_btree_verify_intensity == 0)
return;
zfs_btree_verify_height(tree);
if (zfs_btree_verify_intensity == 1)
return;
zfs_btree_verify_pointers(tree);
if (zfs_btree_verify_intensity == 2)
return;
zfs_btree_verify_counts(tree);
if (zfs_btree_verify_intensity == 3)
return;
zfs_btree_verify_order(tree);
if (zfs_btree_verify_intensity == 4)
return;
zfs_btree_verify_poison(tree);
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, btree_verify_intensity, UINT, ZMOD_RW,
"Enable btree verification. Levels above 4 require ZFS be built "
"with debugging");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dataset_kstats.c b/sys/contrib/openzfs/module/zfs/dataset_kstats.c
index 57b8faf213eb..767a461e0026 100644
--- a/sys/contrib/openzfs/module/zfs/dataset_kstats.c
+++ b/sys/contrib/openzfs/module/zfs/dataset_kstats.c
@@ -1,239 +1,243 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018 by Delphix. All rights reserved.
* Copyright (c) 2018 Datto Inc.
*/
#include <sys/dataset_kstats.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
static dataset_kstat_values_t empty_dataset_kstats = {
{ "dataset_name", KSTAT_DATA_STRING },
{ "writes", KSTAT_DATA_UINT64 },
{ "nwritten", KSTAT_DATA_UINT64 },
{ "reads", KSTAT_DATA_UINT64 },
{ "nread", KSTAT_DATA_UINT64 },
{ "nunlinks", KSTAT_DATA_UINT64 },
{ "nunlinked", KSTAT_DATA_UINT64 },
{
{ "zil_commit_count", KSTAT_DATA_UINT64 },
{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_write", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_alloc", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
- { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 }
+ { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_write", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_alloc", KSTAT_DATA_UINT64 }
}
};
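/*
* Kstat update callback: copy the current wmsum counter values into the
* named kstat entries. The kstats are read-only; writes return EACCES.
*/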
static int
dataset_kstats_update(kstat_t *ksp, int rw)
{
dataset_kstats_t *dk = ksp->ks_private;
dataset_kstat_values_t *dkv = ksp->ks_data;
ASSERT3P(dk->dk_kstats->ks_data, ==, dkv);
if (rw == KSTAT_WRITE)
return (EACCES);
dkv->dkv_writes.value.ui64 =
wmsum_value(&dk->dk_sums.dss_writes);
dkv->dkv_nwritten.value.ui64 =
wmsum_value(&dk->dk_sums.dss_nwritten);
dkv->dkv_reads.value.ui64 =
wmsum_value(&dk->dk_sums.dss_reads);
dkv->dkv_nread.value.ui64 =
wmsum_value(&dk->dk_sums.dss_nread);
dkv->dkv_nunlinks.value.ui64 =
wmsum_value(&dk->dk_sums.dss_nunlinks);
dkv->dkv_nunlinked.value.ui64 =
wmsum_value(&dk->dk_sums.dss_nunlinked);
zil_kstat_values_update(&dkv->dkv_zil_stats, &dk->dk_zil_sums);
return (0);
}
int
dataset_kstats_create(dataset_kstats_t *dk, objset_t *objset)
{
/*
* There should not be anything wrong with having kstats for
* snapshots. However, since we are not sure how useful they would
* be, nor how much their memory overhead would matter in a
* filesystem with many snapshots, we skip them for now.
*/
if (dmu_objset_is_snapshot(objset))
return (0);
/*
* At the time of this writing, KSTAT_STRLEN is 255 in Linux,
* and the spa_name can theoretically be up to 256 characters.
* In practice, however, the spa_name can be at most 240 characters
* [see the origin directory name check in pool_namecheck()]. Thus,
* the naming scheme for the module name below should not cause
* any truncations. In the event that a truncation does happen
* though, due to some future change, we silently skip creating
* the kstat and log the event.
*/
char kstat_module_name[KSTAT_STRLEN];
int n = snprintf(kstat_module_name, sizeof (kstat_module_name),
"zfs/%s", spa_name(dmu_objset_spa(objset)));
if (n < 0) {
zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
" snprintf() for kstat module name returned %d",
(unsigned long long)dmu_objset_id(objset), n);
return (SET_ERROR(EINVAL));
} else if (n >= KSTAT_STRLEN) {
zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
"kstat module name length (%d) exceeds limit (%d)",
(unsigned long long)dmu_objset_id(objset),
n, KSTAT_STRLEN);
return (SET_ERROR(ENAMETOOLONG));
}
char kstat_name[KSTAT_STRLEN];
n = snprintf(kstat_name, sizeof (kstat_name), "objset-0x%llx",
(unsigned long long)dmu_objset_id(objset));
if (n < 0) {
zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
" snprintf() for kstat name returned %d",
(unsigned long long)dmu_objset_id(objset), n);
return (SET_ERROR(EINVAL));
} else if (n >= KSTAT_STRLEN) {
zfs_dbgmsg("failed to create dataset kstat for objset %lld: "
"kstat name length (%d) exceeds limit (%d)",
(unsigned long long)dmu_objset_id(objset),
n, KSTAT_STRLEN);
return (SET_ERROR(ENAMETOOLONG));
}
kstat_t *kstat = kstat_create(kstat_module_name, 0, kstat_name,
"dataset", KSTAT_TYPE_NAMED,
sizeof (empty_dataset_kstats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (kstat == NULL)
return (SET_ERROR(ENOMEM));
dataset_kstat_values_t *dk_kstats =
kmem_alloc(sizeof (empty_dataset_kstats), KM_SLEEP);
memcpy(dk_kstats, &empty_dataset_kstats,
sizeof (empty_dataset_kstats));
char *ds_name = kmem_zalloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(objset->os_dsl_dataset, ds_name);
KSTAT_NAMED_STR_PTR(&dk_kstats->dkv_ds_name) = ds_name;
KSTAT_NAMED_STR_BUFLEN(&dk_kstats->dkv_ds_name) =
ZFS_MAX_DATASET_NAME_LEN;
kstat->ks_data = dk_kstats;
kstat->ks_update = dataset_kstats_update;
kstat->ks_private = dk;
kstat->ks_data_size += ZFS_MAX_DATASET_NAME_LEN;
wmsum_init(&dk->dk_sums.dss_writes, 0);
wmsum_init(&dk->dk_sums.dss_nwritten, 0);
wmsum_init(&dk->dk_sums.dss_reads, 0);
wmsum_init(&dk->dk_sums.dss_nread, 0);
wmsum_init(&dk->dk_sums.dss_nunlinks, 0);
wmsum_init(&dk->dk_sums.dss_nunlinked, 0);
zil_sums_init(&dk->dk_zil_sums);
dk->dk_kstats = kstat;
kstat_install(kstat);
return (0);
}
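/*
* Tear down a dataset's kstats: delete the kstat, free the dataset name
* buffer and the kstat values, and finalize the wmsum counters and ZIL sums.
*/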
void
dataset_kstats_destroy(dataset_kstats_t *dk)
{
if (dk->dk_kstats == NULL)
return;
dataset_kstat_values_t *dkv = dk->dk_kstats->ks_data;
kstat_delete(dk->dk_kstats);
dk->dk_kstats = NULL;
kmem_free(KSTAT_NAMED_STR_PTR(&dkv->dkv_ds_name),
KSTAT_NAMED_STR_BUFLEN(&dkv->dkv_ds_name));
kmem_free(dkv, sizeof (empty_dataset_kstats));
wmsum_fini(&dk->dk_sums.dss_writes);
wmsum_fini(&dk->dk_sums.dss_nwritten);
wmsum_fini(&dk->dk_sums.dss_reads);
wmsum_fini(&dk->dk_sums.dss_nread);
wmsum_fini(&dk->dk_sums.dss_nunlinks);
wmsum_fini(&dk->dk_sums.dss_nunlinked);
zil_sums_fini(&dk->dk_zil_sums);
}
void
dataset_kstats_update_write_kstats(dataset_kstats_t *dk,
int64_t nwritten)
{
ASSERT3S(nwritten, >=, 0);
if (dk->dk_kstats == NULL)
return;
wmsum_add(&dk->dk_sums.dss_writes, 1);
wmsum_add(&dk->dk_sums.dss_nwritten, nwritten);
}
void
dataset_kstats_update_read_kstats(dataset_kstats_t *dk,
int64_t nread)
{
ASSERT3S(nread, >=, 0);
if (dk->dk_kstats == NULL)
return;
wmsum_add(&dk->dk_sums.dss_reads, 1);
wmsum_add(&dk->dk_sums.dss_nread, nread);
}
void
dataset_kstats_update_nunlinks_kstat(dataset_kstats_t *dk, int64_t delta)
{
if (dk->dk_kstats == NULL)
return;
wmsum_add(&dk->dk_sums.dss_nunlinks, delta);
}
void
dataset_kstats_update_nunlinked_kstat(dataset_kstats_t *dk, int64_t delta)
{
if (dk->dk_kstats == NULL)
return;
wmsum_add(&dk->dk_sums.dss_nunlinked, delta);
}
diff --git a/sys/contrib/openzfs/module/zfs/dmu_objset.c b/sys/contrib/openzfs/module/zfs/dmu_objset.c
index c19ebf424953..778b18817eef 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_objset.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_objset.c
@@ -1,3084 +1,3083 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/spa_impl.h>
#include <sys/dmu_recv.h>
#include <sys/zfs_project.h>
#include "zfs_namecheck.h"
#include <sys/vdev_impl.h>
#include <sys/arc.h>
/*
* Needed to close a window in dnode_move() that allows the objset to be freed
* before it can be safely accessed.
*/
krwlock_t os_lock;
/*
* Tunable to override the maximum number of threads for the parallelization
* of dmu_objset_find_dp, needed to speed up the import of pools with many
* datasets.
* Default is 4 times the number of leaf vdevs.
*/
static const int dmu_find_threads = 0;
/*
* Backfill lower metadnode objects after this many have been freed.
* Backfilling negatively impacts object creation rates, so only do it
* if there are enough holes to fill.
*/
static const int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT;
static const char *upgrade_tag = "upgrade_tag";
static void dmu_objset_find_dp_cb(void *arg);
static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb);
static void dmu_objset_upgrade_stop(objset_t *os);
void
dmu_objset_init(void)
{
rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}
void
dmu_objset_fini(void)
{
rw_destroy(&os_lock);
}
spa_t *
dmu_objset_spa(objset_t *os)
{
return (os->os_spa);
}
zilog_t *
dmu_objset_zil(objset_t *os)
{
return (os->os_zil);
}
dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
dsl_dataset_t *ds;
if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
return (ds->ds_dir->dd_pool);
else
return (spa_get_dsl(os->os_spa));
}
dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
return (os->os_dsl_dataset);
}
dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
return (os->os_phys->os_type);
}
void
dmu_objset_name(objset_t *os, char *buf)
{
dsl_dataset_name(os->os_dsl_dataset, buf);
}
uint64_t
dmu_objset_id(objset_t *os)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
return (ds ? ds->ds_object : 0);
}
uint64_t
dmu_objset_dnodesize(objset_t *os)
{
return (os->os_dnodesize);
}
zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
return (os->os_sync);
}
zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
return (os->os_logbias);
}
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance should have been done by now.
*/
ASSERT(newval != ZIO_CHECKSUM_INHERIT);
os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}
static void
compression_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval != ZIO_COMPRESS_INHERIT);
os->os_compress = zio_compress_select(os->os_spa,
ZIO_COMPRESS_ALGO(newval), ZIO_COMPRESS_ON);
os->os_complevel = zio_complevel_select(os->os_spa, os->os_compress,
ZIO_COMPRESS_LEVEL(newval), ZIO_COMPLEVEL_DEFAULT);
}
static void
copies_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval > 0);
ASSERT(newval <= spa_max_replication(os->os_spa));
os->os_copies = newval;
}
static void
dedup_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
spa_t *spa = os->os_spa;
enum zio_checksum checksum;
/*
* Inheritance should have been done by now.
*/
ASSERT(newval != ZIO_CHECKSUM_INHERIT);
checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}
static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
newval == ZFS_CACHE_METADATA);
os->os_primary_cache = newval;
}
static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
newval == ZFS_CACHE_METADATA);
os->os_secondary_cache = newval;
}
static void
sync_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
newval == ZFS_SYNC_DISABLED);
os->os_sync = newval;
if (os->os_zil)
zil_set_sync(os->os_zil, newval);
}
static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
newval == ZFS_REDUNDANT_METADATA_MOST ||
newval == ZFS_REDUNDANT_METADATA_SOME ||
newval == ZFS_REDUNDANT_METADATA_NONE);
os->os_redundant_metadata = newval;
}
static void
dnodesize_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
switch (newval) {
case ZFS_DNSIZE_LEGACY:
os->os_dnodesize = DNODE_MIN_SIZE;
break;
case ZFS_DNSIZE_AUTO:
/*
* Choose a dnode size that will work well for most
* workloads if the user specified "auto". Future code
* improvements could dynamically select a dnode size
* based on observed workload patterns.
*/
os->os_dnodesize = DNODE_MIN_SIZE * 2;
break;
case ZFS_DNSIZE_1K:
case ZFS_DNSIZE_2K:
case ZFS_DNSIZE_4K:
case ZFS_DNSIZE_8K:
case ZFS_DNSIZE_16K:
os->os_dnodesize = newval;
break;
}
}
static void
smallblk_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval <= SPA_MAXBLOCKSIZE);
ASSERT(ISP2(newval));
os->os_zpl_special_smallblock = newval;
}
static void
logbias_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
newval == ZFS_LOGBIAS_THROUGHPUT);
os->os_logbias = newval;
if (os->os_zil)
zil_set_logbias(os->os_zil, newval);
}
static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
os->os_recordsize = newval;
}
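/*
* Byteswap an on-disk objset_phys_t. The buffer may be any of the three
* supported sizes (V1, V2, or the current layout with project accounting).
*/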
void
dmu_objset_byteswap(void *buf, size_t size)
{
objset_phys_t *osp = buf;
ASSERT(size == OBJSET_PHYS_SIZE_V1 || size == OBJSET_PHYS_SIZE_V2 ||
size == sizeof (objset_phys_t));
dnode_byteswap(&osp->os_meta_dnode);
byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
osp->os_type = BSWAP_64(osp->os_type);
osp->os_flags = BSWAP_64(osp->os_flags);
if (size >= OBJSET_PHYS_SIZE_V2) {
dnode_byteswap(&osp->os_userused_dnode);
dnode_byteswap(&osp->os_groupused_dnode);
if (size >= sizeof (objset_phys_t))
dnode_byteswap(&osp->os_projectused_dnode);
}
}
/*
* The hash is a CRC-based hash of the objset_t pointer and the object number.
*/
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
uintptr_t osv = (uintptr_t)os;
uint64_t crc = -1ULL;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
/*
* The low 6 bits of the pointer don't have much entropy, because
* the objset_t is more than 2^6 bytes in size.
*/
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];
crc ^= (osv>>14) ^ (obj>>24);
return (crc);
}
static unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
dnode_t *dn = obj;
/*
* The low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power-of-two number of sublists, each sublist's usage
* would not be evenly distributed. In this context a full 64-bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)dnode_hash(dn->dn_objset, dn->dn_object) %
multilist_get_num_sublists(ml));
}
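/*
* Decide whether this objset's root block may be cached in L2ARC. When
* l2arc_exclude_special is set, blocks stored on special or dedup vdevs
* are excluded.
*/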
static inline boolean_t
dmu_os_is_l2cacheable(objset_t *os)
{
if (os->os_secondary_cache == ZFS_CACHE_ALL ||
os->os_secondary_cache == ZFS_CACHE_METADATA) {
if (l2arc_exclude_special == 0)
return (B_TRUE);
blkptr_t *bp = os->os_rootbp;
if (bp == NULL || BP_IS_HOLE(bp))
return (B_FALSE);
uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
vdev_t *rvd = os->os_spa->spa_root_vdev;
vdev_t *vd = NULL;
if (vdev < rvd->vdev_children)
vd = rvd->vdev_child[vdev];
if (vd == NULL)
return (B_TRUE);
if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Instantiates the objset_t in-memory structure corresponding to the
* objset_phys_t that's pointed to by the specified blkptr_t.
*/
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
objset_t **osp)
{
objset_t *os;
int i, err;
ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
ASSERT(!BP_IS_REDACTED(bp));
/*
* We need the pool config lock to get properties.
*/
ASSERT(ds == NULL || dsl_pool_config_held(ds->ds_dir->dd_pool));
/*
* The $ORIGIN dataset (if it exists) doesn't have an associated
* objset, so there's no reason to open it. The $ORIGIN dataset
* will not exist on pools older than SPA_VERSION_ORIGIN.
*/
if (ds != NULL && spa_get_dsl(spa) != NULL &&
spa_get_dsl(spa)->dp_origin_snap != NULL) {
ASSERT3P(ds->ds_dir, !=,
spa_get_dsl(spa)->dp_origin_snap->ds_dir);
}
os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
os->os_dsl_dataset = ds;
os->os_spa = spa;
os->os_rootbp = bp;
if (!BP_IS_HOLE(os->os_rootbp)) {
arc_flags_t aflags = ARC_FLAG_WAIT;
zbookmark_phys_t zb;
int size;
zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (dmu_os_is_l2cacheable(os))
aflags |= ARC_FLAG_L2CACHE;
if (ds != NULL && ds->ds_dir->dd_crypto_obj != 0) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT(BP_IS_AUTHENTICATED(bp));
zio_flags |= ZIO_FLAG_RAW;
}
dprintf_bp(os->os_rootbp, "reading %s", "");
err = arc_read(NULL, spa, os->os_rootbp,
arc_getbuf_func, &os->os_phys_buf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (err != 0) {
kmem_free(os, sizeof (objset_t));
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = SET_ERROR(EIO);
return (err);
}
if (spa_version(spa) < SPA_VERSION_USERSPACE)
size = OBJSET_PHYS_SIZE_V1;
else if (!spa_feature_is_enabled(spa,
SPA_FEATURE_PROJECT_QUOTA))
size = OBJSET_PHYS_SIZE_V2;
else
size = sizeof (objset_phys_t);
/* Increase the blocksize if we are permitted. */
if (arc_buf_size(os->os_phys_buf) < size) {
arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
memset(buf->b_data, 0, size);
memcpy(buf->b_data, os->os_phys_buf->b_data,
arc_buf_size(os->os_phys_buf));
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
os->os_phys_buf = buf;
}
os->os_phys = os->os_phys_buf->b_data;
os->os_flags = os->os_phys->os_flags;
} else {
int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
sizeof (objset_phys_t) : OBJSET_PHYS_SIZE_V1;
os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
os->os_phys = os->os_phys_buf->b_data;
memset(os->os_phys, 0, size);
}
/*
* These properties will be filled in by the logic in zfs_get_zplprop()
* when they are queried for the first time.
*/
os->os_version = OBJSET_PROP_UNINITIALIZED;
os->os_normalization = OBJSET_PROP_UNINITIALIZED;
os->os_utf8only = OBJSET_PROP_UNINITIALIZED;
os->os_casesensitivity = OBJSET_PROP_UNINITIALIZED;
/*
* Note: the changed_cb will be called once before the register
* func returns, thus changing the checksum/compression from the
* default (fletcher2/off). Snapshots don't need to know about
* checksum/compression/copies.
*/
if (ds != NULL) {
os->os_encrypted = (ds->ds_dir->dd_crypto_obj != 0);
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
primary_cache_changed_cb, os);
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
secondary_cache_changed_cb, os);
}
if (!ds->ds_is_snapshot) {
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_CHECKSUM),
checksum_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_COMPRESSION),
compression_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_COPIES),
copies_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEDUP),
dedup_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_LOGBIAS),
logbias_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SYNC),
sync_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(
ZFS_PROP_REDUNDANT_METADATA),
redundant_metadata_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
recordsize_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DNODESIZE),
dnodesize_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(
ZFS_PROP_SPECIAL_SMALL_BLOCKS),
smallblk_changed_cb, os);
}
}
if (err != 0) {
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
kmem_free(os, sizeof (objset_t));
return (err);
}
} else {
/* It's the meta-objset. */
os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
os->os_compress = ZIO_COMPRESS_ON;
os->os_complevel = ZIO_COMPLEVEL_DEFAULT;
os->os_encrypted = B_FALSE;
os->os_copies = spa_max_replication(spa);
os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
os->os_dedup_verify = B_FALSE;
os->os_logbias = ZFS_LOGBIAS_LATENCY;
os->os_sync = ZFS_SYNC_STANDARD;
os->os_primary_cache = ZFS_CACHE_ALL;
os->os_secondary_cache = ZFS_CACHE_ALL;
os->os_dnodesize = DNODE_MIN_SIZE;
}
if (ds == NULL || !ds->ds_is_snapshot)
os->os_zil_header = os->os_phys->os_zil_header;
os->os_zil = zil_alloc(os, &os->os_zil_header);
for (i = 0; i < TXG_SIZE; i++) {
multilist_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[i]),
dnode_multilist_index_func);
}
list_create(&os->os_dnodes, sizeof (dnode_t),
offsetof(dnode_t, dn_link));
list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
list_link_init(&os->os_evicting_node);
mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
os->os_obj_next_percpu_len = boot_ncpus;
os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);
dnode_special_open(os, &os->os_phys->os_meta_dnode,
DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
if (OBJSET_BUF_HAS_USERUSED(os->os_phys_buf)) {
dnode_special_open(os, &os->os_phys->os_userused_dnode,
DMU_USERUSED_OBJECT, &os->os_userused_dnode);
dnode_special_open(os, &os->os_phys->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
if (OBJSET_BUF_HAS_PROJECTUSED(os->os_phys_buf))
dnode_special_open(os,
&os->os_phys->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, &os->os_projectused_dnode);
}
mutex_init(&os->os_upgrade_lock, NULL, MUTEX_DEFAULT, NULL);
*osp = os;
return (0);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
int err = 0;
/*
* We need the pool_config lock to manipulate the dsl_dataset_t.
* Even if the dataset is long-held, we need the pool_config lock
* to open the objset, as it needs to get properties.
*/
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
mutex_enter(&ds->ds_opening_lock);
if (ds->ds_objset == NULL) {
objset_t *os;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
ds, dsl_dataset_get_blkptr(ds), &os);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (err == 0) {
mutex_enter(&ds->ds_lock);
ASSERT(ds->ds_objset == NULL);
ds->ds_objset = os;
mutex_exit(&ds->ds_lock);
}
}
*osp = ds->ds_objset;
mutex_exit(&ds->ds_opening_lock);
return (err);
}
/*
* Holds the pool while the objset is held. Therefore only one objset
* can be held at a time.
*/
int
dmu_objset_hold_flags(const char *name, boolean_t decrypt, const void *tag,
objset_t **osp)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
err = dsl_pool_hold(name, tag, &dp);
if (err != 0)
return (err);
err = dsl_dataset_hold_flags(dp, name, flags, tag, &ds);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
err = dmu_objset_from_ds(ds, osp);
if (err != 0) {
dsl_dataset_rele(ds, tag);
dsl_pool_rele(dp, tag);
}
return (err);
}
int
dmu_objset_hold(const char *name, const void *tag, objset_t **osp)
{
return (dmu_objset_hold_flags(name, B_FALSE, tag, osp));
}
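/*
* Common helper for dmu_objset_own() and dmu_objset_own_obj(): open the
* objset from the dataset, check the requested type and writability, and,
* when decrypting, authenticate the objset buffer.
*/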
static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
(void) tag;
int err = dmu_objset_from_ds(ds, osp);
if (err != 0) {
return (err);
} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
return (SET_ERROR(EINVAL));
} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
return (SET_ERROR(EROFS));
} else if (!readonly && decrypt &&
dsl_dir_incompatible_encryption_version(ds->ds_dir)) {
return (SET_ERROR(EROFS));
}
/* if we are decrypting, we can now check MACs in os->os_phys_buf */
if (decrypt && arc_is_unauthenticated((*osp)->os_phys_buf)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
err = arc_untransform((*osp)->os_phys_buf, (*osp)->os_spa,
&zb, B_FALSE);
if (err != 0)
return (err);
ASSERT0(arc_is_unauthenticated((*osp)->os_phys_buf));
}
return (0);
}
/*
* dsl_pool must not be held when this is called.
* Upon successful return, there will be a longhold on the dataset,
* and the dsl_pool will not be held.
*/
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
err = dsl_pool_hold(name, FTAG, &dp);
if (err != 0)
return (err);
err = dsl_dataset_own(dp, name, flags, tag, &ds);
if (err != 0) {
dsl_pool_rele(dp, FTAG);
return (err);
}
err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
if (err != 0) {
dsl_dataset_disown(ds, flags, tag);
dsl_pool_rele(dp, FTAG);
return (err);
}
/*
* User accounting requires the dataset to be decrypted and rw.
* We also don't begin user accounting during claiming to help
* speed up pool import times and to keep this txg reserved
* completely for recovery work.
*/
if (!readonly && !dp->dp_spa->spa_claiming &&
(ds->ds_dir->dd_crypto_obj == 0 || decrypt)) {
if (dmu_objset_userobjspace_upgradable(*osp) ||
dmu_objset_projectquota_upgradable(*osp)) {
dmu_objset_id_quota_upgrade(*osp);
} else if (dmu_objset_userused_enabled(*osp)) {
dmu_objset_userspace_upgrade(*osp);
}
}
dsl_pool_rele(dp, FTAG);
return (0);
}
int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
err = dsl_dataset_own_obj(dp, obj, flags, tag, &ds);
if (err != 0)
return (err);
err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
if (err != 0) {
dsl_dataset_disown(ds, flags, tag);
return (err);
}
return (0);
}
void
dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, const void *tag)
{
ds_hold_flags_t flags;
dsl_pool_t *dp = dmu_objset_pool(os);
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
dsl_dataset_rele_flags(os->os_dsl_dataset, flags, tag);
dsl_pool_rele(dp, tag);
}
void
dmu_objset_rele(objset_t *os, const void *tag)
{
dmu_objset_rele_flags(os, B_FALSE, tag);
}
/*
* When we are called, os MUST refer to an objset associated with a dataset
* that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
* == tag. We will then release and reacquire ownership of the dataset while
* holding the pool config_rwlock so that no intervening namespace or
* ownership changes can occur.
*
* This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
* release the hold on its dataset and acquire a new one on the dataset of the
* same name so that it can be partially torn down and reconstructed.
*/
void
dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
boolean_t decrypt, const void *tag)
{
dsl_pool_t *dp;
char name[ZFS_MAX_DATASET_NAME_LEN];
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
VERIFY3P(ds, !=, NULL);
VERIFY3P(ds->ds_owner, ==, tag);
VERIFY(dsl_dataset_long_held(ds));
dsl_dataset_name(ds, name);
dp = ds->ds_dir->dd_pool;
dsl_pool_config_enter(dp, FTAG);
dsl_dataset_disown(ds, flags, tag);
VERIFY0(dsl_dataset_own(dp, name, flags, tag, newds));
dsl_pool_config_exit(dp, FTAG);
}
void
dmu_objset_disown(objset_t *os, boolean_t decrypt, const void *tag)
{
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
/*
* Stop upgrading thread
*/
dmu_objset_upgrade_stop(os);
dsl_dataset_disown(os->os_dsl_dataset, flags, tag);
}
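/*
* Evict all dbufs referencing this objset: walk os_dnodes with a marker
* dnode, evicting the dbufs of each held dnode, then evict the dbufs of
* the special accounting dnodes and the meta-dnode.
*/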
void
dmu_objset_evict_dbufs(objset_t *os)
{
dnode_t *dn_marker;
dnode_t *dn;
dn_marker = kmem_alloc(sizeof (dnode_t), KM_SLEEP);
mutex_enter(&os->os_lock);
dn = list_head(&os->os_dnodes);
while (dn != NULL) {
/*
* Skip dnodes without holds. We have to do this dance
* because dnode_add_ref() only works if there is already a
* hold. If the dnode has no holds, then it has no dbufs.
*/
if (dnode_add_ref(dn, FTAG)) {
list_insert_after(&os->os_dnodes, dn, dn_marker);
mutex_exit(&os->os_lock);
dnode_evict_dbufs(dn);
dnode_rele(dn, FTAG);
mutex_enter(&os->os_lock);
dn = list_next(&os->os_dnodes, dn_marker);
list_remove(&os->os_dnodes, dn_marker);
} else {
dn = list_next(&os->os_dnodes, dn);
}
}
mutex_exit(&os->os_lock);
kmem_free(dn_marker, sizeof (dnode_t));
if (DMU_USERUSED_DNODE(os) != NULL) {
if (DMU_PROJECTUSED_DNODE(os) != NULL)
dnode_evict_dbufs(DMU_PROJECTUSED_DNODE(os));
dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
}
dnode_evict_dbufs(DMU_META_DNODE(os));
}
/*
* Objset eviction processing is split into two pieces.
* The first marks the objset as evicting, evicts any dbufs that
* have a refcount of zero, and then queues up the objset for the
* second phase of eviction. Once os->os_dnodes has been cleared by
* dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
* The second phase closes the special dnodes, dequeues the objset from
* the list of those undergoing eviction, and finally frees the objset.
*
* NOTE: Due to asynchronous eviction processing (invocation of
* dnode_buf_pageout()), it is possible for the meta dnode for the
* objset to have no holds even though os->os_dnodes is not empty.
*/
void
dmu_objset_evict(objset_t *os)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!dmu_objset_is_dirty(os, t));
if (ds)
dsl_prop_unregister_all(ds, os);
if (os->os_sa)
sa_tear_down(os);
dmu_objset_evict_dbufs(os);
mutex_enter(&os->os_lock);
spa_evicting_os_register(os->os_spa, os);
if (list_is_empty(&os->os_dnodes)) {
mutex_exit(&os->os_lock);
dmu_objset_evict_done(os);
} else {
mutex_exit(&os->os_lock);
}
}
void
dmu_objset_evict_done(objset_t *os)
{
ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
dnode_special_close(&os->os_meta_dnode);
if (DMU_USERUSED_DNODE(os)) {
if (DMU_PROJECTUSED_DNODE(os))
dnode_special_close(&os->os_projectused_dnode);
dnode_special_close(&os->os_userused_dnode);
dnode_special_close(&os->os_groupused_dnode);
}
zil_free(os->os_zil);
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
/*
* This is a barrier to prevent the objset from going away in
* dnode_move() until we can safely ensure that the objset is still in
* use. We consider the objset valid before the barrier and invalid
* after the barrier.
*/
rw_enter(&os_lock, RW_READER);
rw_exit(&os_lock);
kmem_free(os->os_obj_next_percpu,
os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));
mutex_destroy(&os->os_lock);
mutex_destroy(&os->os_userused_lock);
mutex_destroy(&os->os_obj_lock);
mutex_destroy(&os->os_user_ptr_lock);
mutex_destroy(&os->os_upgrade_lock);
for (int i = 0; i < TXG_SIZE; i++)
multilist_destroy(&os->os_dirty_dnodes[i]);
spa_evicting_os_deregister(os->os_spa, os);
kmem_free(os, sizeof (objset_t));
}
inode_timespec_t
dmu_objset_snap_cmtime(objset_t *os)
{
return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}
objset_t *
dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
dmu_objset_type_t type, int levels, int blksz, int ibs, dmu_tx_t *tx)
{
objset_t *os;
dnode_t *mdn;
ASSERT(dmu_tx_is_syncing(tx));
if (blksz == 0)
blksz = DNODE_BLOCK_SIZE;
if (ibs == 0)
ibs = DN_MAX_INDBLKSHIFT;
if (ds != NULL)
VERIFY0(dmu_objset_from_ds(ds, &os));
else
VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));
mdn = DMU_META_DNODE(os);
dnode_allocate(mdn, DMU_OT_DNODE, blksz, ibs, DMU_OT_NONE, 0,
DNODE_MIN_SLOTS, tx);
/*
* We don't want to have to increase the meta-dnode's nlevels
* later, because then we could do it in quiescing context while
* we are also accessing it in open context.
*
* This precaution is not necessary for the MOS (ds == NULL),
* because the MOS is only updated in syncing context.
* This is most fortunate: the MOS is the only objset that
* needs to be synced multiple times as spa_sync() iterates
* to convergence, so minimizing its dn_nlevels matters.
*/
if (ds != NULL) {
if (levels == 0) {
levels = 1;
/*
* Determine the number of levels necessary for the
* meta-dnode to contain DN_MAX_OBJECT dnodes. Note
* that in order to ensure that we do not overflow
* 64 bits, there has to be a nlevels that gives us a
* number of blocks > DN_MAX_OBJECT but < 2^64.
* Therefore, (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)
* (10) must be less than (64 - log2(DN_MAX_OBJECT))
* (16).
*/
while ((uint64_t)mdn->dn_nblkptr <<
(mdn->dn_datablkshift - DNODE_SHIFT + (levels - 1) *
(mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
DN_MAX_OBJECT)
levels++;
}
mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
mdn->dn_nlevels = levels;
}
ASSERT(type != DMU_OST_NONE);
ASSERT(type != DMU_OST_ANY);
ASSERT(type < DMU_OST_NUMTYPES);
os->os_phys->os_type = type;
/*
* Enable user accounting if it is enabled and this is not an
* encrypted receive.
*/
if (dmu_objset_userused_enabled(os) &&
(!os->os_encrypted || !dmu_objset_is_receiving(os))) {
os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
if (dmu_objset_userobjused_enabled(os)) {
ASSERT3P(ds, !=, NULL);
ds->ds_feature_activation[
SPA_FEATURE_USEROBJ_ACCOUNTING] = (void *)B_TRUE;
os->os_phys->os_flags |=
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
}
if (dmu_objset_projectquota_enabled(os)) {
ASSERT3P(ds, !=, NULL);
ds->ds_feature_activation[
SPA_FEATURE_PROJECT_QUOTA] = (void *)B_TRUE;
os->os_phys->os_flags |=
OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
}
os->os_flags = os->os_phys->os_flags;
}
dsl_dataset_dirty(ds, tx);
return (os);
}
/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
dmu_objset_type_t type, dmu_tx_t *tx)
{
return (dmu_objset_create_impl_dnstats(spa, ds, bp, type, 0, 0, 0, tx));
}
typedef struct dmu_objset_create_arg {
const char *doca_name;
cred_t *doca_cred;
proc_t *doca_proc;
void (*doca_userfunc)(objset_t *os, void *arg,
cred_t *cr, dmu_tx_t *tx);
void *doca_userarg;
dmu_objset_type_t doca_type;
uint64_t doca_flags;
dsl_crypto_params_t *doca_dcp;
} dmu_objset_create_arg_t;
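/*
* Sync-task check function for dataset creation: validate the name and
* nesting depth, the encryption parameters, and the filesystem limit, and
* require that the parent objset is a ZFS filesystem.
*/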
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
dmu_objset_create_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *pdd;
dsl_dataset_t *parentds;
objset_t *parentos;
const char *tail;
int error;
if (strchr(doca->doca_name, '@') != NULL)
return (SET_ERROR(EINVAL));
if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
if (dataset_nestcheck(doca->doca_name) != 0)
return (SET_ERROR(ENAMETOOLONG));
error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EEXIST));
}
error = dmu_objset_create_crypt_check(pdd, doca->doca_dcp, NULL);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
doca->doca_cred, doca->doca_proc);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
/* can't create below anything but filesystems (e.g. no ZVOLs) */
error = dsl_dataset_hold_obj(pdd->dd_pool,
dsl_dir_phys(pdd)->dd_head_dataset_obj, FTAG, &parentds);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
error = dmu_objset_from_ds(parentds, &parentos);
if (error != 0) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(pdd, FTAG);
return (error);
}
if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(pdd, FTAG);
return (error);
}
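/*
* Sync-task sync function for dataset creation: create the dataset, open
* its objset, run the caller's callback, and, for encrypted datasets,
* force the dirty data out in this txg before the key mapping is released.
*/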
static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
dmu_objset_create_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_dir_t *pdd;
const char *tail;
dsl_dataset_t *ds;
uint64_t obj;
blkptr_t *bp;
objset_t *os;
zio_t *rzio;
VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));
obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
doca->doca_cred, doca->doca_dcp, tx);
VERIFY0(dsl_dataset_hold_obj_flags(pdd->dd_pool, obj,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
bp = dsl_dataset_get_blkptr(ds);
os = dmu_objset_create_impl(spa, ds, bp, doca->doca_type, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (doca->doca_userfunc != NULL) {
doca->doca_userfunc(os, doca->doca_userarg,
doca->doca_cred, tx);
}
/*
* The doca_userfunc() may write out some data that needs to be
* encrypted if the dataset is encrypted (specifically the root
* directory). This data must be written out before the encryption
* key mapping is removed by dsl_dataset_rele_flags(). Force the
* I/O to occur immediately by invoking the relevant sections of
* dsl_pool_sync().
*/
if (os->os_encrypted) {
dsl_dataset_t *tmpds = NULL;
boolean_t need_sync_done = B_FALSE;
mutex_enter(&ds->ds_lock);
ds->ds_owner = FTAG;
mutex_exit(&ds->ds_lock);
rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
tx->tx_txg);
if (tmpds != NULL) {
dsl_dataset_sync(ds, rzio, tx);
need_sync_done = B_TRUE;
}
VERIFY0(zio_wait(rzio));
dmu_objset_sync_done(os, tx);
taskq_wait(dp->dp_sync_taskq);
if (txg_list_member(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(spa, ds->ds_key_mapping, ds);
}
rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
tx->tx_txg);
if (tmpds != NULL) {
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, rzio, tx);
}
VERIFY0(zio_wait(rzio));
if (need_sync_done) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(spa, ds->ds_key_mapping, ds);
dsl_dataset_sync_done(ds, tx);
dmu_buf_rele(ds->ds_dbuf, ds);
}
mutex_enter(&ds->ds_lock);
ds->ds_owner = NULL;
mutex_exit(&ds->ds_lock);
}
spa_history_log_internal_ds(ds, "create", tx, " ");
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
dsl_crypto_params_t *dcp, dmu_objset_create_sync_func_t func, void *arg)
{
dmu_objset_create_arg_t doca;
dsl_crypto_params_t tmp_dcp = { 0 };
doca.doca_name = name;
doca.doca_cred = CRED();
doca.doca_proc = curproc;
doca.doca_flags = flags;
doca.doca_userfunc = func;
doca.doca_userarg = arg;
doca.doca_type = type;
/*
* Some callers (mostly for testing) do not provide a dcp on their
* own but various code inside the sync task will require it to be
* allocated. Rather than adding NULL checks throughout this code
* or adding dummy dcps to all of the callers, we simply create a
* dummy one here and use that. This zeroed dcp will have the same
* effect as asking for inheritance of all encryption params.
*/
doca.doca_dcp = (dcp != NULL) ? dcp : &tmp_dcp;
int rv = dsl_sync_task(name,
dmu_objset_create_check, dmu_objset_create_sync, &doca,
6, ZFS_SPACE_CHECK_NORMAL);
if (rv == 0)
zvol_create_minor(name);
return (rv);
}
typedef struct dmu_objset_clone_arg {
const char *doca_clone;
const char *doca_origin;
cred_t *doca_cred;
proc_t *doca_proc;
} dmu_objset_clone_arg_t;
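/*
* Sync-task check function for cloning: validate the clone name, check the
* filesystem limit, and require that the origin is a snapshot.
*/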
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
dmu_objset_clone_arg_t *doca = arg;
dsl_dir_t *pdd;
const char *tail;
int error;
dsl_dataset_t *origin;
dsl_pool_t *dp = dmu_tx_pool(tx);
if (strchr(doca->doca_clone, '@') != NULL)
return (SET_ERROR(EINVAL));
if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EEXIST));
}
error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
doca->doca_cred, doca->doca_proc);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EDQUOT));
}
error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
/* You can only clone snapshots, not the head datasets. */
if (!origin->ds_is_snapshot) {
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EINVAL));
}
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
return (0);
}
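/*
* Sync-task sync function for cloning: create the clone dataset from the
* origin snapshot and record the operation in the pool history.
*/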
static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
dmu_objset_clone_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *pdd;
const char *tail;
dsl_dataset_t *origin, *ds;
uint64_t obj;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));
obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
doca->doca_cred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
dsl_dataset_name(origin, namebuf);
spa_history_log_internal_ds(ds, "clone", tx,
"origin=%s (%llu)", namebuf, (u_longlong_t)origin->ds_object);
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_clone(const char *clone, const char *origin)
{
dmu_objset_clone_arg_t doca;
doca.doca_clone = clone;
doca.doca_origin = origin;
doca.doca_cred = CRED();
doca.doca_proc = curproc;
int rv = dsl_sync_task(clone,
dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
6, ZFS_SPACE_CHECK_NORMAL);
if (rv == 0)
zvol_create_minor(clone);
return (rv);
}
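/*
* Snapshot a single filesystem by building a one-entry nvlist and calling
* dsl_dataset_snapshot().
*/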
int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
int err;
char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
nvlist_t *snaps = fnvlist_alloc();
fnvlist_add_boolean(snaps, longsnap);
kmem_strfree(longsnap);
err = dsl_dataset_snapshot(snaps, NULL, NULL);
fnvlist_free(snaps);
return (err);
}
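/*
* Taskq callback for objset upgrades: run the registered upgrade callback
* (unless a stop was requested), record its status, and drop the long hold
* taken by dmu_objset_upgrade().
*/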
static void
dmu_objset_upgrade_task_cb(void *data)
{
objset_t *os = data;
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_status = EINTR;
if (!os->os_upgrade_exit) {
int status;
mutex_exit(&os->os_upgrade_lock);
status = os->os_upgrade_cb(os);
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_status = status;
}
os->os_upgrade_exit = B_TRUE;
os->os_upgrade_id = 0;
mutex_exit(&os->os_upgrade_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
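/*
* Dispatch an upgrade callback to the pool's upgrade taskq, taking a long
* hold on the dataset for the duration of the upgrade.
*/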
static void
dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb)
{
if (os->os_upgrade_id != 0)
return;
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
dsl_dataset_long_hold(dmu_objset_ds(os), upgrade_tag);
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_id == 0 && os->os_upgrade_status == 0) {
os->os_upgrade_exit = B_FALSE;
os->os_upgrade_cb = cb;
os->os_upgrade_id = taskq_dispatch(
os->os_spa->spa_upgrade_taskq,
dmu_objset_upgrade_task_cb, os, TQ_SLEEP);
if (os->os_upgrade_id == TASKQID_INVALID) {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
os->os_upgrade_status = ENOMEM;
}
} else {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
mutex_exit(&os->os_upgrade_lock);
}
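/*
* Ask any in-flight upgrade task to stop, cancel it if it has not started
* yet, and wait for outstanding changes to be synced out.
*/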
static void
dmu_objset_upgrade_stop(objset_t *os)
{
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_exit = B_TRUE;
if (os->os_upgrade_id != 0) {
taskqid_t id = os->os_upgrade_id;
os->os_upgrade_id = 0;
mutex_exit(&os->os_upgrade_lock);
if ((taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id)) == 0) {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
txg_wait_synced(os->os_spa->spa_dsl_pool, 0);
} else {
mutex_exit(&os->os_upgrade_lock);
}
}
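/*
* Sync every dirty dnode on this multilist sublist, moving each one onto
* os_synced_dnodes so that user accounting can process it later.
*/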
static void
dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
{
dnode_t *dn;
while ((dn = multilist_sublist_head(list)) != NULL) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
ASSERT(dn->dn_dbuf->db_data_pending);
/*
* Initialize dn_zio outside dnode_sync() because the
* meta-dnode needs to set it outside dnode_sync().
*/
dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
ASSERT(dn->dn_zio);
ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
multilist_sublist_remove(list, dn);
/*
* See the comment above dnode_rele_task() for an explanation
* of why this dnode hold is always needed (even when not
* doing user accounting).
*/
multilist_t *newlist = &dn->dn_objset->os_synced_dnodes;
(void) dnode_add_ref(dn, newlist);
multilist_insert(newlist, dn);
dnode_sync(dn, tx);
}
}
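/*
* arc_write() ready callback for the objset root block: recompute the
* block's fill count and publish the new block pointer in os_rootbp.
*/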
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
(void) abuf;
blkptr_t *bp = zio->io_bp;
objset_t *os = arg;
dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
uint64_t fill = 0;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
ASSERT0(BP_GET_LEVEL(bp));
/*
* Update rootbp fill count: it should be the number of objects
* allocated in the object set (not counting the "special"
* objects that are stored in the objset_phys_t -- the meta
* dnode and user/group/project accounting objects).
*/
for (int i = 0; i < dnp->dn_nblkptr; i++)
fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
BP_SET_FILL(bp, fill);
if (os->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
*os->os_rootbp = *bp;
if (os->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
}
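/*
* arc_write() done callback for the objset root block: unless this was an
* I/O rewrite, free the previous root block and record the new one as born.
*/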
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
(void) abuf;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
objset_t *os = arg;
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
dmu_tx_t *tx = os->os_synctx;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
kmem_free(bp, sizeof (*bp));
}
typedef struct sync_dnodes_arg {
multilist_t *sda_list;
int sda_sublist_idx;
multilist_t *sda_newlist;
dmu_tx_t *sda_tx;
} sync_dnodes_arg_t;
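/* Taskq wrapper: lock one dirty-dnode sublist and sync it. */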
static void
sync_dnodes_task(void *arg)
{
sync_dnodes_arg_t *sda = arg;
multilist_sublist_t *ms =
multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);
dmu_objset_sync_dnodes(ms, sda->sda_tx);
multilist_sublist_unlock(ms);
kmem_free(sda, sizeof (*sda));
}
/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
int txgoff;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *zio;
list_t *list;
dbuf_dirty_record_t *dr;
int num_sublists;
multilist_t *ml;
blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
*blkptr_copy = *os->os_rootbp;
dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", (u_longlong_t)tx->tx_txg);
ASSERT(dmu_tx_is_syncing(tx));
/* XXX the write_done callback should really give us the tx... */
os->os_synctx = tx;
if (os->os_dsl_dataset == NULL) {
/*
* This is the MOS. If we have upgraded,
* spa_max_replication() could change, so reset
* os_copies here.
*/
os->os_copies = spa_max_replication(os->os_spa);
}
/*
* Create the root block IO
*/
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
arc_release(os->os_phys_buf, &os->os_phys_buf);
dmu_write_policy(os, NULL, 0, 0, &zp);
/*
* If we are either claiming the ZIL or doing a raw receive, write
* out the os_phys_buf raw. Neither of these actions will affect the
* MAC at this point.
*/
if (os->os_raw_receive ||
os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
ASSERT(os->os_encrypted);
arc_convert_to_raw(os->os_phys_buf,
os->os_dsl_dataset->ds_object, ZFS_HOST_BYTEORDER,
DMU_OT_OBJSET, NULL, NULL, NULL);
}
zio = arc_write(pio, os->os_spa, tx->tx_txg,
blkptr_copy, os->os_phys_buf, B_FALSE, dmu_os_is_l2cacheable(os),
&zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
/*
* Sync special dnodes - the parent IO for the sync is the root block
*/
DMU_META_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_META_DNODE(os), tx);
os->os_phys->os_flags = os->os_flags;
if (DMU_USERUSED_DNODE(os) &&
DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_USERUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_USERUSED_DNODE(os), tx);
DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
}
if (DMU_PROJECTUSED_DNODE(os) &&
DMU_PROJECTUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_PROJECTUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_PROJECTUSED_DNODE(os), tx);
}
txgoff = tx->tx_txg & TXG_MASK;
/*
* We must create the list here because it uses the
* dn_dirty_link[] of this txg. But it may already
* exist because we call dsl_dataset_sync() twice per txg.
*/
if (os->os_synced_dnodes.ml_sublists == NULL) {
multilist_create(&os->os_synced_dnodes, sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[txgoff]),
dnode_multilist_index_func);
} else {
ASSERT3U(os->os_synced_dnodes.ml_offset, ==,
offsetof(dnode_t, dn_dirty_link[txgoff]));
}
ml = &os->os_dirty_dnodes[txgoff];
num_sublists = multilist_get_num_sublists(ml);
for (int i = 0; i < num_sublists; i++) {
if (multilist_sublist_is_empty_idx(ml, i))
continue;
sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
sda->sda_list = ml;
sda->sda_sublist_idx = i;
sda->sda_tx = tx;
(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
sync_dnodes_task, sda, 0);
/* callback frees sda */
}
taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);
list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
- while ((dr = list_head(list)) != NULL) {
+ while ((dr = list_remove_head(list)) != NULL) {
ASSERT0(dr->dr_dbuf->db_level);
- list_remove(list, dr);
zio_nowait(dr->dr_zio);
}
/* Enable dnode backfill if enough objects have been freed. */
if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
os->os_rescan_dnodes = B_TRUE;
os->os_freed_dnodes = 0;
}
/*
* Free intent log blocks up to this tx.
*/
zil_sync(os->os_zil, tx);
os->os_phys->os_zil_header = os->os_zil_header;
zio_nowait(zio);
}
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
return (!multilist_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]));
}
static file_info_cb_t *file_cbs[DMU_OST_NUMTYPES];
void
dmu_objset_register_type(dmu_objset_type_t ost, file_info_cb_t *cb)
{
file_cbs[ost] = cb;
}
int
dmu_get_file_info(objset_t *os, dmu_object_type_t bonustype, const void *data,
zfs_file_info_t *zfi)
{
file_info_cb_t *cb = file_cbs[os->os_phys->os_type];
if (cb == NULL)
return (EINVAL);
return (cb(bonustype, data, zfi));
}
boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
file_cbs[os->os_phys->os_type] != NULL &&
DMU_USERUSED_DNODE(os) != NULL);
}
boolean_t
dmu_objset_userobjused_enabled(objset_t *os)
{
return (dmu_objset_userused_enabled(os) &&
spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING));
}
boolean_t
dmu_objset_projectquota_enabled(objset_t *os)
{
return (file_cbs[os->os_phys->os_type] != NULL &&
DMU_PROJECTUSED_DNODE(os) != NULL &&
spa_feature_is_enabled(os->os_spa, SPA_FEATURE_PROJECT_QUOTA));
}
typedef struct userquota_node {
/* must be the first field; see userquota_update_cache() */
char uqn_id[20 + DMU_OBJACCT_PREFIX_LEN];
int64_t uqn_delta;
avl_node_t uqn_node;
} userquota_node_t;
typedef struct userquota_cache {
avl_tree_t uqc_user_deltas;
avl_tree_t uqc_group_deltas;
avl_tree_t uqc_project_deltas;
} userquota_cache_t;
static int
userquota_compare(const void *l, const void *r)
{
const userquota_node_t *luqn = l;
const userquota_node_t *ruqn = r;
int rv;
/*
* NB: can only access uqn_id because userquota_update_cache() doesn't
* pass in an entire userquota_node_t.
*/
rv = strcmp(luqn->uqn_id, ruqn->uqn_id);
return (TREE_ISIGN(rv));
}
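/*
* Apply the accumulated per-user, per-group, and per-project deltas to the
* corresponding used-space ZAP objects and tear down the cache AVL trees.
*/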
static void
do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
{
void *cookie;
userquota_node_t *uqn;
ASSERT(dmu_tx_is_syncing(tx));
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
&cookie)) != NULL) {
/*
* os_userused_lock protects against concurrent calls to
* zap_increment_int(). It's needed because zap_increment_int()
* is not thread-safe (i.e. not atomic).
*/
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_USERUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_user_deltas);
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
&cookie)) != NULL) {
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_GROUPUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_group_deltas);
if (dmu_objset_projectquota_enabled(os)) {
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_project_deltas,
&cookie)) != NULL) {
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_PROJECTUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_project_deltas);
}
}
static void
userquota_update_cache(avl_tree_t *avl, const char *id, int64_t delta)
{
userquota_node_t *uqn;
avl_index_t idx;
ASSERT(strlen(id) < sizeof (uqn->uqn_id));
/*
* Use id directly for searching because uqn_id is the first field of
* userquota_node_t and fields after uqn_id won't be accessed in
* avl_find().
*/
uqn = avl_find(avl, (const void *)id, &idx);
if (uqn == NULL) {
uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
strlcpy(uqn->uqn_id, id, sizeof (uqn->uqn_id));
avl_insert(avl, uqn, idx);
}
uqn->uqn_delta += delta;
}
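/*
 * Illustrative sketch (hypothetical id and deltas, not part of this
 * change): repeated calls with the same id string accumulate into a
 * single node, so after
 *
 *     userquota_update_cache(&cache->uqc_user_deltas, "1f4", 512);
 *     userquota_update_cache(&cache->uqc_user_deltas, "1f4", -128);
 *
 * the one node keyed by "1f4" holds uqn_delta == 384, and
 * do_userquota_cacheflush() later applies it with a single
 * zap_increment() for that id.
 */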
static void
do_userquota_update(objset_t *os, userquota_cache_t *cache, uint64_t used,
uint64_t flags, uint64_t user, uint64_t group, uint64_t project,
boolean_t subtract)
{
if (flags & DNODE_FLAG_USERUSED_ACCOUNTED) {
int64_t delta = DNODE_MIN_SIZE + used;
char name[20];
if (subtract)
delta = -delta;
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)user);
userquota_update_cache(&cache->uqc_user_deltas, name, delta);
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
if (dmu_objset_projectquota_enabled(os)) {
(void) snprintf(name, sizeof (name), "%llx",
(longlong_t)project);
userquota_update_cache(&cache->uqc_project_deltas,
name, delta);
}
}
}
static void
do_userobjquota_update(objset_t *os, userquota_cache_t *cache, uint64_t flags,
uint64_t user, uint64_t group, uint64_t project, boolean_t subtract)
{
if (flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) {
char name[20 + DMU_OBJACCT_PREFIX_LEN];
int delta = subtract ? -1 : 1;
(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
(longlong_t)user);
userquota_update_cache(&cache->uqc_user_deltas, name, delta);
(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
(longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
if (dmu_objset_projectquota_enabled(os)) {
(void) snprintf(name, sizeof (name),
DMU_OBJACCT_PREFIX "%llx", (longlong_t)project);
userquota_update_cache(&cache->uqc_project_deltas,
name, delta);
}
}
}
typedef struct userquota_updates_arg {
objset_t *uua_os;
int uua_sublist_idx;
dmu_tx_t *uua_tx;
} userquota_updates_arg_t;
static void
userquota_updates_task(void *arg)
{
userquota_updates_arg_t *uua = arg;
objset_t *os = uua->uua_os;
dmu_tx_t *tx = uua->uua_tx;
dnode_t *dn;
userquota_cache_t cache = { { 0 } };
multilist_sublist_t *list =
multilist_sublist_lock(&os->os_synced_dnodes, uua->uua_sublist_idx);
ASSERT(multilist_sublist_head(list) == NULL ||
dmu_objset_userused_enabled(os));
avl_create(&cache.uqc_user_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
avl_create(&cache.uqc_group_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
if (dmu_objset_projectquota_enabled(os))
avl_create(&cache.uqc_project_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t,
uqn_node));
while ((dn = multilist_sublist_head(list)) != NULL) {
int flags;
ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
dn->dn_phys->dn_flags &
DNODE_FLAG_USERUSED_ACCOUNTED);
flags = dn->dn_id_flags;
ASSERT(flags);
if (flags & DN_ID_OLD_EXIST) {
do_userquota_update(os, &cache, dn->dn_oldused,
dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid,
dn->dn_oldprojid, B_TRUE);
do_userobjquota_update(os, &cache, dn->dn_oldflags,
dn->dn_olduid, dn->dn_oldgid,
dn->dn_oldprojid, B_TRUE);
}
if (flags & DN_ID_NEW_EXIST) {
do_userquota_update(os, &cache,
DN_USED_BYTES(dn->dn_phys), dn->dn_phys->dn_flags,
dn->dn_newuid, dn->dn_newgid,
dn->dn_newprojid, B_FALSE);
do_userobjquota_update(os, &cache,
dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid,
dn->dn_newprojid, B_FALSE);
}
mutex_enter(&dn->dn_mtx);
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
dn->dn_olduid = dn->dn_newuid;
dn->dn_oldgid = dn->dn_newgid;
dn->dn_oldprojid = dn->dn_newprojid;
dn->dn_id_flags |= DN_ID_OLD_EXIST;
if (dn->dn_bonuslen == 0)
dn->dn_id_flags |= DN_ID_CHKED_SPILL;
else
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
mutex_exit(&dn->dn_mtx);
multilist_sublist_remove(list, dn);
dnode_rele(dn, &os->os_synced_dnodes);
}
do_userquota_cacheflush(os, &cache, tx);
multilist_sublist_unlock(list);
kmem_free(uua, sizeof (*uua));
}
/*
* Release dnode holds from dmu_objset_sync_dnodes(). When the dnode is being
* synced (i.e. we have issued the zio's for blocks in the dnode), it can't be
* evicted because the block containing the dnode can't be evicted until it is
* written out. However, this hold is necessary to prevent the dnode_t from
* being moved (via dnode_move()) while it's still referenced by
* dbuf_dirty_record_t:dr_dnode. And dr_dnode is needed for
* dirty_lightweight_leaf-type dirty records.
*
* If we are doing user-object accounting, the dnode_rele() happens from
* userquota_updates_task() instead.
*/
static void
dnode_rele_task(void *arg)
{
userquota_updates_arg_t *uua = arg;
objset_t *os = uua->uua_os;
multilist_sublist_t *list =
multilist_sublist_lock(&os->os_synced_dnodes, uua->uua_sublist_idx);
dnode_t *dn;
while ((dn = multilist_sublist_head(list)) != NULL) {
multilist_sublist_remove(list, dn);
dnode_rele(dn, &os->os_synced_dnodes);
}
multilist_sublist_unlock(list);
kmem_free(uua, sizeof (*uua));
}
/*
* Return TRUE if userquota updates are needed.
*/
static boolean_t
dmu_objset_do_userquota_updates_prep(objset_t *os, dmu_tx_t *tx)
{
if (!dmu_objset_userused_enabled(os))
return (B_FALSE);
/*
* If this is a raw receive just return and handle accounting
* later when we have the keys loaded. We also don't do user
* accounting during claiming since the datasets are not owned
* for the duration of claiming and this txg should only be
* used for recovery.
*/
if (os->os_encrypted && dmu_objset_is_receiving(os))
return (B_FALSE);
if (tx->tx_txg <= os->os_spa->spa_claim_max_txg)
return (B_FALSE);
/* Allocate the user/group/project used objects if necessary. */
if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
VERIFY0(zap_create_claim(os,
DMU_USERUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
VERIFY0(zap_create_claim(os,
DMU_GROUPUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
}
if (dmu_objset_projectquota_enabled(os) &&
DMU_PROJECTUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
VERIFY0(zap_create_claim(os, DMU_PROJECTUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
}
return (B_TRUE);
}
/*
* Dispatch taskq tasks to dp_sync_taskq to update the user accounting, and
* also release the holds on the dnodes from dmu_objset_sync_dnodes().
* The caller must taskq_wait(dp_sync_taskq).
*/
void
dmu_objset_sync_done(objset_t *os, dmu_tx_t *tx)
{
boolean_t need_userquota = dmu_objset_do_userquota_updates_prep(os, tx);
int num_sublists = multilist_get_num_sublists(&os->os_synced_dnodes);
for (int i = 0; i < num_sublists; i++) {
userquota_updates_arg_t *uua =
kmem_alloc(sizeof (*uua), KM_SLEEP);
uua->uua_os = os;
uua->uua_sublist_idx = i;
uua->uua_tx = tx;
/*
* If we don't need to update userquotas, use
* dnode_rele_task() to call dnode_rele()
*/
(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
need_userquota ? userquota_updates_task : dnode_rele_task,
uua, 0);
/* callback frees uua */
}
}
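/*
 * Minimal caller sketch (assumed context, not part of this file): the
 * dispatched tasks run asynchronously, so a syncing-context caller is
 * expected to follow this with a taskq_wait() on the same taskq before
 * relying on the accounting updates or dnode releases:
 *
 *     dmu_objset_sync_done(os, tx);
 *     ...
 *     taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);
 */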
/*
* Returns a pointer to the data from which to find the uid/gid.
*
* If a dirty record for the transaction group that is syncing can't
* be found, then NULL is returned. In the NULL case it is assumed
* that the uid/gid aren't changing.
*/
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
void *data;
if (db->db_dirtycnt == 0)
return (db->db.db_data); /* Nothing is changing */
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
if (dr == NULL) {
data = NULL;
} else {
if (dr->dr_dnode->dn_bonuslen == 0 &&
dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
data = dr->dt.dl.dr_data->b_data;
else
data = dr->dt.dl.dr_data;
}
return (data);
}
void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
void *data = NULL;
dmu_buf_impl_t *db = NULL;
int flags = dn->dn_id_flags;
int error;
boolean_t have_spill = B_FALSE;
if (!dmu_objset_userused_enabled(dn->dn_objset))
return;
/*
* Raw receives introduce a problem with user accounting. Raw
* receives cannot update the user accounting info because the
* user ids and the sizes are encrypted. To guarantee that we
* never end up with bad user accounting, we simply disable it
* during raw receives. We also disable this for normal receives
* so that an incremental raw receive may be done on top of an
* existing non-raw receive.
*/
if (os->os_encrypted && dmu_objset_is_receiving(os))
return;
if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
DN_ID_CHKED_SPILL)))
return;
if (before && dn->dn_bonuslen != 0)
data = DN_BONUS(dn->dn_phys);
else if (!before && dn->dn_bonuslen != 0) {
if (dn->dn_bonus) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
data = dmu_objset_userquota_find_data(db, tx);
} else {
data = DN_BONUS(dn->dn_phys);
}
} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
int rf = 0;
if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
rf |= DB_RF_HAVESTRUCT;
error = dmu_spill_hold_by_dnode(dn,
rf | DB_RF_MUST_SUCCEED,
FTAG, (dmu_buf_t **)&db);
ASSERT(error == 0);
mutex_enter(&db->db_mtx);
data = (before) ? db->db.db_data :
dmu_objset_userquota_find_data(db, tx);
have_spill = B_TRUE;
} else {
mutex_enter(&dn->dn_mtx);
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
mutex_exit(&dn->dn_mtx);
return;
}
/*
* Must always call the callback in case the object
* type has changed and that type isn't an object type to track
*/
zfs_file_info_t zfi;
error = file_cbs[os->os_phys->os_type](dn->dn_bonustype, data, &zfi);
if (before) {
ASSERT(data);
dn->dn_olduid = zfi.zfi_user;
dn->dn_oldgid = zfi.zfi_group;
dn->dn_oldprojid = zfi.zfi_project;
} else if (data) {
dn->dn_newuid = zfi.zfi_user;
dn->dn_newgid = zfi.zfi_group;
dn->dn_newprojid = zfi.zfi_project;
}
/*
* Preserve existing uid/gid when the callback can't determine
* what the new uid/gid are and the callback returned EEXIST.
* The EEXIST error tells us to just use the existing uid/gid.
* If we don't know what the old values are then just assign
* them to 0, since that is a new file being created.
*/
if (!before && data == NULL && error == EEXIST) {
if (flags & DN_ID_OLD_EXIST) {
dn->dn_newuid = dn->dn_olduid;
dn->dn_newgid = dn->dn_oldgid;
dn->dn_newprojid = dn->dn_oldprojid;
} else {
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
}
error = 0;
}
if (db)
mutex_exit(&db->db_mtx);
mutex_enter(&dn->dn_mtx);
if (error == 0 && before)
dn->dn_id_flags |= DN_ID_OLD_EXIST;
if (error == 0 && !before)
dn->dn_id_flags |= DN_ID_NEW_EXIST;
if (have_spill) {
dn->dn_id_flags |= DN_ID_CHKED_SPILL;
} else {
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
mutex_exit(&dn->dn_mtx);
if (have_spill)
dmu_buf_rele((dmu_buf_t *)db, FTAG);
}
boolean_t
dmu_objset_userspace_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}
boolean_t
dmu_objset_userobjspace_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE);
}
boolean_t
dmu_objset_projectquota_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_PROJECTQUOTA_COMPLETE);
}
static int
dmu_objset_space_upgrade(objset_t *os)
{
uint64_t obj;
int err = 0;
/*
* We simply need to mark every object dirty, so that it will be
* synced out and accounted from then on. If this is called
* concurrently, or if we already did some work before crashing,
* that's fine, since we track each object's accounted state
* independently.
*/
for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
dmu_tx_t *tx;
dmu_buf_t *db;
int objerr;
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_exit)
err = SET_ERROR(EINTR);
mutex_exit(&os->os_upgrade_lock);
if (err != 0)
return (err);
if (issig(JUSTLOOKING) && issig(FORREAL))
return (SET_ERROR(EINTR));
objerr = dmu_bonus_hold(os, obj, FTAG, &db);
if (objerr != 0)
continue;
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, obj);
objerr = dmu_tx_assign(tx, TXG_WAIT);
if (objerr != 0) {
dmu_buf_rele(db, FTAG);
dmu_tx_abort(tx);
continue;
}
dmu_buf_will_dirty(db, tx);
dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);
}
return (0);
}
static int
dmu_objset_userspace_upgrade_cb(objset_t *os)
{
int err = 0;
if (dmu_objset_userspace_present(os))
return (0);
if (dmu_objset_is_snapshot(os))
return (SET_ERROR(EINVAL));
if (!dmu_objset_userused_enabled(os))
return (SET_ERROR(ENOTSUP));
err = dmu_objset_space_upgrade(os);
if (err)
return (err);
os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
txg_wait_synced(dmu_objset_pool(os), 0);
return (0);
}
void
dmu_objset_userspace_upgrade(objset_t *os)
{
dmu_objset_upgrade(os, dmu_objset_userspace_upgrade_cb);
}
static int
dmu_objset_id_quota_upgrade_cb(objset_t *os)
{
int err = 0;
if (dmu_objset_userobjspace_present(os) &&
dmu_objset_projectquota_present(os))
return (0);
if (dmu_objset_is_snapshot(os))
return (SET_ERROR(EINVAL));
if (!dmu_objset_userused_enabled(os))
return (SET_ERROR(ENOTSUP));
if (!dmu_objset_projectquota_enabled(os) &&
dmu_objset_userobjspace_present(os))
return (SET_ERROR(ENOTSUP));
if (dmu_objset_userobjused_enabled(os))
dmu_objset_ds(os)->ds_feature_activation[
SPA_FEATURE_USEROBJ_ACCOUNTING] = (void *)B_TRUE;
if (dmu_objset_projectquota_enabled(os))
dmu_objset_ds(os)->ds_feature_activation[
SPA_FEATURE_PROJECT_QUOTA] = (void *)B_TRUE;
err = dmu_objset_space_upgrade(os);
if (err)
return (err);
os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
if (dmu_objset_userobjused_enabled(os))
os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
if (dmu_objset_projectquota_enabled(os))
os->os_flags |= OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
txg_wait_synced(dmu_objset_pool(os), 0);
return (0);
}
void
dmu_objset_id_quota_upgrade(objset_t *os)
{
dmu_objset_upgrade(os, dmu_objset_id_quota_upgrade_cb);
}
boolean_t
dmu_objset_userobjspace_upgradable(objset_t *os)
{
return (dmu_objset_type(os) == DMU_OST_ZFS &&
!dmu_objset_is_snapshot(os) &&
dmu_objset_userobjused_enabled(os) &&
!dmu_objset_userobjspace_present(os) &&
spa_writeable(dmu_objset_spa(os)));
}
boolean_t
dmu_objset_projectquota_upgradable(objset_t *os)
{
return (dmu_objset_type(os) == DMU_OST_ZFS &&
!dmu_objset_is_snapshot(os) &&
dmu_objset_projectquota_enabled(os) &&
!dmu_objset_projectquota_present(os) &&
spa_writeable(dmu_objset_spa(os)));
}
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
{
dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
usedobjsp, availobjsp);
}
uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}
void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
stat->dds_type = os->os_phys->os_type;
if (os->os_dsl_dataset)
dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}
void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
ASSERT(os->os_dsl_dataset ||
os->os_phys->os_type == DMU_OST_META);
if (os->os_dsl_dataset != NULL)
dsl_dataset_stats(os->os_dsl_dataset, nv);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
os->os_phys->os_type);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
dmu_objset_userspace_present(os));
}
int
dmu_objset_is_snapshot(objset_t *os)
{
if (os->os_dsl_dataset != NULL)
return (os->os_dsl_dataset->ds_is_snapshot);
else
return (B_FALSE);
}
int
dmu_snapshot_realname(objset_t *os, const char *name, char *real, int maxlen,
boolean_t *conflict)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
uint64_t ignored;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
return (SET_ERROR(ENOENT));
return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
MT_NORMALIZE, real, maxlen, conflict));
}
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
zap_cursor_t cursor;
zap_attribute_t attr;
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strlcpy(name, attr.za_name, namelen);
if (idp)
*idp = attr.za_first_integer;
if (case_conflict)
*case_conflict = attr.za_normalization_conflict;
zap_cursor_advance(&cursor);
*offp = zap_cursor_serialize(&cursor);
zap_cursor_fini(&cursor);
return (0);
}
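/*
 * Usage sketch (hypothetical caller, local names invented for
 * illustration): *offp is a serialized ZAP cursor position, so a caller
 * iterates by starting at 0 and feeding the updated offset back in
 * until ENOENT is returned:
 *
 *     char name[ZFS_MAX_DATASET_NAME_LEN];
 *     uint64_t id, off = 0;
 *     boolean_t conflict;
 *
 *     while (dmu_snapshot_list_next(os, sizeof (name), name,
 *         &id, &off, &conflict) == 0) {
 *             ... visit one snapshot name per iteration ...
 *     }
 *
 * Per the ASSERT above, the dsl pool config lock must be held across
 * each call.
 */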
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value));
}
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp)
{
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
zap_cursor_t cursor;
zap_attribute_t attr;
/* there is no next dir on a snapshot! */
if (os->os_dsl_dataset->ds_object !=
dsl_dir_phys(dd)->dd_head_dataset_obj)
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
dd->dd_pool->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strlcpy(name, attr.za_name, namelen);
if (idp)
*idp = attr.za_first_integer;
zap_cursor_advance(&cursor);
*offp = zap_cursor_serialize(&cursor);
zap_cursor_fini(&cursor);
return (0);
}
typedef struct dmu_objset_find_ctx {
taskq_t *dc_tq;
dsl_pool_t *dc_dp;
uint64_t dc_ddobj;
char *dc_ddname; /* last component of ddobj's name */
int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
void *dc_arg;
int dc_flags;
kmutex_t *dc_error_lock;
int *dc_error;
} dmu_objset_find_ctx_t;
static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
dsl_pool_t *dp = dcp->dc_dp;
dsl_dir_t *dd;
dsl_dataset_t *ds;
zap_cursor_t zc;
zap_attribute_t *attr;
uint64_t thisobj;
int err = 0;
/* don't process if there already was an error */
if (*dcp->dc_error != 0)
goto out;
/*
* Note: passing the name (dc_ddname) here is optional, but it
* improves performance because we don't need to call
* zap_value_search() to determine the name.
*/
err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
if (err != 0)
goto out;
/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
if (dd->dd_myname[0] == '$') {
dsl_dir_rele(dd, FTAG);
goto out;
}
thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/*
* Iterate over all children.
*/
if (dcp->dc_flags & DS_FIND_CHILDREN) {
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
dmu_objset_find_ctx_t *child_dcp =
kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
*child_dcp = *dcp;
child_dcp->dc_ddobj = attr->za_first_integer;
child_dcp->dc_ddname = spa_strdup(attr->za_name);
if (dcp->dc_tq != NULL)
(void) taskq_dispatch(dcp->dc_tq,
dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP);
else
dmu_objset_find_dp_impl(child_dcp);
}
zap_cursor_fini(&zc);
}
/*
* Iterate over all snapshots.
*/
if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
dsl_dataset_t *ds;
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
if (err == 0) {
uint64_t snapobj;
snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
dsl_dataset_rele(ds, FTAG);
for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
err = dsl_dataset_hold_obj(dp,
attr->za_first_integer, FTAG, &ds);
if (err != 0)
break;
err = dcp->dc_func(dp, ds, dcp->dc_arg);
dsl_dataset_rele(ds, FTAG);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
}
}
kmem_free(attr, sizeof (zap_attribute_t));
if (err != 0) {
dsl_dir_rele(dd, FTAG);
goto out;
}
/*
* Apply to self.
*/
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
/*
* Note: we hold the dir while calling dsl_dataset_hold_obj() so
* that the dir will remain cached, and we won't have to re-instantiate
* it (which could be expensive due to finding its name via
* zap_value_search()).
*/
dsl_dir_rele(dd, FTAG);
if (err != 0)
goto out;
err = dcp->dc_func(dp, ds, dcp->dc_arg);
dsl_dataset_rele(ds, FTAG);
out:
if (err != 0) {
mutex_enter(dcp->dc_error_lock);
/* only keep first error */
if (*dcp->dc_error == 0)
*dcp->dc_error = err;
mutex_exit(dcp->dc_error_lock);
}
if (dcp->dc_ddname != NULL)
spa_strfree(dcp->dc_ddname);
kmem_free(dcp, sizeof (*dcp));
}
static void
dmu_objset_find_dp_cb(void *arg)
{
dmu_objset_find_ctx_t *dcp = arg;
dsl_pool_t *dp = dcp->dc_dp;
/*
* We need to get a pool_config_lock here, as there are several
* assert(pool_config_held) down the stack. Getting a lock via
* dsl_pool_config_enter is risky, as it might be stalled by a
* pending writer. This would deadlock, as the write lock can
* only be granted when our parent thread gives up the lock.
* The _prio interface gives us priority over a pending writer.
*/
dsl_pool_config_enter_prio(dp, FTAG);
dmu_objset_find_dp_impl(dcp);
dsl_pool_config_exit(dp, FTAG);
}
/*
* Find objsets under and including ddobj, call func(ds) on each.
* The order for the enumeration is completely undefined.
* func is called with dsl_pool_config held.
*/
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
int error = 0;
taskq_t *tq = NULL;
int ntasks;
dmu_objset_find_ctx_t *dcp;
kmutex_t err_lock;
mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
dcp->dc_tq = NULL;
dcp->dc_dp = dp;
dcp->dc_ddobj = ddobj;
dcp->dc_ddname = NULL;
dcp->dc_func = func;
dcp->dc_arg = arg;
dcp->dc_flags = flags;
dcp->dc_error_lock = &err_lock;
dcp->dc_error = &error;
if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
/*
* If a write lock is held we can't make use of parallelism,
* because the worker threads assert the lock via
* dsl_pool_config_held down their call stacks. With a read
* lock this is solved by taking a read lock in each worker
* thread, which isn't possible with a writer lock, so we fall
* back to the synchronous path here.
* In the future it might be possible to teach
* dsl_pool_config_held to return true for the worker threads,
* so that a single lock held by this thread suffices. For now,
* stay single threaded.
*/
dmu_objset_find_dp_impl(dcp);
mutex_destroy(&err_lock);
return (error);
}
ntasks = dmu_find_threads;
if (ntasks == 0)
ntasks = vdev_count_leaves(dp->dp_spa) * 4;
tq = taskq_create("dmu_objset_find", ntasks, maxclsyspri, ntasks,
INT_MAX, 0);
if (tq == NULL) {
kmem_free(dcp, sizeof (*dcp));
mutex_destroy(&err_lock);
return (SET_ERROR(ENOMEM));
}
dcp->dc_tq = tq;
/* dcp will be freed by task */
(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);
/*
* PORTING: this code relies on the property of taskq_wait to wait
* until no more tasks are queued and no more tasks are active. As
* we always queue new tasks from within other tasks, taskq_wait
* reliably waits for the full recursion to finish, even though we
* enqueue new tasks after taskq_wait has been called.
* On platforms other than illumos, taskq_wait may not have this
* property.
*/
taskq_wait(tq);
taskq_destroy(tq);
mutex_destroy(&err_lock);
return (error);
}
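/*
 * Callback sketch (hypothetical names, for illustration only): func has
 * the shape int (*)(dsl_pool_t *, dsl_dataset_t *, void *) and runs with
 * the pool config lock held, as noted above:
 *
 *     static int
 *     count_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 *     {
 *             (*(uint64_t *)arg)++;
 *             return (0);
 *     }
 *
 *     uint64_t n = 0;
 *     int err = dmu_objset_find_dp(dp, ddobj, count_cb, &n,
 *         DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 *
 * The first nonzero value returned by the callback is recorded and
 * becomes the return value once the traversal drains.
 */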
/*
* Find all objsets under name, and for each, call 'func(child_name, arg)'.
* The dp_config_rwlock must not be held when this is called, and it
* will not be held when the callback is called.
* Therefore this function should only be used when the pool is not changing
* (e.g. in syncing context), or the callback can deal with the possible races.
*/
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
int func(const char *, void *), void *arg, int flags)
{
dsl_dir_t *dd;
dsl_pool_t *dp = spa_get_dsl(spa);
dsl_dataset_t *ds;
zap_cursor_t zc;
zap_attribute_t *attr;
char *child;
uint64_t thisobj;
int err;
dsl_pool_config_enter(dp, FTAG);
err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
if (err != 0) {
dsl_pool_config_exit(dp, FTAG);
return (err);
}
/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
if (dd->dd_myname[0] == '$') {
dsl_dir_rele(dd, FTAG);
dsl_pool_config_exit(dp, FTAG);
return (0);
}
thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/*
* Iterate over all children.
*/
if (flags & DS_FIND_CHILDREN) {
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
child = kmem_asprintf("%s/%s", name, attr->za_name);
dsl_pool_config_exit(dp, FTAG);
err = dmu_objset_find_impl(spa, child,
func, arg, flags);
dsl_pool_config_enter(dp, FTAG);
kmem_strfree(child);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
if (err != 0) {
dsl_dir_rele(dd, FTAG);
dsl_pool_config_exit(dp, FTAG);
kmem_free(attr, sizeof (zap_attribute_t));
return (err);
}
}
/*
* Iterate over all snapshots.
*/
if (flags & DS_FIND_SNAPSHOTS) {
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
if (err == 0) {
uint64_t snapobj;
snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
dsl_dataset_rele(ds, FTAG);
for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
child = kmem_asprintf("%s@%s",
name, attr->za_name);
dsl_pool_config_exit(dp, FTAG);
err = func(child, arg);
dsl_pool_config_enter(dp, FTAG);
kmem_strfree(child);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
}
}
dsl_dir_rele(dd, FTAG);
kmem_free(attr, sizeof (zap_attribute_t));
dsl_pool_config_exit(dp, FTAG);
if (err != 0)
return (err);
/* Apply to self. */
return (func(name, arg));
}
/*
* See comment above dmu_objset_find_impl().
*/
int
dmu_objset_find(const char *name, int func(const char *, void *), void *arg,
int flags)
{
spa_t *spa;
int error;
error = spa_open(name, &spa, FTAG);
if (error != 0)
return (error);
error = dmu_objset_find_impl(spa, name, func, arg, flags);
spa_close(spa, FTAG);
return (error);
}
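/*
 * Name-based variant sketch (hypothetical dataset name, illustration
 * only): here the callback receives the dataset name rather than a held
 * dsl_dataset_t, and, unlike dmu_objset_find_dp(), it runs without the
 * pool config lock held (see the comment above dmu_objset_find_impl()):
 *
 *     static int
 *     visit_cb(const char *name, void *arg)
 *     {
 *             (void) name; (void) arg;
 *             return (0);
 *     }
 *
 *     error = dmu_objset_find("tank", visit_cb, NULL,
 *         DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */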
boolean_t
dmu_objset_incompatible_encryption_version(objset_t *os)
{
return (dsl_dir_incompatible_encryption_version(
os->os_dsl_dataset->ds_dir));
}
void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
os->os_user_ptr = user_ptr;
}
void *
dmu_objset_get_user(objset_t *os)
{
ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
return (os->os_user_ptr);
}
/*
* Determine name of filesystem, given name of snapshot.
* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
*/
int
dmu_fsname(const char *snapname, char *buf)
{
char *atp = strchr(snapname, '@');
if (atp == NULL)
return (SET_ERROR(EINVAL));
if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(buf, snapname, atp - snapname + 1);
return (0);
}
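/*
 * Worked example (hypothetical input): for snapname "pool/fs@today",
 * atp - snapname is 7, so the strlcpy() above copies at most 8 bytes
 * and buf ends up holding "pool/fs"; a name without '@' fails with
 * EINVAL instead.
 */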
/*
* Call when we think we're going to write/free space in open context
* to track the amount of dirty data in the open txg, which is also the
* amount of memory that cannot be evicted until this txg syncs.
*
* Note that there are two conditions where this can be called from
* syncing context:
*
* [1] When we just created the dataset, in which case we go on with
* updating any accounting of dirty data as usual.
* [2] When we are dirtying MOS data, in which case we only update the
* pool's accounting of dirty data.
*/
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);
if (ds != NULL) {
dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
}
dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_objset_zil);
EXPORT_SYMBOL(dmu_objset_pool);
EXPORT_SYMBOL(dmu_objset_ds);
EXPORT_SYMBOL(dmu_objset_type);
EXPORT_SYMBOL(dmu_objset_name);
EXPORT_SYMBOL(dmu_objset_hold);
EXPORT_SYMBOL(dmu_objset_hold_flags);
EXPORT_SYMBOL(dmu_objset_own);
EXPORT_SYMBOL(dmu_objset_rele);
EXPORT_SYMBOL(dmu_objset_rele_flags);
EXPORT_SYMBOL(dmu_objset_disown);
EXPORT_SYMBOL(dmu_objset_from_ds);
EXPORT_SYMBOL(dmu_objset_create);
EXPORT_SYMBOL(dmu_objset_clone);
EXPORT_SYMBOL(dmu_objset_stats);
EXPORT_SYMBOL(dmu_objset_fast_stat);
EXPORT_SYMBOL(dmu_objset_spa);
EXPORT_SYMBOL(dmu_objset_space);
EXPORT_SYMBOL(dmu_objset_fsid_guid);
EXPORT_SYMBOL(dmu_objset_find);
EXPORT_SYMBOL(dmu_objset_byteswap);
EXPORT_SYMBOL(dmu_objset_evict_dbufs);
EXPORT_SYMBOL(dmu_objset_snap_cmtime);
EXPORT_SYMBOL(dmu_objset_dnodesize);
EXPORT_SYMBOL(dmu_objset_sync);
EXPORT_SYMBOL(dmu_objset_is_dirty);
EXPORT_SYMBOL(dmu_objset_create_impl_dnstats);
EXPORT_SYMBOL(dmu_objset_create_impl);
EXPORT_SYMBOL(dmu_objset_open_impl);
EXPORT_SYMBOL(dmu_objset_evict);
EXPORT_SYMBOL(dmu_objset_register_type);
EXPORT_SYMBOL(dmu_objset_sync_done);
EXPORT_SYMBOL(dmu_objset_userquota_get_ids);
EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
EXPORT_SYMBOL(dmu_objset_userobjused_enabled);
EXPORT_SYMBOL(dmu_objset_userobjspace_upgradable);
EXPORT_SYMBOL(dmu_objset_userobjspace_present);
EXPORT_SYMBOL(dmu_objset_projectquota_enabled);
EXPORT_SYMBOL(dmu_objset_projectquota_present);
EXPORT_SYMBOL(dmu_objset_projectquota_upgradable);
EXPORT_SYMBOL(dmu_objset_id_quota_upgrade);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dmu_recv.c b/sys/contrib/openzfs/module/zfs/dmu_recv.c
index c22a95f8647f..2fdd7c1ece73 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_recv.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_recv.c
@@ -1,3799 +1,3799 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2022 Axcient.
*/
#include <sys/arc.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zvol.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
#include <sys/zfs_file.h>
static uint_t zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
static uint_t zfs_recv_queue_ff = 20;
static uint_t zfs_recv_write_batch_size = 1024 * 1024;
static int zfs_recv_best_effort_corrective = 0;
static const void *const dmu_recv_tag = "dmu_recv_tag";
const char *const recv_clone_name = "%recv";
typedef enum {
ORNS_NO,
ORNS_YES,
ORNS_MAYBE
} or_need_sync_t;
static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
void *buf);
struct receive_record_arg {
dmu_replay_record_t header;
void *payload; /* Pointer to a buffer containing the payload */
/*
* If the record is a WRITE or SPILL, pointer to the abd containing the
* payload.
*/
abd_t *abd;
int payload_size;
uint64_t bytes_read; /* bytes read from stream when record created */
boolean_t eos_marker; /* Marks the end of the stream */
bqueue_node_t node;
};
struct receive_writer_arg {
objset_t *os;
boolean_t byteswap;
bqueue_t q;
/*
* These three members are used to signal to the main thread when
* we're done.
*/
kmutex_t mutex;
kcondvar_t cv;
boolean_t done;
int err;
const char *tofs;
boolean_t heal;
boolean_t resumable;
boolean_t raw; /* DMU_BACKUP_FEATURE_RAW set */
boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
boolean_t full; /* this is a full send stream */
uint64_t last_object;
uint64_t last_offset;
uint64_t max_object; /* highest object ID referenced in stream */
uint64_t bytes_read; /* bytes read when current record created */
list_t write_batch;
/* Encryption parameters for the last received DRR_OBJECT_RANGE */
boolean_t or_crypt_params_present;
uint64_t or_firstobj;
uint64_t or_numslots;
uint8_t or_salt[ZIO_DATA_SALT_LEN];
uint8_t or_iv[ZIO_DATA_IV_LEN];
uint8_t or_mac[ZIO_DATA_MAC_LEN];
boolean_t or_byteorder;
zio_t *heal_pio;
/* Keep track of DRR_FREEOBJECTS right after DRR_OBJECT_RANGE */
or_need_sync_t or_need_sync;
};
typedef struct dmu_recv_begin_arg {
const char *drba_origin;
dmu_recv_cookie_t *drba_cookie;
cred_t *drba_cred;
proc_t *drba_proc;
dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
drr->drr_type = BSWAP_32(drr->drr_type);
drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
switch (drr->drr_type) {
case DRR_BEGIN:
DO64(drr_begin.drr_magic);
DO64(drr_begin.drr_versioninfo);
DO64(drr_begin.drr_creation_time);
DO32(drr_begin.drr_type);
DO32(drr_begin.drr_flags);
DO64(drr_begin.drr_toguid);
DO64(drr_begin.drr_fromguid);
break;
case DRR_OBJECT:
DO64(drr_object.drr_object);
DO32(drr_object.drr_type);
DO32(drr_object.drr_bonustype);
DO32(drr_object.drr_blksz);
DO32(drr_object.drr_bonuslen);
DO32(drr_object.drr_raw_bonuslen);
DO64(drr_object.drr_toguid);
DO64(drr_object.drr_maxblkid);
break;
case DRR_FREEOBJECTS:
DO64(drr_freeobjects.drr_firstobj);
DO64(drr_freeobjects.drr_numobjs);
DO64(drr_freeobjects.drr_toguid);
break;
case DRR_WRITE:
DO64(drr_write.drr_object);
DO32(drr_write.drr_type);
DO64(drr_write.drr_offset);
DO64(drr_write.drr_logical_size);
DO64(drr_write.drr_toguid);
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
DO64(drr_write.drr_key.ddk_prop);
DO64(drr_write.drr_compressed_size);
break;
case DRR_WRITE_EMBEDDED:
DO64(drr_write_embedded.drr_object);
DO64(drr_write_embedded.drr_offset);
DO64(drr_write_embedded.drr_length);
DO64(drr_write_embedded.drr_toguid);
DO32(drr_write_embedded.drr_lsize);
DO32(drr_write_embedded.drr_psize);
break;
case DRR_FREE:
DO64(drr_free.drr_object);
DO64(drr_free.drr_offset);
DO64(drr_free.drr_length);
DO64(drr_free.drr_toguid);
break;
case DRR_SPILL:
DO64(drr_spill.drr_object);
DO64(drr_spill.drr_length);
DO64(drr_spill.drr_toguid);
DO64(drr_spill.drr_compressed_size);
DO32(drr_spill.drr_type);
break;
case DRR_OBJECT_RANGE:
DO64(drr_object_range.drr_firstobj);
DO64(drr_object_range.drr_numslots);
DO64(drr_object_range.drr_toguid);
break;
case DRR_REDACT:
DO64(drr_redact.drr_object);
DO64(drr_redact.drr_offset);
DO64(drr_redact.drr_length);
DO64(drr_redact.drr_toguid);
break;
case DRR_END:
DO64(drr_end.drr_toguid);
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
break;
default:
break;
}
if (drr->drr_type != DRR_BEGIN) {
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
}
#undef DO64
#undef DO32
}
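/*
 * Illustrative expansion (derived from the DO64/DO32 macros above):
 * DO64(drr_begin.drr_toguid) rewrites in place to
 *
 *     drr->drr_u.drr_begin.drr_toguid =
 *         BSWAP_64(drr->drr_u.drr_begin.drr_toguid);
 *
 * so each case byte-swaps the fixed-width fields of that record type's
 * header in place.
 */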
static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Check that the new stream we're trying to receive is redacted with respect to
* a subset of the snapshots that the origin was redacted with respect to. For
* the reasons behind this, see the man page on redacted zfs sends and receives.
*/
static boolean_t
compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
uint64_t *redact_snaps, uint64_t num_redact_snaps)
{
/*
* Short circuit the comparison; if we are redacted with respect to
* more snapshots than the origin, we can't be redacted with respect
* to a subset.
*/
if (num_redact_snaps > origin_num_snaps) {
return (B_FALSE);
}
for (int i = 0; i < num_redact_snaps; i++) {
if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
redact_snaps[i])) {
return (B_FALSE);
}
}
return (B_TRUE);
}
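/*
 * Worked example (hypothetical redaction GUID sets): if the origin is
 * redacted with respect to snapshots {A, B, C}, a stream redacted with
 * respect to {A, B} is compatible (a subset), while a stream redacted
 * with respect to {A, D} is not, because D is not among the origin's
 * redaction snapshots.
 */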
static boolean_t
redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
{
uint64_t *origin_snaps;
uint64_t origin_num_snaps;
dmu_recv_cookie_t *drc = drba->drba_cookie;
struct drr_begin *drrb = drc->drc_drrb;
int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
int err = 0;
boolean_t ret = B_TRUE;
uint64_t *redact_snaps;
uint_t numredactsnaps;
/*
* If this is a full send stream, we're safe no matter what.
*/
if (drrb->drr_fromguid == 0)
return (ret);
VERIFY(dsl_dataset_get_uint64_array_feature(origin,
SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) ==
0) {
/*
* If the send stream was sent from the redaction bookmark or
* the redacted version of the dataset, then we're safe. Verify
* that this is from a compatible redaction bookmark or
* redacted dataset.
*/
if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
redact_snaps, numredactsnaps)) {
err = EINVAL;
}
} else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
/*
* If the stream is redacted, it must be redacted with respect
* to a subset of what the origin is redacted with respect to.
* See case number 2 in the zfs man page section on redacted zfs
* send.
*/
err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);
if (err != 0 || !compatible_redact_snaps(origin_snaps,
origin_num_snaps, redact_snaps, numredactsnaps)) {
err = EINVAL;
}
} else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
drrb->drr_toguid)) {
/*
* If the stream isn't redacted but the origin is, this must be
* one of the snapshots the origin is redacted with respect to.
* See case number 1 in the zfs man page section on redacted zfs
* send.
*/
err = EINVAL;
}
if (err != 0)
ret = B_FALSE;
return (ret);
}
/*
* If we previously received a stream with --large-block, we don't support
* receiving an incremental on top of it without --large-block. This avoids
* forcing a read-modify-write or trying to re-aggregate a string of WRITE
* records.
*/
static int
recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
{
if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
!(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
return (0);
}
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
uint64_t fromguid, uint64_t featureflags)
{
uint64_t obj;
uint64_t children;
int error;
dsl_dataset_t *snap;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;
/* Temporary clone name must not exist. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
8, 1, &obj);
if (error != ENOENT)
return (error == 0 ? SET_ERROR(EBUSY) : error);
/* Resume state must not be set. */
if (dsl_dataset_has_resume_receive_state(ds))
return (SET_ERROR(EBUSY));
/* New snapshot name must not exist if we're not healing it. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj,
drba->drba_cookie->drc_tosnap, 8, 1, &obj);
if (drba->drba_cookie->drc_heal) {
if (error != 0)
return (error);
} else if (error != ENOENT) {
return (error == 0 ? SET_ERROR(EEXIST) : error);
}
/* Must not have children if receiving a ZVOL. */
error = zap_count(dp->dp_meta_objset,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
if (error != 0)
return (error);
if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
children > 0)
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
/*
* Check snapshot limit before receiving. We'll recheck again at the
* end, but might as well abort before receiving if we're already over
* the limit.
*
* Note that we do not check the file system limit with
* dsl_dir_fscount_check because the temporary %clones don't count
* against that limit.
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
NULL, drba->drba_cred, drba->drba_proc);
if (error != 0)
return (error);
if (drba->drba_cookie->drc_heal) {
/* Encryption is incompatible with embedded data. */
if (encrypted && embed)
return (SET_ERROR(EINVAL));
/* Healing is not supported when in 'force' mode. */
if (drba->drba_cookie->drc_force)
return (SET_ERROR(EINVAL));
/* Must have keys loaded if doing encrypted non-raw recv. */
if (encrypted && !raw) {
if (spa_keystore_lookup_key(dp->dp_spa, ds->ds_object,
NULL, NULL) != 0)
return (SET_ERROR(EACCES));
}
error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap);
if (error != 0)
return (error);
/*
* When not doing best-effort corrective recv, healing can only
* be done if the send stream is for the same snapshot as the
* one we are trying to heal.
*/
if (zfs_recv_best_effort_corrective == 0 &&
drba->drba_cookie->drc_drrb->drr_toguid !=
dsl_dataset_phys(snap)->ds_guid) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ENOTSUP));
}
dsl_dataset_rele(snap, FTAG);
} else if (fromguid != 0) {
/* Sanity check the incremental recv */
uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
/* Can't perform a raw receive on top of a non-raw receive */
if (!encrypted && raw)
return (SET_ERROR(EINVAL));
/* Encryption is incompatible with embedded data */
if (encrypted && embed)
return (SET_ERROR(EINVAL));
/* Find snapshot in this dir that matches fromguid. */
while (obj != 0) {
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
return (SET_ERROR(ENODEV));
if (snap->ds_dir != ds->ds_dir) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ENODEV));
}
if (dsl_dataset_phys(snap)->ds_guid == fromguid)
break;
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_dataset_rele(snap, FTAG);
}
if (obj == 0)
return (SET_ERROR(ENODEV));
if (drba->drba_cookie->drc_force) {
drba->drba_cookie->drc_fromsnapobj = obj;
} else {
/*
* If we are not forcing, there must be no
* changes since fromsnap. Raw sends have an
* additional constraint that requires that
* no "noop" snapshots exist between fromsnap
* and tosnap for the IVset checking code to
* work properly.
*/
if (dsl_dataset_modified_since_snap(ds, snap) ||
(raw &&
dsl_dataset_phys(ds)->ds_prev_snap_obj !=
snap->ds_object)) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ETXTBSY));
}
drba->drba_cookie->drc_fromsnapobj =
ds->ds_prev->ds_object;
}
if (dsl_dataset_feature_is_active(snap,
SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
snap)) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(EINVAL));
}
error = recv_check_large_blocks(snap, featureflags);
if (error != 0) {
dsl_dataset_rele(snap, FTAG);
return (error);
}
dsl_dataset_rele(snap, FTAG);
} else {
/* If full and not healing then must be forced. */
if (!drba->drba_cookie->drc_force)
return (SET_ERROR(EEXIST));
/*
* We don't support using zfs recv -F to blow away
* encrypted filesystems. This would require the
* dsl dir to point to the old encryption key and
* the new one at the same time during the receive.
*/
if ((!encrypted && raw) || encrypted)
return (SET_ERROR(EINVAL));
/*
* Perform the same encryption checks we would if
* we were creating a new dataset from scratch.
*/
if (!raw) {
boolean_t will_encrypt;
error = dmu_objset_create_crypt_check(
ds->ds_dir->dd_parent, drba->drba_dcp,
&will_encrypt);
if (error != 0)
return (error);
if (will_encrypt && embed)
return (SET_ERROR(EINVAL));
}
}
return (0);
}
/*
* Check that any feature flags used in the data stream we're receiving are
* supported by the pool we are receiving into.
*
* Note that some of the features we explicitly check here have additional
* (implicit) features they depend on, but those dependencies are enforced
* through the zfeature_register() calls declaring the features that we
* explicitly check.
*/
static int
recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
{
/*
* Check if there are any unsupported feature flags.
*/
if (!DMU_STREAM_SUPPORTED(featureflags)) {
return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
}
/* Verify pool version supports SA if SA_SPILL feature set */
if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
spa_version(spa) < SPA_VERSION_SA)
return (SET_ERROR(ENOTSUP));
/*
* LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
* and large_dnodes in the stream can only be used if those pool
* features are enabled because we don't attempt to decompress /
* un-embed / un-mooch / split up the blocks / dnodes during the
* receive process.
*/
if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (SET_ERROR(ENOTSUP));
/*
* Receiving redacted streams requires that redacted datasets are
* enabled.
*/
if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
return (SET_ERROR(ENOTSUP));
return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
uint64_t fromguid = drrb->drr_fromguid;
int flags = drrb->drr_flags;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
const char *tofs = drba->drba_cookie->drc_tofs;
/* already checked */
ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES ||
((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
return (SET_ERROR(EINVAL));
error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
if (error != 0)
return (error);
/* Resumable receives require extensible datasets */
if (drba->drba_cookie->drc_resumable &&
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
return (SET_ERROR(ENOTSUP));
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
/* raw receives require the encryption feature */
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
return (SET_ERROR(ENOTSUP));
/* embedded data is incompatible with encryption and raw recv */
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (SET_ERROR(EINVAL));
/* raw receives require spill block allocation flag */
if (!(flags & DRR_FLAG_SPILL_BLOCK))
return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
} else {
/*
* We support unencrypted datasets below encrypted ones now,
* so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
* with a dataset we may encrypt.
*/
if (drba->drba_dcp == NULL ||
drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
}
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error == 0) {
/* target fs already exists; recv into temp clone */
/* Can't recv a clone into an existing fs */
if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
error = recv_begin_check_existing_impl(drba, ds, fromguid,
featureflags);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
} else if (error == ENOENT) {
/* target fs does not exist; must be a full backup or clone */
char buf[ZFS_MAX_DATASET_NAME_LEN];
objset_t *os;
/* healing recv must be done "into" an existing snapshot */
if (drba->drba_cookie->drc_heal == B_TRUE)
return (SET_ERROR(ENOTSUP));
/*
* If it's a non-clone incremental, we are missing the
* target fs, so fail the recv.
*/
if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
drba->drba_origin))
return (SET_ERROR(ENOENT));
/*
* If we're receiving a full send as a clone, and it doesn't
* contain all the necessary free records and freeobject
* records, reject it.
*/
if (fromguid == 0 && drba->drba_origin != NULL &&
!(flags & DRR_FLAG_FREERECORDS))
return (SET_ERROR(EINVAL));
/* Open the parent of tofs */
ASSERT3U(strlen(tofs), <, sizeof (buf));
(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
error = dsl_dataset_hold(dp, buf, FTAG, &ds);
if (error != 0)
return (error);
if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
drba->drba_origin == NULL) {
boolean_t will_encrypt;
/*
* Check that we aren't breaking any encryption rules
* and that we have all the parameters we need to
* create an encrypted dataset if necessary. If we are
* making an encrypted dataset the stream can't have
* embedded data.
*/
error = dmu_objset_create_crypt_check(ds->ds_dir,
drba->drba_dcp, &will_encrypt);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (will_encrypt &&
(featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
}
/*
* Check filesystem and snapshot limits before receiving. We'll
* recheck snapshot limits again at the end (we create the
* filesystems and increment those counts during begin_sync).
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
ZFS_PROP_FILESYSTEM_LIMIT, NULL,
drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
ZFS_PROP_SNAPSHOT_LIMIT, NULL,
drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/* can't recv below anything but filesystems (eg. no ZVOLs) */
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
if (drba->drba_origin != NULL) {
dsl_dataset_t *origin;
error = dsl_dataset_hold_flags(dp, drba->drba_origin,
dsflags, FTAG, &origin);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (!origin->ds_is_snapshot) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
fromguid != 0) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENODEV));
}
if (origin->ds_dir->dd_crypto_obj != 0 &&
(featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* If the origin is redacted we need to verify that this
* send stream can safely be received on top of the
* origin.
*/
if (dsl_dataset_feature_is_active(origin,
SPA_FEATURE_REDACTED_DATASETS)) {
if (!redact_check(drba, origin)) {
dsl_dataset_rele_flags(origin, dsflags,
FTAG);
dsl_dataset_rele_flags(ds, dsflags,
FTAG);
return (SET_ERROR(EINVAL));
}
}
error = recv_check_large_blocks(ds, featureflags);
if (error != 0) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (error);
}
dsl_dataset_rele_flags(origin, dsflags, FTAG);
}
dsl_dataset_rele(ds, FTAG);
error = 0;
}
return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
dmu_recv_cookie_t *drc = drba->drba_cookie;
struct drr_begin *drrb = drc->drc_drrb;
const char *tofs = drc->drc_tofs;
uint64_t featureflags = drc->drc_featureflags;
dsl_dataset_t *ds, *newds;
objset_t *os;
uint64_t dsobj;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t crflags = 0;
dsl_crypto_params_t dummy_dcp = { 0 };
dsl_crypto_params_t *dcp = drba->drba_dcp;
if (drrb->drr_flags & DRR_FLAG_CI_DATA)
crflags |= DS_FLAG_CI_DATASET;
if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
dsflags |= DS_HOLD_FLAG_DECRYPT;
/*
* Raw, non-incremental recvs always use a dummy dcp with
* the raw cmd set. Raw incremental recvs do not use a dcp
* since the encryption parameters are already set in stone.
*/
if (dcp == NULL && drrb->drr_fromguid == 0 &&
drba->drba_origin == NULL) {
ASSERT3P(dcp, ==, NULL);
dcp = &dummy_dcp;
if (featureflags & DMU_BACKUP_FEATURE_RAW)
dcp->cp_cmd = DCP_CMD_RAW_RECV;
}
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error == 0) {
/* Create temporary clone unless we're doing corrective recv */
dsl_dataset_t *snap = NULL;
if (drba->drba_cookie->drc_fromsnapobj != 0) {
VERIFY0(dsl_dataset_hold_obj(dp,
drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
ASSERT3P(dcp, ==, NULL);
}
if (drc->drc_heal) {
/* When healing we want to use the provided snapshot */
VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap,
&dsobj));
} else {
dsobj = dsl_dataset_create_sync(ds->ds_dir,
recv_clone_name, snap, crflags, drba->drba_cred,
dcp, tx);
}
if (drba->drba_cookie->drc_fromsnapobj != 0)
dsl_dataset_rele(snap, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
} else {
dsl_dir_t *dd;
const char *tail;
dsl_dataset_t *origin = NULL;
VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
if (drba->drba_origin != NULL) {
VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
FTAG, &origin));
ASSERT3P(dcp, ==, NULL);
}
/* Create new dataset. */
dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
origin, crflags, drba->drba_cred, dcp, tx);
if (origin != NULL)
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(dd, FTAG);
drc->drc_newfs = B_TRUE;
}
VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
&newds));
if (dsl_dataset_feature_is_active(newds,
SPA_FEATURE_REDACTED_DATASETS)) {
/*
* If the origin dataset is redacted, the child will be redacted
* when we create it. We clear the new dataset's
* redaction info; if it should be redacted, we'll fill
* in its information later.
*/
dsl_dataset_deactivate_feature(newds,
SPA_FEATURE_REDACTED_DATASETS, tx);
}
VERIFY0(dmu_objset_from_ds(newds, &os));
if (drc->drc_resumable) {
dsl_dataset_zapify(newds, tx);
if (drrb->drr_fromguid != 0) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
8, 1, &drrb->drr_fromguid, tx));
}
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
8, 1, &drrb->drr_toguid, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
uint64_t one = 1;
uint64_t zero = 0;
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
8, 1, &one, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
8, 1, &zero, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
8, 1, &zero, tx));
if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
8, 1, &one, tx));
}
uint64_t *redact_snaps;
uint_t numredactsnaps;
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
&numredactsnaps) == 0) {
VERIFY0(zap_add(mos, dsobj,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
sizeof (*redact_snaps), numredactsnaps,
redact_snaps, tx));
}
}
/*
* Usually the os->os_encrypted value is tied to the presence of a
* DSL Crypto Key object in the dd. However, that will not be received
* until dmu_recv_stream(), so we set the value manually for now.
*/
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
os->os_encrypted = B_TRUE;
drba->drba_cookie->drc_raw = B_TRUE;
}
if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
uint64_t *redact_snaps;
uint_t numredactsnaps;
VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
dsl_dataset_activate_redaction(newds, redact_snaps,
numredactsnaps, tx);
}
dmu_buf_will_dirty(newds->ds_dbuf, tx);
dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
/*
* If we actually created a non-clone, we need to create the objset
* in our new dataset. If this is a raw send we postpone this until
* dmu_recv_stream() so that we can allocate the metadnode with the
* properties from the DRR_BEGIN payload.
*/
rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
(featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
!drc->drc_heal) {
(void) dmu_objset_create_impl(dp->dp_spa,
newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
}
rrw_exit(&newds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = newds;
drba->drba_cookie->drc_os = os;
spa_history_log_internal_ds(newds, "receive", tx, " ");
}
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dmu_recv_cookie_t *drc = drba->drba_cookie;
dsl_pool_t *dp = dmu_tx_pool(tx);
struct drr_begin *drrb = drc->drc_drrb;
int error;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
dsl_dataset_t *ds;
const char *tofs = drc->drc_tofs;
/* already checked */
ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES)
return (SET_ERROR(EINVAL));
/*
* This is mostly a sanity check since we should have already done these
* checks during a previous attempt to receive the data.
*/
error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
dp->dp_spa);
if (error != 0)
return (error);
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
(void) snprintf(recvname, sizeof (recvname), "%s/%s",
tofs, recv_clone_name);
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
/* raw receives require spill block allocation flag */
if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
} else {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
boolean_t recvexist = B_TRUE;
if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
/* %recv does not exist; continue in tofs */
recvexist = B_FALSE;
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error != 0)
return (error);
}
/*
* Resume of full/newfs recv on existing dataset should be done with
* force flag
*/
if (recvexist && drrb->drr_fromguid == 0 && !drc->drc_force) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(ZFS_ERR_RESUME_EXISTS));
}
/* check that ds is marked inconsistent */
if (!DS_IS_INCONSISTENT(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/* check that there is resuming data, and that the toguid matches */
if (!dsl_dataset_is_zapified(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
uint64_t val;
error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
if (error != 0 || drrb->drr_toguid != val) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Check if the receive is still running. If so, it will be owned.
* Note that nothing else can own the dataset (e.g. after the receive
* fails) because it will be marked inconsistent.
*/
if (dsl_dataset_has_owner(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EBUSY));
}
/* There should not be any snapshots of this fs yet. */
if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Note: resume point will be checked when we process the first WRITE
* record.
*/
/* check that the origin matches */
val = 0;
(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
if (drrb->drr_fromguid != val) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
drc->drc_fromsnapobj = ds->ds_prev->ds_object;
/*
* If we're resuming, and the send is redacted, then the original send
* must have been redacted, and must have been redacted with respect to
* the same snapshots.
*/
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
uint64_t num_ds_redact_snaps;
uint64_t *ds_redact_snaps;
uint_t num_stream_redact_snaps;
uint64_t *stream_redact_snaps;
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
&num_stream_redact_snaps) != 0) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
if (!dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
&ds_redact_snaps)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
for (int i = 0; i < num_ds_redact_snaps; i++) {
if (!redact_snaps_contains(ds_redact_snaps,
num_ds_redact_snaps, stream_redact_snaps[i])) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
}
}
error = recv_check_large_blocks(ds, drc->drc_featureflags);
if (error != 0) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (error);
}
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (0);
}
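/*
 * Sync task for resuming an interrupted receive: take ownership of either
 * the %recv temporary clone or, if it does not exist, of tofs itself
 * (a resumed newfs receive), and stash the dataset in the cookie.
 */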
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
const char *tofs = drba->drba_cookie->drc_tofs;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
(void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
recv_clone_name);
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
drba->drba_cookie->drc_raw = B_TRUE;
} else {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
!= 0) {
/* %recv does not exist; continue in tofs */
VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
&ds));
drba->drba_cookie->drc_newfs = B_TRUE;
}
ASSERT(DS_IS_INCONSISTENT(ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
drba->drba_cookie->drc_raw);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = ds;
VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
drba->drba_cookie->drc_should_save = B_TRUE;
spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}
/*
* NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
* succeeds; otherwise we will leak the holds on the datasets.
*/
int
dmu_recv_begin(const char *tofs, const char *tosnap,
dmu_replay_record_t *drr_begin, boolean_t force, boolean_t heal,
boolean_t resumable, nvlist_t *localprops, nvlist_t *hidden_args,
const char *origin, dmu_recv_cookie_t *drc, zfs_file_t *fp,
offset_t *voffp)
{
dmu_recv_begin_arg_t drba = { 0 };
int err = 0;
memset(drc, 0, sizeof (dmu_recv_cookie_t));
drc->drc_drr_begin = drr_begin;
drc->drc_drrb = &drr_begin->drr_u.drr_begin;
drc->drc_tosnap = tosnap;
drc->drc_tofs = tofs;
drc->drc_force = force;
drc->drc_heal = heal;
drc->drc_resumable = resumable;
drc->drc_cred = CRED();
drc->drc_proc = curproc;
drc->drc_clone = (origin != NULL);
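/*
 * Detect the stream's byte order from the BEGIN record's magic number and
 * start the running fletcher-4 checksum accordingly.
 */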
if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
drc->drc_byteswap = B_TRUE;
(void) fletcher_4_incremental_byteswap(drr_begin,
sizeof (dmu_replay_record_t), &drc->drc_cksum);
byteswap_record(drr_begin);
} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
(void) fletcher_4_incremental_native(drr_begin,
sizeof (dmu_replay_record_t), &drc->drc_cksum);
} else {
return (SET_ERROR(EINVAL));
}
drc->drc_fp = fp;
drc->drc_voff = *voffp;
drc->drc_featureflags =
DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
/*
* Since OpenZFS 2.0.0, we have enforced a 64MB limit in userspace
* configurable via ZFS_SENDRECV_MAX_NVLIST. We enforce 256MB as a hard
* upper limit. Systems with less than 1GB of RAM will see a lower
* limit from `arc_all_memory() / 4`.
*/
if (payloadlen > (MIN((1U << 28), arc_all_memory() / 4)))
return (E2BIG);
if (payloadlen != 0) {
void *payload = vmem_alloc(payloadlen, KM_SLEEP);
/*
* For compatibility with recursive send streams, we don't do
* this here if the stream could be part of a package. Instead,
* we'll do it in dmu_recv_stream. If we pull the next header
* too early, and it's the END record, we break the `recv_skip`
* logic.
*/
err = receive_read_payload_and_next_header(drc, payloadlen,
payload);
if (err != 0) {
vmem_free(payload, payloadlen);
return (err);
}
err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
KM_SLEEP);
vmem_free(payload, payloadlen);
if (err != 0) {
kmem_free(drc->drc_next_rrd,
sizeof (*drc->drc_next_rrd));
return (err);
}
}
if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
drc->drc_spill = B_TRUE;
drba.drba_origin = origin;
drba.drba_cookie = drc;
drba.drba_cred = CRED();
drba.drba_proc = curproc;
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
err = dsl_sync_task(tofs,
dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
} else {
/*
* For non-raw, non-incremental, non-resuming receives the
* user can specify encryption parameters on the command line
* with "zfs recv -o". For these receives we create a dcp and
* pass it to the sync task. Creating the dcp will implicitly
* remove the encryption params from the localprops nvlist,
* which avoids errors when trying to set these normally
* read-only properties. Any other kind of receive that
* attempts to set these properties will fail as a result.
*/
if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW) == 0 &&
origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
localprops, hidden_args, &drba.drba_dcp);
}
if (err == 0) {
err = dsl_sync_task(tofs,
dmu_recv_begin_check, dmu_recv_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
dsl_crypto_params_free(drba.drba_dcp, !!err);
}
}
if (err != 0) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
nvlist_free(drc->drc_begin_nvl);
}
return (err);
}
/*
* Holds data needed for the corrective recv callback
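 placeholder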
*/
typedef struct cr_cb_data {
uint64_t size;
zbookmark_phys_t zb;
spa_t *spa;
} cr_cb_data_t;
static void
corrective_read_done(zio_t *zio)
{
cr_cb_data_t *data = zio->io_private;
/* Corruption corrected; update error log if needed */
if (zio->io_error == 0)
spa_remove_error(data->spa, &data->zb, &zio->io_bp->blk_birth);
kmem_free(data, sizeof (cr_cb_data_t));
abd_free(zio->io_abd);
}
/*
* zio_rewrite the data pointed to by bp with the data from the rrd's abd.
*/
static int
do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw,
struct receive_record_arg *rrd, blkptr_t *bp)
{
int err;
zio_t *io;
zbookmark_phys_t zb;
dnode_t *dn;
abd_t *abd = rrd->abd;
zio_cksum_t bp_cksum = bp->blk_cksum;
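/*
 * Remember the original bp checksum; after any decompression,
 * recompression, or re-encryption below, the rewritten data must
 * checksum to the same value before we overwrite the block.
 */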
- zio_flag_t flags = ZIO_FLAG_SPECULATIVE |
- ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_CANFAIL;
+ zio_flag_t flags = ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_RETRY |
+ ZIO_FLAG_CANFAIL;
if (rwa->raw)
flags |= ZIO_FLAG_RAW;
err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn);
if (err != 0)
return (err);
SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0,
dbuf_whichblock(dn, 0, drrw->drr_offset));
dnode_rele(dn, FTAG);
if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) {
/* Decompress the stream data */
abd_t *dabd = abd_alloc_linear(
drrw->drr_logical_size, B_FALSE);
err = zio_decompress_data(drrw->drr_compressiontype,
abd, abd_to_buf(dabd), abd_get_size(abd),
abd_get_size(dabd), NULL);
if (err != 0) {
abd_free(dabd);
return (err);
}
/* Swap in the newly decompressed data into the abd */
abd_free(abd);
abd = dabd;
}
if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/* Recompress the data */
abd_t *cabd = abd_alloc_linear(BP_GET_PSIZE(bp),
B_FALSE);
void *buf = abd_to_buf(cabd);
uint64_t csize = zio_compress_data(BP_GET_COMPRESS(bp),
abd, &buf, abd_get_size(abd),
rwa->os->os_complevel);
abd_zero_off(cabd, csize, BP_GET_PSIZE(bp) - csize);
/* Swap in newly compressed data into the abd */
abd_free(abd);
abd = cabd;
flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* The stream is not encrypted but the data on-disk is.
* We need to re-encrypt the buf using the same
* encryption type, salt, iv, and mac that was used to encrypt
* the block previously.
*/
if (!rwa->raw && BP_USES_CRYPT(bp)) {
dsl_dataset_t *ds;
dsl_crypto_key_t *dck = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
dsl_pool_t *dp = dmu_objset_pool(rwa->os);
abd_t *eabd = abd_alloc_linear(BP_GET_PSIZE(bp), B_FALSE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
dsl_pool_config_enter(dp, FTAG);
err = dsl_dataset_hold_flags(dp, rwa->tofs,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
if (err != 0) {
dsl_pool_config_exit(dp, FTAG);
abd_free(eabd);
return (SET_ERROR(EACCES));
}
/* Look up the key from the spa's keystore */
err = spa_keystore_lookup_key(rwa->os->os_spa,
zb.zb_objset, FTAG, &dck);
if (err != 0) {
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT,
FTAG);
dsl_pool_config_exit(dp, FTAG);
abd_free(eabd);
return (SET_ERROR(EACCES));
}
err = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
BP_GET_TYPE(bp), BP_SHOULD_BYTESWAP(bp), salt, iv,
mac, abd_get_size(abd), abd, eabd, &no_crypt);
spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG);
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
dsl_pool_config_exit(dp, FTAG);
ASSERT0(no_crypt);
if (err != 0) {
abd_free(eabd);
return (err);
}
/* Swap in the newly encrypted data into the abd */
abd_free(abd);
abd = eabd;
/*
* We want to prevent zio_rewrite() from trying to
* encrypt the data again
*/
flags |= ZIO_FLAG_RAW_ENCRYPT;
}
rrd->abd = abd;
io = zio_rewrite(NULL, rwa->os->os_spa, bp->blk_birth, bp, abd,
BP_GET_PSIZE(bp), NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, flags, &zb);
ASSERT(abd_get_size(abd) == BP_GET_LSIZE(bp) ||
abd_get_size(abd) == BP_GET_PSIZE(bp));
/* compute new bp checksum value and make sure it matches the old one */
zio_checksum_compute(io, BP_GET_CHECKSUM(bp), abd, abd_get_size(abd));
if (!ZIO_CHECKSUM_EQUAL(bp_cksum, io->io_bp->blk_cksum)) {
zio_destroy(io);
if (zfs_recv_best_effort_corrective != 0)
return (0);
return (SET_ERROR(ECKSUM));
}
/* Correct the corruption in place */
err = zio_wait(io);
if (err == 0) {
cr_cb_data_t *cb_data =
kmem_alloc(sizeof (cr_cb_data_t), KM_SLEEP);
cb_data->spa = rwa->os->os_spa;
cb_data->size = drrw->drr_logical_size;
cb_data->zb = zb;
/* Test if healing worked by re-reading the bp */
err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp,
abd_alloc_for_io(drrw->drr_logical_size, B_FALSE),
drrw->drr_logical_size, corrective_read_done,
cb_data, ZIO_PRIORITY_ASYNC_READ, flags, NULL));
}
if (err != 0 && zfs_recv_best_effort_corrective != 0)
err = 0;
return (err);
}
static int
receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
{
int done = 0;
/*
* The code doesn't rely on this (lengths being multiples of 8). See
* comment in dump_bytes.
*/
ASSERT(len % 8 == 0 ||
(drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
while (done < len) {
ssize_t resid = len - done;
zfs_file_t *fp = drc->drc_fp;
int err = zfs_file_read(fp, (char *)buf + done,
len - done, &resid);
if (err == 0 && resid == len - done) {
/*
* Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
* that the receive was interrupted and can
* potentially be resumed.
*/
err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
}
drc->drc_voff += len - done - resid;
done = len - resid;
if (err != 0)
return (err);
}
drc->drc_bytes_read += len;
ASSERT3U(done, ==, len);
return (0);
}
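/*
 * Derive the number of block pointers a dnode will have from its bonus
 * type and length: a dnode with an SA bonus always has a single block
 * pointer, otherwise the block pointers occupy whatever part of the
 * legacy bonus area the bonus buffer leaves unused.
 */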
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
if (bonus_type == DMU_OT_SA) {
return (1);
} else {
return (1 +
((DN_OLD_MAX_BONUSLEN -
MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
}
}
static void
save_resume_state(struct receive_writer_arg *rwa,
uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
if (!rwa->resumable)
return;
/*
* We use ds_resume_bytes[] != 0 to indicate that we need to
* update this on disk, so it must not be 0.
*/
ASSERT(rwa->bytes_read != 0);
/*
* We only resume from write records, which have a valid
* (non-meta-dnode) object number.
*/
ASSERT(object != 0);
/*
* For resuming to work correctly, we must receive records in order,
* sorted by object,offset. This is checked by the callers, but
* assert it here for good measure.
*/
ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
ASSERT3U(rwa->bytes_read, >=,
rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
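/*
 * Compare the ZPL generation number stored in the existing object's bonus
 * buffer with the one carried in the stream's bonus payload, to decide
 * whether the incoming object is the same logical file or a reallocation.
 */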
static int
receive_object_is_same_generation(objset_t *os, uint64_t object,
dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
const void *new_bonus, boolean_t *samegenp)
{
zfs_file_info_t zoi;
int err;
dmu_buf_t *old_bonus_dbuf;
err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
if (err != 0)
return (err);
err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
&zoi);
dmu_buf_rele(old_bonus_dbuf, FTAG);
if (err != 0)
return (err);
uint64_t old_gen = zoi.zfi_generation;
err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
if (err != 0)
return (err);
uint64_t new_gen = zoi.zfi_generation;
*samegenp = (old_gen == new_gen);
return (0);
}
static int
receive_handle_existing_object(const struct receive_writer_arg *rwa,
const struct drr_object *drro, const dmu_object_info_t *doi,
const void *bonus_data,
uint64_t *object_to_hold, uint32_t *new_blksz)
{
uint32_t indblksz = drro->drr_indblkshift ?
1ULL << drro->drr_indblkshift : 0;
int nblkptr = deduce_nblkptr(drro->drr_bonustype,
drro->drr_bonuslen);
uint8_t dn_slots = drro->drr_dn_slots != 0 ?
drro->drr_dn_slots : DNODE_MIN_SLOTS;
boolean_t do_free_range = B_FALSE;
int err;
*object_to_hold = drro->drr_object;
/* nblkptr should be bounded by the bonus size and type */
if (rwa->raw && nblkptr != drro->drr_nblkptr)
return (SET_ERROR(EINVAL));
/*
* After the previous send stream, the sending system may
* have freed this object, and then happened to re-allocate
* this object number in a later txg. In this case, we are
* receiving a different logical file, and the block size may
* appear to be different. i.e. we may have a different
* block size for this object than what the send stream says.
* In this case we need to remove the object's contents,
* so that its structure can be changed and then its contents
* entirely replaced by subsequent WRITE records.
*
* If this is a -L (--large-block) incremental stream, and
* the previous stream was not -L, the block size may appear
* to increase. i.e. we may have a smaller block size for
* this object than what the send stream says. In this case
* we need to keep the object's contents and block size
* intact, so that we don't lose parts of the object's
* contents that are not changed by this incremental send
* stream.
*
* We can distinguish between the two above cases by using
* the ZPL's generation number (see
* receive_object_is_same_generation()). However, we only
* want to rely on the generation number when absolutely
* necessary, because with raw receives, the generation is
* encrypted. We also want to minimize dependence on the
* ZPL, so that other types of datasets can also be received
* (e.g. ZVOLs, although note that ZVOLs currently do not
* reallocate their objects or change their structure).
* Therefore, we check a number of different cases where we
* know it is safe to discard the object's contents, before
* using the ZPL's generation number to make the above
* distinction.
*/
if (drro->drr_blksz != doi->doi_data_block_size) {
if (rwa->raw) {
/*
* RAW streams always have large blocks, so
* we are sure that the data is not needed
* due to changing --large-block to be on,
* which is fortunate since the bonus buffer
* (which contains the ZPL generation) is
* encrypted, and the key might not be
* loaded.
*/
do_free_range = B_TRUE;
} else if (rwa->full) {
/*
* This is a full send stream, so it always
* replaces what we have. Even if the
* generation numbers happen to match, this
* can not actually be the same logical file.
* This is relevant when receiving a full
* send as a clone.
*/
do_free_range = B_TRUE;
} else if (drro->drr_type !=
DMU_OT_PLAIN_FILE_CONTENTS ||
doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
/*
* PLAIN_FILE_CONTENTS are the only type of
* objects that have ever been stored with
* large blocks, so we don't need the special
* logic below. ZAP blocks can shrink (when
* there's only one block), so we don't want
* to hit the error below about block size
* only increasing.
*/
do_free_range = B_TRUE;
} else if (doi->doi_max_offset <=
doi->doi_data_block_size) {
/*
* There is only one block. We can free it,
* because its contents will be replaced by a
* WRITE record. This can not be the no-L ->
* -L case, because the no-L case would have
* resulted in multiple blocks. If we
* supported -L -> no-L, it would not be safe
* to free the file's contents. Fortunately,
* that is not allowed (see
* recv_check_large_blocks()).
*/
do_free_range = B_TRUE;
} else {
boolean_t is_same_gen;
err = receive_object_is_same_generation(rwa->os,
drro->drr_object, doi->doi_bonus_type,
drro->drr_bonustype, bonus_data, &is_same_gen);
if (err != 0)
return (SET_ERROR(EINVAL));
if (is_same_gen) {
/*
* This is the same logical file, and
* the block size must be increasing.
* It could only decrease if
* --large-block was changed to be
* off, which is checked in
* recv_check_large_blocks().
*/
if (drro->drr_blksz <=
doi->doi_data_block_size)
return (SET_ERROR(EINVAL));
/*
* We keep the existing blocksize and
* contents.
*/
*new_blksz =
doi->doi_data_block_size;
} else {
do_free_range = B_TRUE;
}
}
}
/* nblkptr can only decrease if the object was reallocated */
if (nblkptr < doi->doi_nblkptr)
do_free_range = B_TRUE;
/* number of slots can only change on reallocation */
if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
do_free_range = B_TRUE;
/*
* For raw sends we also check a few other fields to
* ensure we are preserving the objset structure exactly
* as it was on the receive side:
* - A changed indirect block size
* - A smaller nlevels
*/
if (rwa->raw) {
if (indblksz != doi->doi_metadata_block_size)
do_free_range = B_TRUE;
if (drro->drr_nlevels < doi->doi_indirection)
do_free_range = B_TRUE;
}
if (do_free_range) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
0, DMU_OBJECT_END);
if (err != 0)
return (SET_ERROR(EINVAL));
}
/*
* The dmu does not currently support decreasing nlevels
* or changing the number of dnode slots on an object. For
* non-raw sends, this does not matter and the new object
* can just use the previous one's nlevels. For raw sends,
* however, the structure of the received dnode (including
* nlevels and dnode slots) must match that of the send
* side. Therefore, instead of using dmu_object_reclaim(),
* we must free the object completely and call
* dmu_object_claim_dnsize() instead.
*/
if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
err = dmu_free_long_object(rwa->os, drro->drr_object);
if (err != 0)
return (SET_ERROR(EINVAL));
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
*object_to_hold = DMU_NEW_OBJECT;
}
/*
* For raw receives, free everything beyond the new incoming
* maxblkid. Normally this would be done with a DRR_FREE
* record that would come after this DRR_OBJECT record is
* processed. However, for raw receives we manually set the
* maxblkid from the drr_maxblkid and so we must first free
* everything above that blkid to ensure the DMU is always
* consistent with itself. We will never free the first block
* of the object here because a maxblkid of 0 could indicate
* an object with a single block or one with no blocks. This
* free may be skipped when dmu_free_long_range() was called
* above since it covers the entire object's contents.
*/
if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
(drro->drr_maxblkid + 1) * doi->doi_data_block_size,
DMU_OBJECT_END);
if (err != 0)
return (SET_ERROR(EINVAL));
}
return (0);
}
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
void *data)
{
dmu_object_info_t doi;
dmu_tx_t *tx;
int err;
uint32_t new_blksz = drro->drr_blksz;
uint8_t dn_slots = drro->drr_dn_slots != 0 ?
drro->drr_dn_slots : DNODE_MIN_SLOTS;
if (drro->drr_type == DMU_OT_NONE ||
!DMU_OT_IS_VALID(drro->drr_type) ||
!DMU_OT_IS_VALID(drro->drr_bonustype) ||
drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
drro->drr_blksz < SPA_MINBLOCKSIZE ||
drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
drro->drr_bonuslen >
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
dn_slots >
(spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
return (SET_ERROR(EINVAL));
}
if (rwa->raw) {
/*
* We should have received a DRR_OBJECT_RANGE record
* containing this block and stored it in rwa.
*/
if (drro->drr_object < rwa->or_firstobj ||
drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
drro->drr_raw_bonuslen < drro->drr_bonuslen ||
drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
drro->drr_nlevels > DN_MAX_LEVELS ||
drro->drr_nblkptr > DN_MAX_NBLKPTR ||
DN_SLOTS_TO_BONUSLEN(dn_slots) <
drro->drr_raw_bonuslen)
return (SET_ERROR(EINVAL));
} else {
/*
* The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN
* record indicates this by setting DRR_FLAG_SPILL_BLOCK.
*/
if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
(!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
return (SET_ERROR(EINVAL));
}
if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
return (SET_ERROR(EINVAL));
}
}
err = dmu_object_info(rwa->os, drro->drr_object, &doi);
if (err != 0 && err != ENOENT && err != EEXIST)
return (SET_ERROR(EINVAL));
if (drro->drr_object > rwa->max_object)
rwa->max_object = drro->drr_object;
/*
* If we are losing blkptrs or changing the block size this must
* be a new file instance. We must clear out the previous file
* contents before we can change this type of metadata in the dnode.
* Raw receives will also check that the indirect structure of the
* dnode hasn't changed.
*/
uint64_t object_to_hold;
if (err == 0) {
err = receive_handle_existing_object(rwa, drro, &doi, data,
&object_to_hold, &new_blksz);
if (err != 0)
return (err);
} else if (err == EEXIST) {
/*
* The object requested is currently an interior slot of a
* multi-slot dnode. This will be resolved when the next txg
* is synced out, since the send stream will have told us
* to free this slot when we freed the associated dnode
* earlier in the stream.
*/
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
return (SET_ERROR(EINVAL));
/* object was freed and we are about to allocate a new one */
object_to_hold = DMU_NEW_OBJECT;
} else {
/*
* If the only record in this range so far was DRR_FREEOBJECTS
* with at least one actually freed object, it's possible that
* the block will now be converted to a hole. We need to wait
* for the txg to sync to prevent races.
*/
if (rwa->or_need_sync == ORNS_YES)
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
/* object is free and we are about to allocate a new one */
object_to_hold = DMU_NEW_OBJECT;
}
/* Only relevant for the first object in the range */
rwa->or_need_sync = ORNS_NO;
/*
* If this is a multi-slot dnode there is a chance that this
* object will expand into a slot that is already used by
* another object from the previous snapshot. We must free
* these objects before we attempt to allocate the new dnode.
*/
if (dn_slots > 1) {
boolean_t need_sync = B_FALSE;
for (uint64_t slot = drro->drr_object + 1;
slot < drro->drr_object + dn_slots;
slot++) {
dmu_object_info_t slot_doi;
err = dmu_object_info(rwa->os, slot, &slot_doi);
if (err == ENOENT || err == EEXIST)
continue;
else if (err != 0)
return (err);
err = dmu_free_long_object(rwa->os, slot);
if (err != 0)
return (err);
need_sync = B_TRUE;
}
if (need_sync)
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
}
tx = dmu_tx_create(rwa->os);
dmu_tx_hold_bonus(tx, object_to_hold);
dmu_tx_hold_write(tx, object_to_hold, 0, 0);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
if (object_to_hold == DMU_NEW_OBJECT) {
/* Currently free, wants to be allocated */
err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, tx);
} else if (drro->drr_type != doi.doi_type ||
new_blksz != doi.doi_data_block_size ||
drro->drr_bonustype != doi.doi_bonus_type ||
drro->drr_bonuslen != doi.doi_bonus_size) {
/* Currently allocated, but with different properties */
err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, rwa->spill ?
DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
} else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
/*
* Currently allocated, the existing version of this object
* may reference a spill block that is no longer allocated
* at the source and needs to be freed.
*/
err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
}
if (err != 0) {
dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
if (rwa->or_crypt_params_present) {
/*
* Set the crypt params for the buffer associated with this
* range of dnodes. This causes the blkptr_t to have the
* same crypt params (byteorder, salt, iv, mac) as on the
* sending side.
*
* Since we are committing this tx now, it is possible for
* the dnode block to end up on-disk with the incorrect MAC,
* if subsequent objects in this block are received in a
* different txg. However, since the dataset is marked as
* inconsistent, no code paths will do a non-raw read (or
* decrypt the block / verify the MAC). The receive code and
* scrub code can safely do raw reads and verify the
* checksum. They don't need to verify the MAC.
*/
dmu_buf_t *db = NULL;
uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;
err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
if (err != 0) {
dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
dmu_buf_set_crypt_params(db, rwa->or_byteorder,
rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
dmu_buf_rele(db, FTAG);
rwa->or_crypt_params_present = B_FALSE;
}
dmu_object_set_checksum(rwa->os, drro->drr_object,
drro->drr_checksumtype, tx);
dmu_object_set_compress(rwa->os, drro->drr_object,
drro->drr_compress, tx);
/* handle more restrictive dnode structuring for raw recvs */
if (rwa->raw) {
/*
* Set the indirect block size, block shift, nlevels.
* This will not fail because we ensured all of the
* blocks were freed earlier if this is a new object.
* For non-new objects block size and indirect block
* shift cannot change and nlevels can only increase.
*/
ASSERT3U(new_blksz, ==, drro->drr_blksz);
VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
drro->drr_blksz, drro->drr_indblkshift, tx));
VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
drro->drr_nlevels, tx));
/*
* Set the maxblkid. This will always succeed because
* we freed all blocks beyond the new maxblkid above.
*/
VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
drro->drr_maxblkid, tx));
}
if (data != NULL) {
dmu_buf_t *db;
dnode_t *dn;
uint32_t flags = DMU_READ_NO_PREFETCH;
if (rwa->raw)
flags |= DMU_READ_NO_DECRYPT;
VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));
/*
* Raw bonus buffers have their byteorder determined by the
* DRR_OBJECT_RANGE record.
*/
if (rwa->byteswap && !rwa->raw) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drro->drr_bonustype);
dmu_ot_byteswap[byteswap].ob_func(db->db_data,
DRR_OBJECT_PAYLOAD_SIZE(drro));
}
dmu_buf_rele(db, FTAG);
dnode_rele(dn, FTAG);
}
dmu_tx_commit(tx);
return (0);
}
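/*
 * Free every currently allocated object in the range
 * [drr_firstobj, drr_firstobj + drr_numobjs); objects that are already
 * free are skipped by walking the objset with dmu_object_next().
 */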
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
struct drr_freeobjects *drrfo)
{
uint64_t obj;
int next_err = 0;
if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
return (SET_ERROR(EINVAL));
for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
obj < DN_MAX_OBJECT && next_err == 0;
next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
dmu_object_info_t doi;
int err;
err = dmu_object_info(rwa->os, obj, &doi);
if (err == ENOENT)
continue;
else if (err != 0)
return (err);
err = dmu_free_long_object(rwa->os, obj);
if (err != 0)
return (err);
if (rwa->or_need_sync == ORNS_MAYBE)
rwa->or_need_sync = ORNS_YES;
}
if (next_err != ESRCH)
return (next_err);
return (0);
}
/*
* Note: if this fails, the caller will clean up any records left on the
* rwa->write_batch list.
*/
static int
flush_write_batch_impl(struct receive_writer_arg *rwa)
{
dnode_t *dn;
int err;
if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
return (SET_ERROR(EINVAL));
struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;
struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
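/*
 * All records in the batch target the same object and are sorted by
 * offset, so a single tx hold covering the span from the first record's
 * offset through the end of the last record is sufficient.
 */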
dmu_tx_t *tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
last_drrw->drr_offset - first_drrw->drr_offset +
last_drrw->drr_logical_size);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
dnode_rele(dn, FTAG);
return (err);
}
struct receive_record_arg *rrd;
while ((rrd = list_head(&rwa->write_batch)) != NULL) {
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
abd_t *abd = rrd->abd;
ASSERT3U(drrw->drr_object, ==, rwa->last_object);
if (drrw->drr_logical_size != dn->dn_datablksz) {
/*
* The WRITE record is larger than the object's block
* size. We must be receiving an incremental
* large-block stream into a dataset that previously did
* a non-large-block receive. Lightweight writes must
* be exactly one block, so we need to decompress the
* data (if compressed) and do a normal dmu_write().
*/
ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
if (DRR_WRITE_COMPRESSED(drrw)) {
abd_t *decomp_abd =
abd_alloc_linear(drrw->drr_logical_size,
B_FALSE);
err = zio_decompress_data(
drrw->drr_compressiontype,
abd, abd_to_buf(decomp_abd),
abd_get_size(abd),
abd_get_size(decomp_abd), NULL);
if (err == 0) {
dmu_write_by_dnode(dn,
drrw->drr_offset,
drrw->drr_logical_size,
abd_to_buf(decomp_abd), tx);
}
abd_free(decomp_abd);
} else {
dmu_write_by_dnode(dn,
drrw->drr_offset,
drrw->drr_logical_size,
abd_to_buf(abd), tx);
}
if (err == 0)
abd_free(abd);
} else {
zio_prop_t zp = {0};
dmu_write_policy(rwa->os, dn, 0, 0, &zp);
zio_flag_t zio_flags = 0;
if (rwa->raw) {
zp.zp_encrypt = B_TRUE;
zp.zp_compress = drrw->drr_compressiontype;
zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
rwa->byteswap;
memcpy(zp.zp_salt, drrw->drr_salt,
ZIO_DATA_SALT_LEN);
memcpy(zp.zp_iv, drrw->drr_iv,
ZIO_DATA_IV_LEN);
memcpy(zp.zp_mac, drrw->drr_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
zp.zp_nopwrite = B_FALSE;
zp.zp_copies = MIN(zp.zp_copies,
SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (DRR_WRITE_COMPRESSED(drrw)) {
ASSERT3U(drrw->drr_compressed_size, >, 0);
ASSERT3U(drrw->drr_logical_size, >=,
drrw->drr_compressed_size);
zp.zp_compress = drrw->drr_compressiontype;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
} else if (rwa->byteswap) {
/*
* Note: compressed blocks never need to be
* byteswapped, because WRITE records for
* metadata blocks are never compressed. The
* exception is raw streams, which are written
* in the original byteorder, and the byteorder
* bit is preserved in the BP by setting
* zp_byteorder above.
*/
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrw->drr_type);
dmu_ot_byteswap[byteswap].ob_func(
abd_to_buf(abd),
DRR_WRITE_PAYLOAD_SIZE(drrw));
}
/*
* Since this data can't be read until the receive
* completes, we can do a "lightweight" write for
* improved performance.
*/
err = dmu_lightweight_write_by_dnode(dn,
drrw->drr_offset, abd, &zp, zio_flags, tx);
}
if (err != 0) {
/*
* This rrd is left on the list, so the caller will
* free it (and the abd).
*/
break;
}
/*
* Note: If the receive fails, we want the resume stream to
* start with the same record that we last successfully
* received (as opposed to the next record), so that we can
* verify that we are resuming from the correct location.
*/
save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
list_remove(&rwa->write_batch, rrd);
kmem_free(rrd, sizeof (*rrd));
}
dmu_tx_commit(tx);
dnode_rele(dn, FTAG);
return (err);
}
noinline static int
flush_write_batch(struct receive_writer_arg *rwa)
{
if (list_is_empty(&rwa->write_batch))
return (0);
int err = rwa->err;
if (err == 0)
err = flush_write_batch_impl(rwa);
if (err != 0) {
struct receive_record_arg *rrd;
while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
abd_free(rrd->abd);
kmem_free(rrd, sizeof (*rrd));
}
}
ASSERT(list_is_empty(&rwa->write_batch));
return (err);
}
noinline static int
receive_process_write_record(struct receive_writer_arg *rwa,
struct receive_record_arg *rrd)
{
int err = 0;
ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
!DMU_OT_IS_VALID(drrw->drr_type))
return (SET_ERROR(EINVAL));
if (rwa->heal) {
blkptr_t *bp;
dmu_buf_t *dbp;
dnode_t *dn;
int flags = DB_RF_CANFAIL;
if (rwa->raw)
flags |= DB_RF_NO_DECRYPT;
if (rwa->byteswap) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrw->drr_type);
dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(rrd->abd),
DRR_WRITE_PAYLOAD_SIZE(drrw));
}
err = dmu_buf_hold_noread(rwa->os, drrw->drr_object,
drrw->drr_offset, FTAG, &dbp);
if (err != 0)
return (err);
/* Try to read the object to see if it needs healing */
err = dbuf_read((dmu_buf_impl_t *)dbp, NULL, flags);
/*
* We only try to heal when dbuf_read() returns ECKSUM.
* Other errors (even EIO) are returned to the caller.
* EIO indicates that the device is not present/accessible,
* so writing to it will likely fail.
* If the block is healthy, we don't want to overwrite it
* unnecessarily.
*/
if (err != ECKSUM) {
dmu_buf_rele(dbp, FTAG);
return (err);
}
dn = dmu_buf_dnode_enter(dbp);
/* Make sure the on-disk block and recv record sizes match */
if (drrw->drr_logical_size !=
dn->dn_datablkszsec << SPA_MINBLOCKSHIFT) {
err = ENOTSUP;
dmu_buf_dnode_exit(dbp);
dmu_buf_rele(dbp, FTAG);
return (err);
}
/* Get the block pointer for the corrupted block */
bp = dmu_buf_get_blkptr(dbp);
err = do_corrective_recv(rwa, drrw, rrd, bp);
dmu_buf_dnode_exit(dbp);
dmu_buf_rele(dbp, FTAG);
return (err);
}
/*
* For resuming to work, records must be in increasing order
* by (object, offset).
*/
if (drrw->drr_object < rwa->last_object ||
(drrw->drr_object == rwa->last_object &&
drrw->drr_offset < rwa->last_offset)) {
return (SET_ERROR(EINVAL));
}
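/*
 * Writes are accumulated on rwa->write_batch and flushed in a single tx;
 * flush now if this record targets a different object than the batch or
 * would push the batch past the configured batch size.
 */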
struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
uint64_t batch_size =
MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
if (first_rrd != NULL &&
(drrw->drr_object != first_drrw->drr_object ||
drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
err = flush_write_batch(rwa);
if (err != 0)
return (err);
}
rwa->last_object = drrw->drr_object;
rwa->last_offset = drrw->drr_offset;
if (rwa->last_object > rwa->max_object)
rwa->max_object = rwa->last_object;
list_insert_tail(&rwa->write_batch, rrd);
/*
* Return EAGAIN to indicate that we will use this rrd again,
* so the caller should not free it
*/
return (EAGAIN);
}
static int
receive_write_embedded(struct receive_writer_arg *rwa,
struct drr_write_embedded *drrwe, void *data)
{
dmu_tx_t *tx;
int err;
if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
return (SET_ERROR(EINVAL));
if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
return (SET_ERROR(EINVAL));
if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
return (SET_ERROR(EINVAL));
if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
return (SET_ERROR(EINVAL));
if (rwa->raw)
return (SET_ERROR(EINVAL));
if (drrwe->drr_object > rwa->max_object)
rwa->max_object = drrwe->drr_object;
tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write(tx, drrwe->drr_object,
drrwe->drr_offset, drrwe->drr_length);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
dmu_write_embedded(rwa->os, drrwe->drr_object,
drrwe->drr_offset, data, drrwe->drr_etype,
drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
/* See comment above save_resume_state() in flush_write_batch_impl(). */
save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
dmu_tx_commit(tx);
return (0);
}
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
abd_t *abd)
{
dmu_buf_t *db, *db_spill;
int err;
if (drrs->drr_length < SPA_MINBLOCKSIZE ||
drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
return (SET_ERROR(EINVAL));
/*
* This is an unmodified spill block which was added to the stream
* to resolve an issue with incorrectly removing spill blocks. It
* should be ignored by current versions of the code which support
* the DRR_FLAG_SPILL_BLOCK flag.
*/
if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
abd_free(abd);
return (0);
}
if (rwa->raw) {
if (!DMU_OT_IS_VALID(drrs->drr_type) ||
drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
drrs->drr_compressed_size == 0)
return (SET_ERROR(EINVAL));
}
if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
return (SET_ERROR(EINVAL));
if (drrs->drr_object > rwa->max_object)
rwa->max_object = drrs->drr_object;
VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
&db_spill)) != 0) {
dmu_buf_rele(db, FTAG);
return (err);
}
dmu_tx_t *tx = dmu_tx_create(rwa->os);
dmu_tx_hold_spill(tx, db->db_object);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
dmu_tx_abort(tx);
return (err);
}
/*
* Spill blocks may both grow and shrink. When a change in size
* occurs any existing dbuf must be updated to match the logical
* size of the provided arc_buf_t.
*/
if (db_spill->db_size != drrs->drr_length) {
dmu_buf_will_fill(db_spill, tx);
VERIFY0(dbuf_spill_set_blksz(db_spill,
drrs->drr_length, tx));
}
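/*
 * Loan an arc buf for the spill data: raw receives preserve the source
 * block's byte order, crypt parameters and compression, while non-raw
 * receives take a plain buffer and byteswap the payload if the stream was
 * generated on a machine of the opposite endianness.
 */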
arc_buf_t *abuf;
if (rwa->raw) {
boolean_t byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
rwa->byteswap;
abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
drrs->drr_object, byteorder, drrs->drr_salt,
drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
drrs->drr_compressed_size, drrs->drr_length,
drrs->drr_compressiontype, 0);
} else {
abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
DMU_OT_IS_METADATA(drrs->drr_type),
drrs->drr_length);
if (rwa->byteswap) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrs->drr_type);
dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
DRR_SPILL_PAYLOAD_SIZE(drrs));
}
}
memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
abd_free(abd);
dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
dmu_tx_commit(tx);
return (0);
}
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
int err;
if (drrf->drr_length != -1ULL &&
drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
return (SET_ERROR(EINVAL));
if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
return (SET_ERROR(EINVAL));
if (drrf->drr_object > rwa->max_object)
rwa->max_object = drrf->drr_object;
err = dmu_free_long_range(rwa->os, drrf->drr_object,
drrf->drr_offset, drrf->drr_length);
return (err);
}
static int
receive_object_range(struct receive_writer_arg *rwa,
struct drr_object_range *drror)
{
/*
* By default, we assume this block is in our native format
* (ZFS_HOST_BYTEORDER). We then take into account whether
* the send stream is byteswapped (rwa->byteswap). Finally,
* we need to byteswap again if this particular block was
* in non-native format on the send side.
*/
boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
!!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
/*
* Since dnode block sizes are constant, we should not need to worry
* about making sure that the dnode block size is the same on the
* sending and receiving sides for the time being. For non-raw sends,
* this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
* record at all). Raw sends require this record type because the
* encryption parameters are used to protect an entire block of bonus
* buffers. If the size of dnode blocks ever becomes variable,
* handling will need to be added to ensure that dnode block sizes
* match on the sending and receiving side.
*/
if (drror->drr_numslots != DNODES_PER_BLOCK ||
P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
!rwa->raw)
return (SET_ERROR(EINVAL));
if (drror->drr_firstobj > rwa->max_object)
rwa->max_object = drror->drr_firstobj;
/*
* The DRR_OBJECT_RANGE handling must be deferred to receive_object()
* so that the block of dnodes is not written out when it's empty,
* and converted to a HOLE BP.
*/
rwa->or_crypt_params_present = B_TRUE;
rwa->or_firstobj = drror->drr_firstobj;
rwa->or_numslots = drror->drr_numslots;
memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
rwa->or_byteorder = byteorder;
rwa->or_need_sync = ORNS_MAYBE;
return (0);
}
/*
* Until we have the ability to redact large ranges of data efficiently, we
* process these records as frees.
*/
noinline static int
receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
{
struct drr_free drrf = {0};
drrf.drr_length = drrr->drr_length;
drrf.drr_object = drrr->drr_object;
drrf.drr_offset = drrr->drr_offset;
drrf.drr_toguid = drrr->drr_toguid;
return (receive_free(rwa, &drrf));
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
dsl_dataset_t *ds = drc->drc_ds;
ds_hold_flags_t dsflags;
dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
/*
* Wait for the txg sync before cleaning up the receive. For
* resumable receives, this ensures that our resume state has
* been written out to disk. For raw receives, this ensures
* that the user accounting code will not attempt to do anything
* after we stopped receiving the dataset.
*/
txg_wait_synced(ds->ds_dir->dd_pool, 0);
ds->ds_objset->os_raw_receive = B_FALSE;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
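/*
 * A resumable receive that has made progress (non-hole objset bp) is only
 * disowned so it can be resumed later; anything else is disowned and the
 * partially received dataset destroyed (except when healing, which
 * operates on an existing dataset).
 */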
if (drc->drc_resumable && drc->drc_should_save &&
!BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
} else {
char name[ZFS_MAX_DATASET_NAME_LEN];
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_name(ds, name);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
if (!drc->drc_heal)
(void) dsl_destroy_head(name);
}
}
static void
receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
{
if (drc->drc_byteswap) {
(void) fletcher_4_incremental_byteswap(buf, len,
&drc->drc_cksum);
} else {
(void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
}
}
/*
* Read the payload into a buffer of size len, and update the current record's
* payload field.
* Allocate drc->drc_next_rrd and read the next record's header into
* drc->drc_next_rrd->header.
* Verify checksum of payload and next record.
*/
static int
receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
{
int err;
if (len != 0) {
ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
err = receive_read(drc, len, buf);
if (err != 0)
return (err);
receive_cksum(drc, len, buf);
/* note: rrd is NULL when reading the begin record's payload */
if (drc->drc_rrd != NULL) {
drc->drc_rrd->payload = buf;
drc->drc_rrd->payload_size = len;
drc->drc_rrd->bytes_read = drc->drc_bytes_read;
}
} else {
ASSERT3P(buf, ==, NULL);
}
drc->drc_prev_cksum = drc->drc_cksum;
drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
&drc->drc_next_rrd->header);
drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;
if (err != 0) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (err);
}
if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (SET_ERROR(EINVAL));
}
/*
* Note: checksum is of everything up to but not including the
* checksum itself.
*/
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
receive_cksum(drc,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
&drc->drc_next_rrd->header);
zio_cksum_t cksum_orig =
drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
zio_cksum_t *cksump =
&drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
if (drc->drc_byteswap)
byteswap_record(&drc->drc_next_rrd->header);
if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
!ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (SET_ERROR(ECKSUM));
}
receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);
return (0);
}
/*
* Issue the prefetch reads for any necessary indirect blocks.
*
* We use the object ignore list to tell us whether or not to issue prefetches
* for a given object. We do this for both correctness (in case the blocksize
* of an object has changed) and performance (if the object doesn't exist, don't
* needlessly try to issue prefetches). We also trim the list as we go through
* the stream to prevent it from growing to an unbounded size.
*
* The object numbers within will always be in sorted order, and any write
* records we see will also be in sorted order, but they're not sorted with
* respect to each other (i.e. we can get several object records before
* receiving each object's write records). As a result, once we've reached a
* given object number, we can safely remove any reference to lower object
* numbers in the ignore list. In practice, we receive up to 32 object records
* before receiving write records, so the list can have up to 32 nodes in it.
*/
static void
receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
uint64_t length)
{
if (!objlist_exists(drc->drc_ignore_objlist, object)) {
dmu_prefetch(drc->drc_os, object, 1, offset, length,
ZIO_PRIORITY_SYNC_READ);
}
}
/*
* Read records off the stream, issuing any necessary prefetches.
*/
static int
receive_read_record(dmu_recv_cookie_t *drc)
{
int err;
switch (drc->drc_rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro =
&drc->drc_rrd->header.drr_u.drr_object;
uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
void *buf = NULL;
dmu_object_info_t doi;
if (size != 0)
buf = kmem_zalloc(size, KM_SLEEP);
err = receive_read_payload_and_next_header(drc, size, buf);
if (err != 0) {
kmem_free(buf, size);
return (err);
}
err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
/*
* See receive_read_prefetch for an explanation why we're
* storing this object in the ignore_obj_list.
*/
if (err == ENOENT || err == EEXIST ||
(err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
objlist_insert(drc->drc_ignore_objlist,
drro->drr_object);
err = 0;
}
return (err);
}
case DRR_FREEOBJECTS:
{
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
case DRR_WRITE:
{
struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
abd_t *abd = abd_alloc_linear(size, B_FALSE);
err = receive_read_payload_and_next_header(drc, size,
abd_to_buf(abd));
if (err != 0) {
abd_free(abd);
return (err);
}
drc->drc_rrd->abd = abd;
receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
drrw->drr_logical_size);
return (err);
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&drc->drc_rrd->header.drr_u.drr_write_embedded;
uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
void *buf = kmem_zalloc(size, KM_SLEEP);
err = receive_read_payload_and_next_header(drc, size, buf);
if (err != 0) {
kmem_free(buf, size);
return (err);
}
receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset,
drrwe->drr_length);
return (err);
}
case DRR_FREE:
case DRR_REDACT:
{
/*
* It might be beneficial to prefetch indirect blocks here, but
* we don't really have the data to decide for sure.
*/
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
case DRR_END:
{
struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
drre->drr_checksum))
return (SET_ERROR(ECKSUM));
return (0);
}
case DRR_SPILL:
{
struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
abd_t *abd = abd_alloc_linear(size, B_FALSE);
err = receive_read_payload_and_next_header(drc, size,
abd_to_buf(abd));
if (err != 0)
abd_free(abd);
else
drc->drc_rrd->abd = abd;
return (err);
}
case DRR_OBJECT_RANGE:
{
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
default:
return (SET_ERROR(EINVAL));
}
}
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
#ifdef ZFS_DEBUG
switch (rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro = &rrd->header.drr_u.drr_object;
dprintf("drr_type = OBJECT obj = %llu type = %u "
"bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
"compress = %u dn_slots = %u err = %d\n",
(u_longlong_t)drro->drr_object, drro->drr_type,
drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
drro->drr_checksumtype, drro->drr_compress,
drro->drr_dn_slots, err);
break;
}
case DRR_FREEOBJECTS:
{
struct drr_freeobjects *drrfo =
&rrd->header.drr_u.drr_freeobjects;
dprintf("drr_type = FREEOBJECTS firstobj = %llu "
"numobjs = %llu err = %d\n",
(u_longlong_t)drrfo->drr_firstobj,
(u_longlong_t)drrfo->drr_numobjs, err);
break;
}
case DRR_WRITE:
{
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
"lsize = %llu cksumtype = %u flags = %u "
"compress = %u psize = %llu err = %d\n",
(u_longlong_t)drrw->drr_object, drrw->drr_type,
(u_longlong_t)drrw->drr_offset,
(u_longlong_t)drrw->drr_logical_size,
drrw->drr_checksumtype, drrw->drr_flags,
drrw->drr_compressiontype,
(u_longlong_t)drrw->drr_compressed_size, err);
break;
}
case DRR_WRITE_BYREF:
{
struct drr_write_byref *drrwbr =
&rrd->header.drr_u.drr_write_byref;
dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
"length = %llu toguid = %llx refguid = %llx "
"refobject = %llu refoffset = %llu cksumtype = %u "
"flags = %u err = %d\n",
(u_longlong_t)drrwbr->drr_object,
(u_longlong_t)drrwbr->drr_offset,
(u_longlong_t)drrwbr->drr_length,
(u_longlong_t)drrwbr->drr_toguid,
(u_longlong_t)drrwbr->drr_refguid,
(u_longlong_t)drrwbr->drr_refobject,
(u_longlong_t)drrwbr->drr_refoffset,
drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
break;
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&rrd->header.drr_u.drr_write_embedded;
dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
"length = %llu compress = %u etype = %u lsize = %u "
"psize = %u err = %d\n",
(u_longlong_t)drrwe->drr_object,
(u_longlong_t)drrwe->drr_offset,
(u_longlong_t)drrwe->drr_length,
drrwe->drr_compression, drrwe->drr_etype,
drrwe->drr_lsize, drrwe->drr_psize, err);
break;
}
case DRR_FREE:
{
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
dprintf("drr_type = FREE obj = %llu offset = %llu "
"length = %lld err = %d\n",
(u_longlong_t)drrf->drr_object,
(u_longlong_t)drrf->drr_offset,
(longlong_t)drrf->drr_length,
err);
break;
}
case DRR_SPILL:
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
dprintf("drr_type = SPILL obj = %llu length = %llu "
"err = %d\n", (u_longlong_t)drrs->drr_object,
(u_longlong_t)drrs->drr_length, err);
break;
}
case DRR_OBJECT_RANGE:
{
struct drr_object_range *drror =
&rrd->header.drr_u.drr_object_range;
dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
"numslots = %llu flags = %u err = %d\n",
(u_longlong_t)drror->drr_firstobj,
(u_longlong_t)drror->drr_numslots,
drror->drr_flags, err);
break;
}
default:
return;
}
#endif
}
/*
* Commit the records to the pool.
*/
static int
receive_process_record(struct receive_writer_arg *rwa,
struct receive_record_arg *rrd)
{
int err;
/* Processing in order, therefore bytes_read should be increasing. */
ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
rwa->bytes_read = rrd->bytes_read;
/* We can only heal write records; other ones get ignored */
if (rwa->heal && rrd->header.drr_type != DRR_WRITE) {
if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
return (0);
}
if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) {
err = flush_write_batch(rwa);
if (err != 0) {
if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
return (err);
}
}
switch (rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro = &rrd->header.drr_u.drr_object;
err = receive_object(rwa, drro, rrd->payload);
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
break;
}
case DRR_FREEOBJECTS:
{
struct drr_freeobjects *drrfo =
&rrd->header.drr_u.drr_freeobjects;
err = receive_freeobjects(rwa, drrfo);
break;
}
case DRR_WRITE:
{
err = receive_process_write_record(rwa, rrd);
if (rwa->heal) {
/*
* If healing, always free the abd after processing.
*/
abd_free(rrd->abd);
rrd->abd = NULL;
} else if (err != EAGAIN) {
/*
* On success, a non-healing
* receive_process_write_record() returns
* EAGAIN to indicate that we do not want to free
* the rrd or arc_buf.
*/
ASSERT(err != 0);
abd_free(rrd->abd);
rrd->abd = NULL;
}
break;
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&rrd->header.drr_u.drr_write_embedded;
err = receive_write_embedded(rwa, drrwe, rrd->payload);
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
break;
}
case DRR_FREE:
{
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
err = receive_free(rwa, drrf);
break;
}
case DRR_SPILL:
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
err = receive_spill(rwa, drrs, rrd->abd);
if (err != 0)
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
break;
}
case DRR_OBJECT_RANGE:
{
struct drr_object_range *drror =
&rrd->header.drr_u.drr_object_range;
err = receive_object_range(rwa, drror);
break;
}
case DRR_REDACT:
{
struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
err = receive_redact(rwa, drrr);
break;
}
default:
err = (SET_ERROR(EINVAL));
}
if (err != 0)
dprintf_drr(rrd, err);
return (err);
}
/*
* dmu_recv_stream's worker thread; pull records off the queue, and then call
* receive_process_record(). When we're done, signal the main thread and exit.
*/
static __attribute__((noreturn)) void
receive_writer_thread(void *arg)
{
struct receive_writer_arg *rwa = arg;
struct receive_record_arg *rrd;
fstrans_cookie_t cookie = spl_fstrans_mark();
for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
rrd = bqueue_dequeue(&rwa->q)) {
/*
* If there's an error, the main thread will stop putting things
* on the queue, but we need to clear everything in it before we
* can exit.
*/
int err = 0;
if (rwa->err == 0) {
err = receive_process_record(rwa, rrd);
} else if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
/*
* EAGAIN indicates that this record has been saved (on
* rwa->write_batch), and will be used again, so we don't
* free it.
* When healing data we always need to free the record.
*/
if (err != EAGAIN || rwa->heal) {
if (rwa->err == 0)
rwa->err = err;
kmem_free(rrd, sizeof (*rrd));
}
}
kmem_free(rrd, sizeof (*rrd));
if (rwa->heal) {
zio_wait(rwa->heal_pio);
} else {
int err = flush_write_batch(rwa);
if (rwa->err == 0)
rwa->err = err;
}
mutex_enter(&rwa->mutex);
rwa->done = B_TRUE;
cv_signal(&rwa->cv);
mutex_exit(&rwa->mutex);
spl_fstrans_unmark(cookie);
thread_exit();
}
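/*
* Verify that the "resume_object" and "resume_offset" values in the stream's
* begin nvlist match the resume state recorded for this dataset in the MOS.
*/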
static int
resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
{
uint64_t val;
objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
uint64_t dsobj = dmu_objset_id(drc->drc_os);
uint64_t resume_obj, resume_off;
if (nvlist_lookup_uint64(begin_nvl,
"resume_object", &resume_obj) != 0 ||
nvlist_lookup_uint64(begin_nvl,
"resume_offset", &resume_off) != 0) {
return (SET_ERROR(EINVAL));
}
VERIFY0(zap_lookup(mos, dsobj,
DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
if (resume_obj != val)
return (SET_ERROR(EINVAL));
VERIFY0(zap_lookup(mos, dsobj,
DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
if (resume_off != val)
return (SET_ERROR(EINVAL));
return (0);
}
/*
* Read in the stream's records, one by one, and apply them to the pool. There
* are two threads involved; the thread that calls this function will spin up a
* worker thread, read the records off the stream one by one, and issue
* prefetches for any necessary indirect blocks. It will then push the records
* onto an internal blocking queue. The worker thread will pull the records off
* the queue, and actually write the data into the DMU. This way, the worker
* thread doesn't have to wait for reads to complete, since everything it needs
* (the indirect blocks) will be prefetched.
*
* NB: callers *must* call dmu_recv_end() if this succeeds.
*/
int
dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
{
int err = 0;
struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
uint64_t bytes = 0;
(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
sizeof (bytes), 1, &bytes);
drc->drc_bytes_read += bytes;
}
drc->drc_ignore_objlist = objlist_create();
/* these were verified in dmu_recv_begin */
ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
DMU_SUBSTREAM);
ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
ASSERT0(drc->drc_os->os_encrypted &&
(drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
/* handle DSL encryption key payload */
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
nvlist_t *keynvl = NULL;
ASSERT(drc->drc_os->os_encrypted);
ASSERT(drc->drc_raw);
err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
&keynvl);
if (err != 0)
goto out;
if (!drc->drc_heal) {
/*
* If this is a new dataset we set the key immediately.
* Otherwise we don't want to change the key until we
* are sure the rest of the receive succeeded so we
* stash the keynvl away until then.
*/
err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
drc->drc_ds->ds_object, drc->drc_fromsnapobj,
drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
if (err != 0)
goto out;
}
/* see comment in dmu_recv_end_sync() */
drc->drc_ivset_guid = 0;
(void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
&drc->drc_ivset_guid);
if (!drc->drc_newfs)
drc->drc_keynvl = fnvlist_dup(keynvl);
}
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
err = resume_check(drc, drc->drc_begin_nvl);
if (err != 0)
goto out;
}
/*
* For compatibility with recursive send streams, we do this here,
* rather than in dmu_recv_begin. If we pull the next header too
* early, and it's the END record, we break the `recv_skip` logic.
*/
if (drc->drc_drr_begin->drr_payloadlen == 0) {
err = receive_read_payload_and_next_header(drc, 0, NULL);
if (err != 0)
goto out;
}
/*
* If we failed before this point we will clean up any new resume
* state that was created. Now that we've gotten past the initial
* checks we are ok to retain that resume state.
*/
drc->drc_should_save = B_TRUE;
(void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
offsetof(struct receive_record_arg, node));
cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
rwa->os = drc->drc_os;
rwa->byteswap = drc->drc_byteswap;
rwa->heal = drc->drc_heal;
rwa->tofs = drc->drc_tofs;
rwa->resumable = drc->drc_resumable;
rwa->raw = drc->drc_raw;
rwa->spill = drc->drc_spill;
rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
rwa->os->os_raw_receive = drc->drc_raw;
if (drc->drc_heal) {
rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL,
ZIO_FLAG_GODFATHER);
}
list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
offsetof(struct receive_record_arg, node.bqn_node));
(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
TS_RUN, minclsyspri);
/*
* We're reading rwa->err without locks, which is safe since we are the
* only reader, and the worker thread is the only writer. It's ok if we
* miss a write for an iteration or two of the loop, since the writer
* thread will keep freeing records we send it until we send it an eos
* marker.
*
* We can leave this loop in 3 ways: First, if rwa->err is
* non-zero. In that case, the writer thread will free the rrd we just
* pushed. Second, if we're interrupted; in that case, either it's the
* first loop and drc->drc_rrd was never allocated, or it's later, and
* drc->drc_rrd has been handed off to the writer thread who will free
* it. Finally, if receive_read_record fails or we're at the end of the
* stream, then we free drc->drc_rrd and exit.
*/
while (rwa->err == 0) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
err = SET_ERROR(EINTR);
break;
}
ASSERT3P(drc->drc_rrd, ==, NULL);
drc->drc_rrd = drc->drc_next_rrd;
drc->drc_next_rrd = NULL;
/* Allocates and loads header into drc->drc_next_rrd */
err = receive_read_record(drc);
if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
drc->drc_rrd = NULL;
break;
}
bqueue_enqueue(&rwa->q, drc->drc_rrd,
sizeof (struct receive_record_arg) +
drc->drc_rrd->payload_size);
drc->drc_rrd = NULL;
}
ASSERT3P(drc->drc_rrd, ==, NULL);
drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
drc->drc_rrd->eos_marker = B_TRUE;
bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
mutex_enter(&rwa->mutex);
while (!rwa->done) {
/*
* We need to use cv_wait_sig() so that any process that may
* be sleeping here can still fork.
*/
(void) cv_wait_sig(&rwa->cv, &rwa->mutex);
}
mutex_exit(&rwa->mutex);
/*
* If we are receiving a full stream as a clone, all object IDs which
* are greater than the maximum ID referenced in the stream are
* by definition unused and must be freed.
*/
if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
uint64_t obj = rwa->max_object + 1;
int free_err = 0;
int next_err = 0;
while (next_err == 0) {
free_err = dmu_free_long_object(rwa->os, obj);
if (free_err != 0 && free_err != ENOENT)
break;
next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
}
if (err == 0) {
if (free_err != 0 && free_err != ENOENT)
err = free_err;
else if (next_err != ESRCH)
err = next_err;
}
}
cv_destroy(&rwa->cv);
mutex_destroy(&rwa->mutex);
bqueue_destroy(&rwa->q);
list_destroy(&rwa->write_batch);
if (err == 0)
err = rwa->err;
out:
/*
* If we hit an error before we started the receive_writer_thread
* we need to clean up the next_rrd we created by processing the
* DRR_BEGIN record.
*/
if (drc->drc_next_rrd != NULL)
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
/*
* The objset will be invalidated by dmu_recv_end() when we do
* dsl_dataset_clone_swap_sync_impl().
*/
drc->drc_os = NULL;
kmem_free(rwa, sizeof (*rwa));
nvlist_free(drc->drc_begin_nvl);
if (err != 0) {
/*
* Clean up references. If receive is not resumable,
* destroy what we created, so we don't leave it in
* an inconsistent state.
*/
dmu_recv_cleanup_ds(drc);
nvlist_free(drc->drc_keynvl);
}
objlist_destroy(drc->drc_ignore_objlist);
drc->drc_ignore_objlist = NULL;
*voffp = drc->drc_voff;
return (err);
}
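/*
* Check function for the sync task that finishes a receive: validates any
* snapshot destruction requested by a forced receive (drc_force), the
* stashed raw crypto key, the clone swap, and the creation of the new
* snapshot before dmu_recv_end_sync() applies them.
*/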
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_cookie_t *drc = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
int error;
ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
if (drc->drc_heal) {
error = 0;
} else if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
if (error != 0)
return (error);
if (drc->drc_force) {
/*
* We will destroy any snapshots in tofs (i.e. before
* origin_head) that are after the origin (which is
* the snap before drc_ds, because drc_ds cannot
* have any snaps of its own).
*/
uint64_t obj;
obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
while (obj !=
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
dsl_dataset_t *snap;
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
break;
if (snap->ds_dir != origin_head->ds_dir)
error = SET_ERROR(EINVAL);
if (error == 0) {
error = dsl_destroy_snapshot_check_impl(
snap, B_FALSE);
}
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_dataset_rele(snap, FTAG);
if (error != 0)
break;
}
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
}
if (drc->drc_keynvl != NULL) {
error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
drc->drc_keynvl, tx);
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
}
error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
origin_head, drc->drc_force, drc->drc_owner, tx);
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
error = dsl_dataset_snapshot_check_impl(origin_head,
drc->drc_tosnap, tx, B_TRUE, 1,
drc->drc_cred, drc->drc_proc);
dsl_dataset_rele(origin_head, FTAG);
if (error != 0)
return (error);
error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
} else {
error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
drc->drc_tosnap, tx, B_TRUE, 1,
drc->drc_cred, drc->drc_proc);
}
return (error);
}
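/*
* Sync counterpart of dmu_recv_end_check(): swaps the received clone with
* origin_head (or snapshots a newly created dataset), clears the
* inconsistent flag and any saved resume state, records the ivset guid for
* raw receives, and releases the hold taken in dmu_recv_begin().
*/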
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_cookie_t *drc = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
uint64_t newsnapobj = 0;
spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
tx, "snap=%s", drc->drc_tosnap);
drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
if (drc->drc_heal) {
if (drc->drc_keynvl != NULL) {
nvlist_free(drc->drc_keynvl);
drc->drc_keynvl = NULL;
}
} else if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
&origin_head));
if (drc->drc_force) {
/*
* Destroy any snapshots of drc_tofs (origin_head)
* after the origin (the snap before drc_ds).
*/
uint64_t obj;
obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
while (obj !=
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
dsl_dataset_t *snap;
VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
&snap));
ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_destroy_snapshot_sync_impl(snap,
B_FALSE, tx);
dsl_dataset_rele(snap, FTAG);
}
}
if (drc->drc_keynvl != NULL) {
dsl_crypto_recv_raw_key_sync(drc->drc_ds,
drc->drc_keynvl, tx);
nvlist_free(drc->drc_keynvl);
drc->drc_keynvl = NULL;
}
VERIFY3P(drc->drc_ds->ds_prev, ==,
origin_head->ds_prev);
dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
origin_head, tx);
/*
* The objset was evicted by dsl_dataset_clone_swap_sync_impl,
* so drc_os is no longer valid.
*/
drc->drc_os = NULL;
dsl_dataset_snapshot_sync_impl(origin_head,
drc->drc_tosnap, tx);
/* set snapshot's creation time and guid */
dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
drc->drc_drrb->drr_creation_time;
dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
drc->drc_drrb->drr_toguid;
dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
~DS_FLAG_INCONSISTENT;
dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
dsl_dataset_phys(origin_head)->ds_flags &=
~DS_FLAG_INCONSISTENT;
newsnapobj =
dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
dsl_dataset_rele(origin_head, FTAG);
dsl_destroy_head_sync_impl(drc->drc_ds, tx);
if (drc->drc_owner != NULL)
VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
} else {
dsl_dataset_t *ds = drc->drc_ds;
dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
/* set snapshot's creation time and guid */
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
drc->drc_drrb->drr_creation_time;
dsl_dataset_phys(ds->ds_prev)->ds_guid =
drc->drc_drrb->drr_toguid;
dsl_dataset_phys(ds->ds_prev)->ds_flags &=
~DS_FLAG_INCONSISTENT;
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
if (dsl_dataset_has_resume_receive_state(ds)) {
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OBJECT, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OFFSET, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_BYTES, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TONAME, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
}
newsnapobj =
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
}
/*
* If this is a raw receive, the crypt_keydata nvlist will include
* a to_ivset_guid for us to set on the new snapshot. This value
* will override the value generated by the snapshot code. However,
* this value may not be present, because older implementations of
* the raw send code did not include this value, and we are still
* allowed to receive them if the zfs_disable_ivset_guid_check
* tunable is set, in which case we will leave the newly-generated
* value.
*/
if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) {
dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
&drc->drc_ivset_guid, tx));
}
/*
* Release the hold from dmu_recv_begin. This must be done before
* we return to open context, so that when we free the dataset's dnode
* we can evict its bonus buffer. Since the dataset may be destroyed
* at this point (and therefore won't have a valid pointer to the spa)
* we release the key mapping manually here while we do have a valid
* pointer, if it exists.
*/
if (!drc->drc_raw && encrypted) {
(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
drc->drc_ds->ds_object, drc->drc_ds);
}
dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
drc->drc_ds = NULL;
}
static int dmu_recv_end_modified_blocks = 3;
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
/*
* We will be destroying the ds; make sure its origin is unmounted if
* necessary.
*/
char name[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(drc->drc_ds, name);
zfs_destroy_unmount_origin(name);
#endif
return (dsl_sync_task(drc->drc_tofs,
dmu_recv_end_check, dmu_recv_end_sync, drc,
dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
return (dsl_sync_task(drc->drc_tofs,
dmu_recv_end_check, dmu_recv_end_sync, drc,
dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
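/*
* Finish a receive: run the appropriate end sync task and, on success,
* create zvol minors for the target; on failure clean up the dataset and
* free any stashed crypto key nvlist.
*/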
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
int error;
drc->drc_owner = owner;
if (drc->drc_newfs)
error = dmu_recv_new_end(drc);
else
error = dmu_recv_existing_end(drc);
if (error != 0) {
dmu_recv_cleanup_ds(drc);
nvlist_free(drc->drc_keynvl);
} else if (!drc->drc_heal) {
if (drc->drc_newfs) {
zvol_create_minor(drc->drc_tofs);
}
char *snapname = kmem_asprintf("%s@%s",
drc->drc_tofs, drc->drc_tosnap);
zvol_create_minor(snapname);
kmem_strfree(snapname);
}
return (error);
}
/*
* Return TRUE if this objset is currently being received into.
*/
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
return (os->os_dsl_dataset != NULL &&
os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, UINT, ZMOD_RW,
"Maximum receive queue length");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, UINT, ZMOD_RW,
"Receive queue fill fraction");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, UINT, ZMOD_RW,
"Maximum amount of writes to batch into one transaction");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
"Ignore errors during corrective receive");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dmu_send.c b/sys/contrib/openzfs/module/zfs/dmu_send.c
index 5b7f5543ad09..2d37ed2cdfb5 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_send.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_send.c
@@ -1,3121 +1,3124 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright 2016 RackTop Systems.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
static int zfs_send_corrupt_data = B_FALSE;
/*
* This tunable controls the amount of data (measured in bytes) that will be
* prefetched by zfs send. If the main thread is blocking on reads that haven't
* completed, this variable might need to be increased. If instead the main
* thread is issuing new reads because the prefetches have fallen out of the
* cache, this may need to be decreased.
*/
static uint_t zfs_send_queue_length = SPA_MAXBLOCKSIZE;
/*
* This tunable controls the length of the queues that zfs send worker threads
* use to communicate. If the send_main_thread is blocking on these queues,
* this variable may need to be increased. If there is a significant slowdown
* at the start of a send as these threads consume all the available IO
* resources, this variable may need to be decreased.
*/
static uint_t zfs_send_no_prefetch_queue_length = 1024 * 1024;
/*
* These tunables control the fill fraction of the queues used by zfs send. The fill
* fraction controls the frequency with which threads have to be cv_signaled.
* If a lot of cpu time is being spent on cv_signal, then these should be tuned
* down. If the queues empty before the signalled thread can catch up, then
* these should be tuned up.
*/
static uint_t zfs_send_queue_ff = 20;
static uint_t zfs_send_no_prefetch_queue_ff = 20;
/*
* Use this to override the recordsize calculation for fast zfs send estimates.
*/
static uint_t zfs_override_estimate_recordsize = 0;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;
/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
static int zfs_send_unmodified_spill_blocks = B_TRUE;
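/*
* Multiply a by b into *c, returning B_FALSE (and leaving *c untouched)
* if the product would overflow a uint64_t.
*/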
static inline boolean_t
overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
{
uint64_t temp = a * b;
if (b != 0 && temp / b != a)
return (B_FALSE);
*c = temp;
return (B_TRUE);
}
struct send_thread_arg {
bqueue_t q;
objset_t *os; /* Objset to traverse */
uint64_t fromtxg; /* Traverse from this txg */
int flags; /* flags to pass to traverse_dataset */
int error_code;
boolean_t cancel;
zbookmark_phys_t resume;
uint64_t *num_blocks_visited;
};
struct redact_list_thread_arg {
boolean_t cancel;
bqueue_t q;
zbookmark_phys_t resume;
redaction_list_t *rl;
boolean_t mark_redact;
int error_code;
uint64_t *num_blocks_visited;
};
struct send_merge_thread_arg {
bqueue_t q;
objset_t *os;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *redact_arg;
int error;
boolean_t cancel;
};
struct send_range {
boolean_t eos_marker; /* Marks the end of the stream */
uint64_t object;
uint64_t start_blkid;
uint64_t end_blkid;
bqueue_node_t ln;
enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
PREVIOUSLY_REDACTED} type;
union {
struct srd {
dmu_object_type_t obj_type;
uint32_t datablksz; // logical size
uint32_t datasz; // payload size
blkptr_t bp;
arc_buf_t *abuf;
abd_t *abd;
kmutex_t lock;
kcondvar_t cv;
boolean_t io_outstanding;
boolean_t io_compressed;
int io_err;
} data;
struct srh {
uint32_t datablksz;
} hole;
struct sro {
/*
* This is a pointer because embedding it in the
* struct causes these structures to be massively larger
* for all range types; this makes the code much less
* memory efficient.
*/
dnode_phys_t *dnp;
blkptr_t bp;
} object;
struct srr {
uint32_t datablksz;
} redact;
struct sror {
blkptr_t bp;
} object_range;
} sru;
};
/*
* The list of data whose inclusion in a send stream can be pending from
* one call to backup_cb to another. Multiple calls to dump_free(),
* dump_freeobjects(), and dump_redact() can be aggregated into a single
* DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
*/
typedef enum {
PENDING_NONE,
PENDING_FREE,
PENDING_FREEOBJECTS,
PENDING_REDACT
} dmu_pendop_t;
typedef struct dmu_send_cookie {
dmu_replay_record_t *dsc_drr;
dmu_send_outparams_t *dsc_dso;
offset_t *dsc_off;
objset_t *dsc_os;
zio_cksum_t dsc_zc;
uint64_t dsc_toguid;
uint64_t dsc_fromtxg;
int dsc_err;
dmu_pendop_t dsc_pending_op;
uint64_t dsc_featureflags;
uint64_t dsc_last_data_object;
uint64_t dsc_last_data_offset;
uint64_t dsc_resume_object;
uint64_t dsc_resume_offset;
boolean_t dsc_sent_begin;
boolean_t dsc_sent_end;
} dmu_send_cookie_t;
static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);
static void
range_free(struct send_range *range)
{
if (range->type == OBJECT) {
size_t size = sizeof (dnode_phys_t) *
(range->sru.object.dnp->dn_extra_slots + 1);
kmem_free(range->sru.object.dnp, size);
} else if (range->type == DATA) {
mutex_enter(&range->sru.data.lock);
while (range->sru.data.io_outstanding)
cv_wait(&range->sru.data.cv, &range->sru.data.lock);
if (range->sru.data.abd != NULL)
abd_free(range->sru.data.abd);
if (range->sru.data.abuf != NULL) {
arc_buf_destroy(range->sru.data.abuf,
&range->sru.data.abuf);
}
mutex_exit(&range->sru.data.lock);
cv_destroy(&range->sru.data.cv);
mutex_destroy(&range->sru.data.lock);
}
kmem_free(range, sizeof (*range));
}
/*
* For all record types except BEGIN, fill in the checksum (overlaid in
* drr_u.drr_checksum.drr_checksum). The checksum verifies everything
* up to the start of the checksum itself.
*/
static int
dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
{
dmu_send_outparams_t *dso = dscp->dsc_dso;
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
(void) fletcher_4_incremental_native(dscp->dsc_drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
&dscp->dsc_zc);
if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
dscp->dsc_sent_begin = B_TRUE;
} else {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
drr_checksum.drr_checksum));
dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
}
if (dscp->dsc_drr->drr_type == DRR_END) {
dscp->dsc_sent_end = B_TRUE;
}
(void) fletcher_4_incremental_native(&dscp->dsc_drr->
drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), &dscp->dsc_zc);
*dscp->dsc_off += sizeof (dmu_replay_record_t);
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
sizeof (dmu_replay_record_t), dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
if (payload_len != 0) {
*dscp->dsc_off += payload_len;
/*
* payload is null when dso_dryrun == B_TRUE (i.e. when we're
* doing a send size calculation)
*/
if (payload != NULL) {
(void) fletcher_4_incremental_native(
payload, payload_len, &dscp->dsc_zc);
}
/*
* The code does not rely on this (len being a multiple of 8).
* We keep this assertion because of the corresponding assertion
* in receive_read(). Keeping this assertion ensures that we do
* not inadvertently break backwards compatibility (causing the
* assertion in receive_read() to trigger on old software).
*
* Raw sends cannot be received on old software, and so can
* bypass this assertion.
*/
ASSERT((payload_len % 8 == 0) ||
(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
payload_len, dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
}
return (0);
}
/*
* Fill in the drr_free struct, or perform aggregation if the previous record is
* also a free record, and the two are adjacent.
*
* Note that we send free records even for a full send, because we want to be
* able to receive a full send as a clone, which requires a list of all the free
* and freeobject records that were generated on the source.
*/
static int
dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);
/*
* When we receive a free record, dbuf_free_range() assumes
* that the receiving system doesn't have any dbufs in the range
* being freed. This is always true because there is a one-record
* constraint: we only send one WRITE record for any given
* object,offset. We know that the one-record constraint is
* true because we always send data in increasing order by
* object,offset.
*
* If the increasing-order constraint ever changes, we should find
* another way to assert that the one-record constraint is still
* satisfied.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
/*
* If there is a pending op, but it's not PENDING_FREE, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_FREE records can only be aggregated with
* other DRR_FREE records. DRR_FREEOBJECTS records can only be
* aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREE) {
/*
* Check to see whether this free block can be aggregated
* with the pending one.
*/
if (drrf->drr_object == object && drrf->drr_offset +
drrf->drr_length == offset) {
if (offset + length < offset || length == UINT64_MAX)
drrf->drr_length = UINT64_MAX;
else
drrf->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a FREE record and make it pending */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREE;
drrf->drr_object = object;
drrf->drr_offset = offset;
if (offset + length < offset)
drrf->drr_length = DMU_OBJECT_END;
else
drrf->drr_length = length;
drrf->drr_toguid = dscp->dsc_toguid;
if (length == DMU_OBJECT_END) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
} else {
dscp->dsc_pending_op = PENDING_FREE;
}
return (0);
}
/*
* Fill in the drr_redact struct, or perform aggregation if the previous record
* is also a redaction record, and the two are adjacent.
*/
static int
dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;
/*
* If there is a pending op, but it's not PENDING_REDACT, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_REDACT records can only be aggregated with
* other DRR_REDACT records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_REDACT) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_REDACT) {
/*
* Check to see whether this redacted block can be aggregated
* with the pending one.
*/
if (drrr->drr_object == object && drrr->drr_offset +
drrr->drr_length == offset) {
drrr->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a REDACT record and make it pending */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_REDACT;
drrr->drr_object = object;
drrr->drr_offset = offset;
drrr->drr_length = length;
drrr->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_REDACT;
return (0);
}
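/*
* Emit a DRR_WRITE record for a single block, flushing any pending
* aggregation first. For raw and compressed sends the payload is the
* physical (compressed) data; otherwise it is the logical data.
*/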
static int
dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
uint64_t offset, int lsize, int psize, const blkptr_t *bp,
boolean_t io_compressed, void *data)
{
uint64_t payload_size;
boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);
/*
* We send data in increasing object, offset order.
* See comment in dump_free() for details.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
dscp->dsc_last_data_object = object;
dscp->dsc_last_data_offset = offset + lsize - 1;
/*
* If there is any kind of pending aggregation (currently either
* a grouping of free objects or free blocks), push it out to
* the stream, since aggregation can't be done across operations
* of different types.
*/
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a WRITE record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE;
drrw->drr_object = object;
drrw->drr_type = type;
drrw->drr_offset = offset;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_logical_size = lsize;
/* only set the compression fields if the buf is compressed or raw */
boolean_t compressed =
(bp != NULL ? BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
io_compressed : lsize != psize);
if (raw || compressed) {
ASSERT(bp != NULL);
ASSERT(raw || dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3S(psize, >, 0);
if (raw) {
ASSERT(BP_IS_PROTECTED(bp));
/*
* This is a raw protected block so we need to pass
* along everything the receiving side will need to
* interpret this block, including the byteswap, salt,
* IV, and MAC.
*/
if (BP_SHOULD_BYTESWAP(bp))
drrw->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drrw->drr_salt,
drrw->drr_iv);
zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
} else {
/* this is a compressed block */
ASSERT(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_SHOULD_BYTESWAP(bp));
ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
ASSERT3S(lsize, >=, psize);
}
/* set fields common to compressed and raw sends */
drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
drrw->drr_compressed_size = psize;
payload_size = drrw->drr_compressed_size;
} else {
payload_size = drrw->drr_logical_size;
}
if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
/*
* There's no pre-computed checksum for partial-block writes,
* embedded BP's, or encrypted BP's that are being sent as
* plaintext, so (like fletcher4-checksummed blocks) userland
* will have to compute a dedup-capable checksum itself.
*/
drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
} else {
drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
ZCHECKSUM_FLAG_DEDUP)
drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
drrw->drr_key.ddk_cksum = bp->blk_cksum;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
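/*
* Emit a DRR_WRITE_EMBEDDED record for a block whose data is embedded in
* the block pointer itself; the payload is padded to a multiple of 8 bytes.
*/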
static int
dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
int blksz, const blkptr_t *bp)
{
char buf[BPE_PAYLOAD_SIZE];
struct drr_write_embedded *drrw =
&(dscp->dsc_drr->drr_u.drr_write_embedded);
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
ASSERT(BP_IS_EMBEDDED(bp));
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
drrw->drr_object = object;
drrw->drr_offset = offset;
drrw->drr_length = blksz;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_compression = BP_GET_COMPRESS(bp);
drrw->drr_etype = BPE_GET_ETYPE(bp);
drrw->drr_lsize = BPE_GET_LSIZE(bp);
drrw->drr_psize = BPE_GET_PSIZE(bp);
decode_embedded_bp_compressed(bp, buf);
uint32_t psize = drrw->drr_psize;
uint32_t rsize = P2ROUNDUP(psize, 8);
if (psize != rsize)
memset(buf + psize, 0, rsize - psize);
if (dump_record(dscp, buf, rsize) != 0)
return (SET_ERROR(EINTR));
return (0);
}
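/*
* Emit a DRR_SPILL record for a spill block, marking it DRR_SPILL_UNMODIFIED
* if it was born at or before the incremental source txg, and including the
* crypto parameters for raw sends.
*/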
static int
dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
void *data)
{
struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
uint64_t blksz = BP_GET_LSIZE(bp);
uint64_t payload_size = blksz;
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a SPILL record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_SPILL;
drrs->drr_object = object;
drrs->drr_length = blksz;
drrs->drr_toguid = dscp->dsc_toguid;
/* See comment in dump_dnode() for full details */
if (zfs_send_unmodified_spill_blocks &&
(bp->blk_birth <= dscp->dsc_fromtxg)) {
drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
}
/* handle raw send fields */
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drrs->drr_flags |= DRR_RAW_BYTESWAP;
drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
drrs->drr_compressed_size = BP_GET_PSIZE(bp);
zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
payload_size = drrs->drr_compressed_size;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
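/*
* Fill in the drr_freeobjects struct, or aggregate with the pending
* DRR_FREEOBJECTS record if the new range immediately follows it. Object
* IDs which cannot exist on the receiving side are not sent.
*/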
static int
dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
{
struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
uint64_t maxobj = DNODES_PER_BLOCK *
(DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);
/*
* ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
* leading to zfs recv never completing. To avoid this issue, don't
* send FREEOBJECTS records for object IDs which cannot exist on the
* receiving side.
*/
if (maxobj > 0) {
if (maxobj <= firstobj)
return (0);
if (maxobj < firstobj + numobjs)
numobjs = maxobj - firstobj;
}
/*
* If there is a pending op, but it's not PENDING_FREEOBJECTS,
* push it out, since free block aggregation can only be done for
* blocks of the same type (i.e., DRR_FREE records can only be
* aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
* can only be aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
/*
* See whether this free object array can be aggregated
* with the pending one
*/
if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
drrfo->drr_numobjs += numobjs;
return (0);
} else {
/* can't be aggregated. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* write a FREEOBJECTS record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
drrfo->drr_firstobj = firstobj;
drrfo->drr_numobjs = numobjs;
drrfo->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_FREEOBJECTS;
return (0);
}
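/*
* Emit a DRR_OBJECT record describing a dnode, followed by a DRR_FREE for
* everything past the end of the file and, when enabled, a DRR_SPILL record
* for an unmodified spill block.
*/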
static int
dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
dnode_phys_t *dnp)
{
struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
int bonuslen;
if (object < dscp->dsc_resume_object) {
/*
* Note: when resuming, we will visit all the dnodes in
* the block of dnodes that we are resuming from. In
* this case it's unnecessary to send the dnodes prior to
* the one we are resuming from. We should be at most one
* block's worth of dnodes behind the resume point.
*/
ASSERT3U(dscp->dsc_resume_object - object, <,
1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
return (0);
}
if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
return (dump_freeobjects(dscp, object, 1));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write an OBJECT record */
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT;
drro->drr_object = object;
drro->drr_type = dnp->dn_type;
drro->drr_bonustype = dnp->dn_bonustype;
drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
drro->drr_bonuslen = dnp->dn_bonuslen;
drro->drr_dn_slots = dnp->dn_extra_slots + 1;
drro->drr_checksumtype = dnp->dn_checksum;
drro->drr_compress = dnp->dn_compress;
drro->drr_toguid = dscp->dsc_toguid;
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
ASSERT(BP_IS_ENCRYPTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drro->drr_flags |= DRR_RAW_BYTESWAP;
/* needed for reconstructing dnp on recv side */
drro->drr_maxblkid = dnp->dn_maxblkid;
drro->drr_indblkshift = dnp->dn_indblkshift;
drro->drr_nlevels = dnp->dn_nlevels;
drro->drr_nblkptr = dnp->dn_nblkptr;
/*
* Since we encrypt the entire bonus area, the (raw) part
* beyond the bonuslen is actually nonzero, so we need
* to send it.
*/
if (bonuslen != 0) {
if (drro->drr_bonuslen > DN_MAX_BONUS_LEN(dnp))
return (SET_ERROR(EINVAL));
drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
bonuslen = drro->drr_raw_bonuslen;
}
}
/*
* DRR_OBJECT_SPILL is set for every dnode which references a
* spill block. This allows the receiving pool to definitively
* determine when a spill block should be kept or freed.
*/
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
drro->drr_flags |= DRR_OBJECT_SPILL;
if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
return (SET_ERROR(EINTR));
/* Free anything past the end of the file. */
if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
(dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
return (SET_ERROR(EINTR));
/*
* Send DRR_SPILL records for unmodified spill blocks. This is useful
* because changing certain attributes of the object (e.g. blocksize)
* can cause old versions of ZFS to incorrectly remove a spill block.
* Including these records in the stream forces an up-to-date version
* to always be written, ensuring they're never lost. Current versions
* of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
* ignore these unmodified spill blocks.
*/
if (zfs_send_unmodified_spill_blocks &&
(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
(DN_SPILL_BLKPTR(dnp)->blk_birth <= dscp->dsc_fromtxg)) {
struct send_range record;
blkptr_t *bp = DN_SPILL_BLKPTR(dnp);
memset(&record, 0, sizeof (struct send_range));
record.type = DATA;
record.object = object;
record.eos_marker = B_FALSE;
record.start_blkid = DMU_SPILL_BLKID;
record.end_blkid = record.start_blkid + 1;
record.sru.data.bp = *bp;
record.sru.data.obj_type = dnp->dn_type;
record.sru.data.datablksz = BP_GET_LSIZE(bp);
if (do_dump(dscp, &record) != 0)
return (SET_ERROR(EINTR));
}
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
return (0);
}
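/*
* Emit a DRR_OBJECT_RANGE record carrying the crypto parameters for a block
* of dnodes; this record type is only used for raw sends.
*/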
static int
dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
uint64_t firstobj, uint64_t numslots)
{
struct drr_object_range *drror =
&(dscp->dsc_drr->drr_u.drr_object_range);
/* we only use this record type for raw sends */
ASSERT(BP_IS_PROTECTED(bp));
ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
ASSERT0(BP_GET_LEVEL(bp));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
drror->drr_firstobj = firstobj;
drror->drr_numslots = numslots;
drror->drr_toguid = dscp->dsc_toguid;
if (BP_SHOULD_BYTESWAP(bp))
drror->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
zio_crypt_decode_mac_bp(bp, drror->drr_mac);
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
return (0);
}
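/*
* Decide whether an embedded block pointer can be sent as a
* DRR_WRITE_EMBEDDED record, given the stream's feature flags.
*/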
static boolean_t
send_do_embed(const blkptr_t *bp, uint64_t featureflags)
{
if (!BP_IS_EMBEDDED(bp))
return (B_FALSE);
/*
* Compression function must be legacy, or explicitly enabled.
*/
if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
!(featureflags & DMU_BACKUP_FEATURE_LZ4)))
return (B_FALSE);
/*
* If we have not set the ZSTD feature flag, we can't send ZSTD
* compressed embedded blocks, as the receiver may not support them.
*/
if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
!(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
return (B_FALSE);
/*
* Embed type must be explicitly enabled.
*/
switch (BPE_GET_ETYPE(bp)) {
case BP_EMBEDDED_TYPE_DATA:
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (B_TRUE);
break;
default:
return (B_FALSE);
}
return (B_FALSE);
}
/*
* This function determines what kind of record needs to be dumped and calls
* the appropriate helper function. In most cases,
* the data has already been read by send_reader_thread().
*/
static int
do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
{
int err = 0;
switch (range->type) {
case OBJECT:
err = dump_dnode(dscp, &range->sru.object.bp, range->object,
range->sru.object.dnp);
return (err);
case OBJECT_RANGE: {
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
return (0);
}
uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
DNODE_SHIFT;
uint64_t firstobj = range->start_blkid * epb;
err = dump_object_range(dscp, &range->sru.object_range.bp,
firstobj, epb);
break;
}
case REDACT: {
struct srr *srrp = &range->sru.redact;
err = dump_redact(dscp, range->object, range->start_blkid *
srrp->datablksz, (range->end_blkid - range->start_blkid) *
srrp->datablksz);
return (err);
}
case DATA: {
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
spa_t *spa =
dmu_objset_spa(dscp->dsc_os);
ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (BP_GET_TYPE(bp) == DMU_OT_SA) {
arc_flags_t aflags = ARC_FLAG_WAIT;
zio_flag_t zioflags = ZIO_FLAG_CANFAIL;
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
zioflags |= ZIO_FLAG_RAW;
}
zbookmark_phys_t zb;
ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
zb.zb_objset = dmu_objset_id(dscp->dsc_os);
zb.zb_object = range->object;
zb.zb_level = 0;
zb.zb_blkid = range->start_blkid;
arc_buf_t *abuf = NULL;
if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb) != 0)
return (SET_ERROR(EIO));
err = dump_spill(dscp, bp, zb.zb_object,
(abuf == NULL ? NULL : abuf->b_data));
if (abuf != NULL)
arc_buf_destroy(abuf, &abuf);
return (err);
}
if (send_do_embed(bp, dscp->dsc_featureflags)) {
err = dump_write_embedded(dscp, range->object,
range->start_blkid * srdp->datablksz,
srdp->datablksz, bp);
return (err);
}
ASSERT(range->object > dscp->dsc_resume_object ||
(range->object == dscp->dsc_resume_object &&
range->start_blkid * srdp->datablksz >=
dscp->dsc_resume_offset));
/* it's a level-0 block of a regular object */
mutex_enter(&srdp->lock);
while (srdp->io_outstanding)
cv_wait(&srdp->cv, &srdp->lock);
err = srdp->io_err;
mutex_exit(&srdp->lock);
if (err != 0) {
if (zfs_send_corrupt_data &&
!dscp->dsc_dso->dso_dryrun) {
/*
* Send a block filled with 0x"zfs badd bloc"
*/
srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
ARC_BUFC_DATA, srdp->datablksz);
uint64_t *ptr;
for (ptr = srdp->abuf->b_data;
(char *)ptr < (char *)srdp->abuf->b_data +
srdp->datablksz; ptr++)
*ptr = 0x2f5baddb10cULL;
} else {
return (SET_ERROR(EIO));
}
}
ASSERT(dscp->dsc_dso->dso_dryrun ||
srdp->abuf != NULL || srdp->abd != NULL);
uint64_t offset = range->start_blkid * srdp->datablksz;
char *data = NULL;
if (srdp->abd != NULL) {
data = abd_to_buf(srdp->abd);
ASSERT3P(srdp->abuf, ==, NULL);
} else if (srdp->abuf != NULL) {
data = srdp->abuf->b_data;
}
/*
* If we have large blocks stored on disk but the send flags
* don't allow us to send large blocks, we split the data from
* the arc buf into chunks.
*/
if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
while (srdp->datablksz > 0 && err == 0) {
int n = MIN(srdp->datablksz,
SPA_OLD_MAXBLOCKSIZE);
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset, n, n, NULL, B_FALSE,
data);
offset += n;
/*
* When doing a dry run, data==NULL is used as a
* sentinel value by
* dmu_dump_write()->dump_record().
*/
if (data != NULL)
data += n;
srdp->datablksz -= n;
}
} else {
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset,
srdp->datablksz, srdp->datasz, bp,
srdp->io_compressed, data);
}
return (err);
}
case HOLE: {
struct srh *srhp = &range->sru.hole;
if (range->object == DMU_META_DNODE_OBJECT) {
uint32_t span = srhp->datablksz >> DNODE_SHIFT;
uint64_t first_obj = range->start_blkid * span;
uint64_t numobj = range->end_blkid * span - first_obj;
return (dump_freeobjects(dscp, first_obj, numobj));
}
uint64_t offset = 0;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it can only ever be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(range->start_blkid, srhp->datablksz,
&offset)) {
return (0);
}
uint64_t len = 0;
if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
len = UINT64_MAX;
len = len - offset;
return (dump_free(dscp, range->object, offset, len));
}
default:
panic("Invalid range type in do_dump: %d", range->type);
}
return (err);
}
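/*
* Allocate and initialize a send_range of the given type; DATA ranges also
* have their lock, condition variable, and I/O tracking fields initialized.
*/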
static struct send_range *
range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
uint64_t end_blkid, boolean_t eos)
{
struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
range->type = type;
range->object = object;
range->start_blkid = start_blkid;
range->end_blkid = end_blkid;
range->eos_marker = eos;
if (type == DATA) {
range->sru.data.abd = NULL;
range->sru.data.abuf = NULL;
mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
range->sru.data.io_outstanding = 0;
range->sru.data.io_err = 0;
range->sru.data.io_compressed = B_FALSE;
}
return (range);
}
/*
* This is the callback function to traverse_dataset that acts as a worker
* thread for dmu_send_impl.
*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
(void) zilog;
struct send_thread_arg *sta = arg;
struct send_range *record;
ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
zb->zb_object >= sta->resume.zb_object);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (sta->os->os_encrypted &&
!BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
spa_log_error(spa, zb, &bp->blk_birth);
zfs_panic_recover("unencrypted block in encrypted "
"object set %llu", dmu_objset_id(sta->os));
return (SET_ERROR(EIO));
}
if (sta->cancel)
return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT &&
DMU_OBJECT_IS_SPECIAL(zb->zb_object))
return (0);
atomic_inc_64(sta->num_blocks_visited);
if (zb->zb_level == ZB_DNODE_LEVEL) {
if (zb->zb_object == DMU_META_DNODE_OBJECT)
return (0);
record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
record->sru.object.bp = *bp;
size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
memcpy(record->sru.object.dnp, dnp, size);
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
!BP_IS_HOLE(bp)) {
record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
zb->zb_blkid + 1, B_FALSE);
record->sru.object_range.bp = *bp;
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
return (0);
if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
return (0);
uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
uint64_t start;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it can only ever be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
span * zb->zb_blkid > dnp->dn_maxblkid)) {
ASSERT(BP_IS_HOLE(bp));
return (0);
}
if (zb->zb_blkid == DMU_SPILL_BLKID)
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
enum type record_type = DATA;
if (BP_IS_HOLE(bp))
record_type = HOLE;
else if (BP_IS_REDACTED(bp))
record_type = REDACT;
else
record_type = DATA;
record = range_alloc(record_type, zb->zb_object, start,
(start + span < start ? 0 : start + span), B_FALSE);
uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
if (BP_IS_HOLE(bp)) {
record->sru.hole.datablksz = datablksz;
} else if (BP_IS_REDACTED(bp)) {
record->sru.redact.datablksz = datablksz;
} else {
record->sru.data.datablksz = datablksz;
record->sru.data.obj_type = dnp->dn_type;
record->sru.data.bp = *bp;
}
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
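/*
 * Illustrative, stand-alone sketch (not taken from this file): send_cb()
 * above only trusts span * zb_blkid after overflow_multiply() confirms the
 * product fits in 64 bits. The hypothetical helper below shows the same
 * guard with no kernel dependencies; an overflowing product means the block
 * is far past dn_maxblkid and can only be a hole.
 */
#include <stdbool.h>
#include <stdint.h>

/* Returns true and stores a * b in *out, or false if the product overflows. */
static bool
mul_would_fit(uint64_t a, uint64_t b, uint64_t *out)
{
	if (a != 0 && b > UINT64_MAX / a)
		return (false);
	*out = a * b;
	return (true);
}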
struct redact_list_cb_arg {
uint64_t *num_blocks_visited;
bqueue_t *q;
boolean_t *cancel;
boolean_t mark_redact;
};
static int
redact_list_cb(redact_block_phys_t *rb, void *arg)
{
struct redact_list_cb_arg *rlcap = arg;
atomic_inc_64(rlcap->num_blocks_visited);
if (*rlcap->cancel)
return (-1);
struct send_range *data = range_alloc(REDACT, rb->rbp_object,
rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
if (rlcap->mark_redact) {
data->type = REDACT;
data->sru.redact.datablksz = redact_block_get_size(rb);
} else {
data->type = PREVIOUSLY_REDACTED;
}
bqueue_enqueue(rlcap->q, data, sizeof (*data));
return (0);
}
/*
* This function kicks off the traverse_dataset. It also handles setting the
* error code of the thread in case something goes wrong, and pushes the End of
* Stream record when the traverse_dataset call has finished.
*/
static __attribute__((noreturn)) void
send_traverse_thread(void *arg)
{
struct send_thread_arg *st_arg = arg;
int err = 0;
struct send_range *data;
fstrans_cookie_t cookie = spl_fstrans_mark();
err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
st_arg->fromtxg, &st_arg->resume,
st_arg->flags, send_cb, st_arg);
if (err != EINTR)
st_arg->error_code = err;
data = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
* Utility function that causes End of Stream records to compare after all
* others, so that other threads' comparison logic can stay simple.
*/
static int __attribute__((unused))
send_range_after(const struct send_range *from, const struct send_range *to)
{
if (from->eos_marker == B_TRUE)
return (1);
if (to->eos_marker == B_TRUE)
return (-1);
uint64_t from_obj = from->object;
uint64_t from_end_obj = from->object + 1;
uint64_t to_obj = to->object;
uint64_t to_end_obj = to->object + 1;
if (from_obj == 0) {
ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (to_obj == 0) {
ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (from_end_obj <= to_obj)
return (-1);
if (from_obj >= to_end_obj)
return (1);
int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
if (unlikely(cmp))
return (cmp);
if (from->end_blkid <= to->start_blkid)
return (-1);
if (from->start_blkid >= to->end_blkid)
return (1);
return (0);
}
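/*
 * Illustrative, stand-alone demo (not taken from this file) of the ordering
 * send_range_after() implements: disjoint half-open ranges order by
 * position, overlapping ranges compare equal, and the end-of-stream marker
 * sorts after everything. All names below are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ival {
	uint64_t start, end;	/* half-open: [start, end) */
	bool eos;
};

/* -1: a sorts before b; 1: a sorts after b; 0: the two ranges overlap. */
static int
ival_after(const struct ival *a, const struct ival *b)
{
	if (a->eos)
		return (1);
	if (b->eos)
		return (-1);
	if (a->end <= b->start)
		return (-1);
	if (a->start >= b->end)
		return (1);
	return (0);
}

int
main(void)
{
	struct ival x = { 2, 4, false }, y = { 4, 6, false };
	struct ival z = { 3, 5, false }, eos = { 0, 0, true };

	printf("%d %d %d\n",
	    ival_after(&x, &y),		/* -1: [2,4) before [4,6) */
	    ival_after(&x, &z),		/*  0: [2,4) overlaps [3,5) */
	    ival_after(&eos, &x));	/*  1: EOS sorts last */
	return (0);
}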
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, but do not free the old data. This is used so that the
* records can be sent on to the main thread without copying the data.
*/
static struct send_range *
get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = bqueue_dequeue(bq);
ASSERT3S(send_range_after(prev, next), ==, -1);
return (next);
}
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, and free the old data.
*/
static struct send_range *
get_next_range(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = get_next_range_nofree(bq, prev);
range_free(prev);
return (next);
}
static __attribute__((noreturn)) void
redact_list_thread(void *arg)
{
struct redact_list_thread_arg *rlt_arg = arg;
struct send_range *record;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (rlt_arg->rl != NULL) {
struct redact_list_cb_arg rlcba = {0};
rlcba.cancel = &rlt_arg->cancel;
rlcba.q = &rlt_arg->q;
rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
rlcba.mark_redact = rlt_arg->mark_redact;
int err = dsl_redaction_list_traverse(rlt_arg->rl,
&rlt_arg->resume, redact_list_cb, &rlcba);
if (err != EINTR)
rlt_arg->error_code = err;
}
record = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
* Compare the start point of the two provided ranges. End of stream ranges
* compare last; objects compare before any data or hole inside that object,
* and before multi-object holes that start at the same object.
*/
static int
send_range_start_compare(struct send_range *r1, struct send_range *r2)
{
uint64_t r1_objequiv = r1->object;
uint64_t r1_l0equiv = r1->start_blkid;
uint64_t r2_objequiv = r2->object;
uint64_t r2_l0equiv = r2->start_blkid;
int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker);
if (unlikely(cmp))
return (cmp);
if (r1->object == 0) {
r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK;
r1_l0equiv = 0;
}
if (r2->object == 0) {
r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK;
r2_l0equiv = 0;
}
cmp = TREE_CMP(r1_objequiv, r2_objequiv);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT);
if (unlikely(cmp))
return (cmp);
return (TREE_CMP(r1_l0equiv, r2_l0equiv));
}
enum q_idx {
REDACT_IDX = 0,
TO_IDX,
FROM_IDX,
NUM_THREADS
};
/*
* This function returns the next range the send_merge_thread should operate on.
* The inputs are two arrays; the first one stores the range at the front of the
* queues stored in the second one. The ranges are sorted in descending
* priority order; the metadata from earlier ranges overrules metadata from
* later ranges. out_mask is used to return which threads the ranges came from;
* bit i is set if ranges[i] started at the same place as the returned range.
*
* This code is not hardcoded to compare a specific number of threads; it could
* be used with any number, just by changing the q_idx enum.
*
* The "next range" is the one with the earliest start; if two starts are equal,
* the highest-priority range is the next to operate on. If a higher-priority
* range starts in the middle of the first range, then the first range will be
* truncated to end where the higher-priority range starts, and we will operate
* on that one next time. In this way, we make sure that each block covered by
* some range gets covered by a returned range, and each block covered is
* returned using the metadata of the highest-priority range it appears in.
*
* For example, if the three ranges at the front of the queues were [2,4),
* [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata
* from the third range, [2,4) with the metadata from the first range, and then
* [4,5) with the metadata from the second.
*/
static struct send_range *
find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask)
{
int idx = 0; // index of the range with the earliest start
int i;
uint64_t bmask = 0;
for (i = 1; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) < 0)
idx = i;
}
if (ranges[idx]->eos_marker) {
struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE);
*out_mask = 0;
return (ret);
}
/*
* Find all the ranges that start at that same point.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) == 0)
bmask |= 1 << i;
}
*out_mask = bmask;
/*
* OBJECT_RANGE records only come from the TO thread, and should always
* be treated as overlapping with nothing and sent on immediately. They
* are only used in raw sends, and are never redacted.
*/
if (ranges[idx]->type == OBJECT_RANGE) {
ASSERT3U(idx, ==, TO_IDX);
ASSERT3U(*out_mask, ==, 1 << TO_IDX);
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Find the first start or end point after the start of the first range.
*/
uint64_t first_change = ranges[idx]->end_blkid;
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || ranges[i]->eos_marker ||
ranges[i]->object > ranges[idx]->object ||
ranges[i]->object == DMU_META_DNODE_OBJECT)
continue;
ASSERT3U(ranges[i]->object, ==, ranges[idx]->object);
if (first_change > ranges[i]->start_blkid &&
(bmask & (1 << i)) == 0)
first_change = ranges[i]->start_blkid;
else if (first_change > ranges[i]->end_blkid)
first_change = ranges[i]->end_blkid;
}
/*
* Update all ranges to no longer overlap with the range we're
* returning. All such ranges must start at the same place as the range
* being returned, and end at or after first_change. Thus we update
* their start to first_change. If that makes them size 0, then free
* them and pull a new range from that thread.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || (bmask & (1 << i)) == 0)
continue;
ASSERT3U(first_change, >, ranges[i]->start_blkid);
ranges[i]->start_blkid = first_change;
ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid);
if (ranges[i]->start_blkid == ranges[i]->end_blkid)
ranges[i] = get_next_range(qs[i], ranges[i]);
}
/*
* Short-circuit the simple case; if the range doesn't overlap with
* anything else, or it only overlaps with things that start at the same
* place and are longer, send it on.
*/
if (first_change == ranges[idx]->end_blkid) {
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Otherwise, return a truncated copy of ranges[idx] and move the start
* of ranges[idx] back to first_change.
*/
struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP);
*ret = *ranges[idx];
ret->end_blkid = first_change;
ranges[idx]->start_blkid = first_change;
return (ret);
}
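/*
 * Illustrative, stand-alone check (not taken from this file) of the coverage
 * invariant find_next_range() maintains: every block ends up described by the
 * highest-priority range that covers it. The demo below replays the example
 * from the comment above ([2,4), [3,5), [1,3) in descending priority) and
 * only verifies that rule; it does not model the queueing or truncation.
 */
#include <stdint.h>
#include <stdio.h>

struct rng {
	uint64_t start, end;	/* half-open: [start, end) */
	const char *name;
};

int
main(void)
{
	/* Highest priority first, matching the example in the comment. */
	struct rng ranges[] = {
		{ 2, 4, "first" }, { 3, 5, "second" }, { 1, 3, "third" }
	};

	for (uint64_t blk = 1; blk < 5; blk++) {
		const char *owner = "none";
		for (int i = 0; i < 3; i++) {
			if (blk >= ranges[i].start && blk < ranges[i].end) {
				owner = ranges[i].name;	/* first hit wins */
				break;
			}
		}
		/* Prints: 1 -> third, 2 -> first, 3 -> first, 4 -> second. */
		printf("block %llu -> %s\n", (unsigned long long)blk, owner);
	}
	return (0);
}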
#define FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX))
/*
* Merge the results from the from thread and the to thread, and then hand the
* records off to send_reader_thread to prefetch them. If this is not a
* send from a redaction bookmark, the from thread will push an end of stream
* record and stop, and we'll just send everything that was changed in the
* to_ds since the ancestor's creation txg. If it is, then since
* traverse_dataset has a canonical order, we can compare each change as
* they're pulled off the queues. That will give us a stream that is
* appropriately sorted, and covers all records. In addition, we pull the
* data from the redact_list_thread and use that to determine which blocks
* should be redacted.
*/
static __attribute__((noreturn)) void
send_merge_thread(void *arg)
{
struct send_merge_thread_arg *smt_arg = arg;
struct send_range *front_ranges[NUM_THREADS];
bqueue_t *queues[NUM_THREADS];
int err = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (smt_arg->redact_arg == NULL) {
front_ranges[REDACT_IDX] =
kmem_zalloc(sizeof (struct send_range), KM_SLEEP);
front_ranges[REDACT_IDX]->eos_marker = B_TRUE;
front_ranges[REDACT_IDX]->type = REDACT;
queues[REDACT_IDX] = NULL;
} else {
front_ranges[REDACT_IDX] =
bqueue_dequeue(&smt_arg->redact_arg->q);
queues[REDACT_IDX] = &smt_arg->redact_arg->q;
}
front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q);
queues[TO_IDX] = &smt_arg->to_arg->q;
front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q);
queues[FROM_IDX] = &smt_arg->from_arg->q;
uint64_t mask = 0;
struct send_range *range;
for (range = find_next_range(front_ranges, queues, &mask);
!range->eos_marker && err == 0 && !smt_arg->cancel;
range = find_next_range(front_ranges, queues, &mask)) {
/*
* If the range in question was in both the from redact bookmark
* and the bookmark we're using to redact, then don't send it.
* It's already redacted on the receiving system, so a redaction
* record would be redundant.
*/
if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) {
ASSERT3U(range->type, ==, REDACT);
range_free(range);
continue;
}
bqueue_enqueue(&smt_arg->q, range, sizeof (*range));
if (smt_arg->to_arg->error_code != 0) {
err = smt_arg->to_arg->error_code;
} else if (smt_arg->from_arg->error_code != 0) {
err = smt_arg->from_arg->error_code;
} else if (smt_arg->redact_arg != NULL &&
smt_arg->redact_arg->error_code != 0) {
err = smt_arg->redact_arg->error_code;
}
}
if (smt_arg->cancel && err == 0)
err = SET_ERROR(EINTR);
smt_arg->error = err;
if (smt_arg->error != 0) {
smt_arg->to_arg->cancel = B_TRUE;
smt_arg->from_arg->cancel = B_TRUE;
if (smt_arg->redact_arg != NULL)
smt_arg->redact_arg->cancel = B_TRUE;
}
for (int i = 0; i < NUM_THREADS; i++) {
while (!front_ranges[i]->eos_marker) {
front_ranges[i] = get_next_range(queues[i],
front_ranges[i]);
}
range_free(front_ranges[i]);
}
range->eos_marker = B_TRUE;
bqueue_enqueue_flush(&smt_arg->q, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
struct send_reader_thread_arg {
struct send_merge_thread_arg *smta;
bqueue_t q;
boolean_t cancel;
boolean_t issue_reads;
uint64_t featureflags;
int error;
};
static void
dmu_send_read_done(zio_t *zio)
{
struct send_range *range = zio->io_private;
mutex_enter(&range->sru.data.lock);
if (zio->io_error != 0) {
abd_free(range->sru.data.abd);
range->sru.data.abd = NULL;
range->sru.data.io_err = zio->io_error;
}
ASSERT(range->sru.data.io_outstanding);
range->sru.data.io_outstanding = B_FALSE;
cv_broadcast(&range->sru.data.cv);
mutex_exit(&range->sru.data.lock);
}
static void
issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range)
{
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
objset_t *os = srta->smta->os;
ASSERT3U(range->type, ==, DATA);
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
/*
* If we have large blocks stored on disk but
* the send flags don't allow us to send large
* blocks, we split the data from the arc buf
* into chunks.
*/
boolean_t split_large_blocks =
srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
/*
* We should only request compressed data from the ARC if all
* the following are true:
* - stream compression was requested
* - we aren't splitting large blocks into smaller chunks
* - the data won't need to be byteswapped before sending
* - this isn't an embedded block
* - this isn't metadata (if receiving on a different endian
* system it can be byteswapped more easily)
*/
boolean_t request_compressed =
(srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
!split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
!BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
zio_flag_t zioflags = ZIO_FLAG_CANFAIL;
if (srta->featureflags & DMU_BACKUP_FEATURE_RAW) {
zioflags |= ZIO_FLAG_RAW;
srdp->io_compressed = B_TRUE;
} else if (request_compressed) {
zioflags |= ZIO_FLAG_RAW_COMPRESS;
srdp->io_compressed = B_TRUE;
}
srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ?
BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp);
if (!srta->issue_reads)
return;
if (BP_IS_REDACTED(bp))
return;
if (send_do_embed(bp, srta->featureflags))
return;
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(os),
.zb_object = range->object,
.zb_level = 0,
.zb_blkid = range->start_blkid,
};
arc_flags_t aflags = ARC_FLAG_CACHED_ONLY;
int arc_err = arc_read(NULL, os->os_spa, bp,
arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb);
/*
* If the data is not already cached in the ARC, we read directly
* from zio. This avoids the performance overhead of adding a new
* entry to the ARC, and we also avoid polluting the ARC cache with
* data that is not likely to be used in the future.
*/
if (arc_err != 0) {
srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE);
srdp->io_outstanding = B_TRUE;
zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd,
srdp->datasz, dmu_send_read_done, range,
ZIO_PRIORITY_ASYNC_READ, zioflags, &zb));
}
}
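/*
 * Illustrative, stand-alone restatement (not taken from this file) of the
 * request_compressed test above: it is simply the conjunction of the five
 * conditions listed in the comment. Parameter names are hypothetical.
 */
#include <stdbool.h>

static bool
want_compressed_arc_read(bool stream_compressed, bool splitting_large_blocks,
    bool needs_byteswap, bool embedded, bool metadata)
{
	return (stream_compressed && !splitting_large_blocks &&
	    !needs_byteswap && !embedded && !metadata);
}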
/*
* Create a new record with the given values.
*/
static void
enqueue_range(struct send_reader_thread_arg *srta, bqueue_t *q, dnode_t *dn,
uint64_t blkid, uint64_t count, const blkptr_t *bp, uint32_t datablksz)
{
enum type range_type = (bp == NULL || BP_IS_HOLE(bp) ? HOLE :
(BP_IS_REDACTED(bp) ? REDACT : DATA));
struct send_range *range = range_alloc(range_type, dn->dn_object,
blkid, blkid + count, B_FALSE);
if (blkid == DMU_SPILL_BLKID) {
ASSERT3P(bp, !=, NULL);
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
}
switch (range_type) {
case HOLE:
range->sru.hole.datablksz = datablksz;
break;
case DATA:
ASSERT3U(count, ==, 1);
range->sru.data.datablksz = datablksz;
range->sru.data.obj_type = dn->dn_type;
range->sru.data.bp = *bp;
issue_data_read(srta, range);
break;
case REDACT:
range->sru.redact.datablksz = datablksz;
break;
default:
break;
}
bqueue_enqueue(q, range, datablksz);
}
/*
* This thread is responsible for two things: First, it retrieves the correct
* blkptr in the to ds if we need to send the data because of something from
* the from thread. As a result of this, we're the first ones to discover that
* some indirect blocks can be discarded because they're not holes. Second,
* it issues prefetches for the data we need to send.
*/
static __attribute__((noreturn)) void
send_reader_thread(void *arg)
{
struct send_reader_thread_arg *srta = arg;
struct send_merge_thread_arg *smta = srta->smta;
bqueue_t *inq = &smta->q;
bqueue_t *outq = &srta->q;
objset_t *os = smta->os;
fstrans_cookie_t cookie = spl_fstrans_mark();
struct send_range *range = bqueue_dequeue(inq);
int err = 0;
/*
* If the record we're analyzing is from a redaction bookmark from the
* fromds, then we need to know whether or not it exists in the tods so
* we know whether to create records for it or not. If it does, we need
* the datablksz so we can generate an appropriate record for it.
* Finally, if it isn't redacted, we need the blkptr so that we can send
* a WRITE record containing the actual data.
*/
uint64_t last_obj = UINT64_MAX;
uint64_t last_obj_exists = B_TRUE;
while (!range->eos_marker && !srta->cancel && smta->error == 0 &&
err == 0) {
switch (range->type) {
case DATA:
issue_data_read(srta, range);
bqueue_enqueue(outq, range, range->sru.data.datablksz);
range = get_next_range_nofree(inq, range);
break;
case HOLE:
case OBJECT:
case OBJECT_RANGE:
case REDACT: // Redacted blocks must exist
bqueue_enqueue(outq, range, sizeof (*range));
range = get_next_range_nofree(inq, range);
break;
case PREVIOUSLY_REDACTED: {
/*
* This entry came from the "from bookmark" when
* sending from a bookmark that has a redaction
* list. We need to check if this object/blkid
* exists in the target ("to") dataset, and if
* not then we drop this entry. We also need
* to fill in the block pointer so that we know
* what to prefetch.
*
* To accomplish the above, we first cache whether or
* not the last object we examined exists. If it
* doesn't, we can drop this record. If it does, we hold
* the dnode and use it to call dbuf_dnode_findbp. We do
* this instead of dbuf_bookmark_findbp because we will
* often operate on large ranges, and holding the dnode
* once is more efficient.
*/
boolean_t object_exists = B_TRUE;
/*
* If the data is redacted, we only care if it exists,
* so that we don't send records for objects that have
* been deleted.
*/
dnode_t *dn;
if (range->object == last_obj && !last_obj_exists) {
/*
* If we're still examining the same object as
* previously, and it doesn't exist, we don't
* need to call dbuf_bookmark_findbp.
*/
object_exists = B_FALSE;
} else {
err = dnode_hold(os, range->object, FTAG, &dn);
if (err == ENOENT) {
object_exists = B_FALSE;
err = 0;
}
last_obj = range->object;
last_obj_exists = object_exists;
}
if (err != 0) {
break;
} else if (!object_exists) {
/*
* The block was modified, but doesn't
* exist in the to dataset; if it was
* deleted in the to dataset, then we'll
* visit the hole bp for it at some point.
*/
range = get_next_range(inq, range);
continue;
}
uint64_t file_max =
MIN(dn->dn_maxblkid, range->end_blkid);
/*
* The object exists, so we need to try to find the
* blkptr for each block in the range we're processing.
*/
rw_enter(&dn->dn_struct_rwlock, RW_READER);
for (uint64_t blkid = range->start_blkid;
blkid < file_max; blkid++) {
blkptr_t bp;
uint32_t datablksz =
dn->dn_phys->dn_datablkszsec <<
SPA_MINBLOCKSHIFT;
uint64_t offset = blkid * datablksz;
/*
* This call finds the next non-hole block in
* the object. This is to prevent a
* performance problem where we're unredacting
* a large hole. Using dnode_next_offset to
* skip over the large hole avoids iterating
* over every block in it.
*/
err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&offset, 1, 1, 0);
if (err == ESRCH) {
offset = UINT64_MAX;
err = 0;
} else if (err != 0) {
break;
}
if (offset != blkid * datablksz) {
/*
* if there is a hole from here
* (blkid) to offset
*/
offset = MIN(offset, file_max *
datablksz);
uint64_t nblks = (offset / datablksz) -
blkid;
enqueue_range(srta, outq, dn, blkid,
nblks, NULL, datablksz);
blkid += nblks;
}
if (blkid >= file_max)
break;
err = dbuf_dnode_findbp(dn, 0, blkid, &bp,
NULL, NULL);
if (err != 0)
break;
ASSERT(!BP_IS_HOLE(&bp));
enqueue_range(srta, outq, dn, blkid, 1, &bp,
datablksz);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
range = get_next_range(inq, range);
}
}
}
if (srta->cancel || err != 0) {
smta->cancel = B_TRUE;
srta->error = err;
} else if (smta->error != 0) {
srta->error = smta->error;
}
while (!range->eos_marker)
range = get_next_range(inq, range);
bqueue_enqueue_flush(outq, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
#define NUM_SNAPS_NOT_REDACTED UINT64_MAX
struct dmu_send_params {
/* Pool args */
const void *tag; // Tag dp was held with, will be used to release dp.
dsl_pool_t *dp;
/* To snapshot args */
const char *tosnap;
dsl_dataset_t *to_ds;
/* From snapshot args */
zfs_bookmark_phys_t ancestor_zb;
uint64_t *fromredactsnaps;
/* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */
uint64_t numfromredactsnaps;
/* Stream params */
boolean_t is_clone;
boolean_t embedok;
boolean_t large_block_ok;
boolean_t compressok;
boolean_t rawok;
boolean_t savedok;
uint64_t resumeobj;
uint64_t resumeoff;
uint64_t saved_guid;
zfs_bookmark_phys_t *redactbook;
/* Stream output params */
dmu_send_outparams_t *dso;
/* Stream progress params */
offset_t *off;
int outfd;
char saved_toname[MAXNAMELEN];
};
static int
setup_featureflags(struct dmu_send_params *dspp, objset_t *os,
uint64_t *featureflags)
{
dsl_dataset_t *to_ds = dspp->to_ds;
dsl_pool_t *dp = dspp->dp;
-#ifdef _KERNEL
+
if (dmu_objset_type(os) == DMU_OST_ZFS) {
uint64_t version;
if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0)
return (SET_ERROR(EINVAL));
if (version >= ZPL_VERSION_SA)
*featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
-#endif
/* raw sends imply large_block_ok */
if ((dspp->rawok || dspp->large_block_ok) &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
}
/* encrypted datasets will not have embedded blocks */
if ((dspp->embedok || dspp->rawok) && !os->os_encrypted &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
*featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
}
/* raw send implies compressok */
if (dspp->compressok || dspp->rawok)
*featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
if (dspp->rawok && os->os_encrypted)
*featureflags |= DMU_BACKUP_FEATURE_RAW;
if ((*featureflags &
(DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
DMU_BACKUP_FEATURE_RAW)) != 0 &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_LZ4;
}
/*
* We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to
* allow sending ZSTD compressed datasets to a receiver that does not
* support ZSTD
*/
if ((*featureflags &
(DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_ZSTD;
}
if (dspp->resumeobj != 0 || dspp->resumeoff != 0) {
*featureflags |= DMU_BACKUP_FEATURE_RESUMING;
}
if (dspp->redactbook != NULL) {
*featureflags |= DMU_BACKUP_FEATURE_REDACTED;
}
if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
}
return (0);
}
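/*
 * Illustrative, stand-alone sketch (not taken from this file) of the flag
 * derivation pattern in setup_featureflags(): raw sends imply large-block
 * and compressed handling, and the raw flag itself only matters for
 * encrypted datasets. The bit values below are made up and are not the real
 * DMU_BACKUP_FEATURE_* constants.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_LARGE_BLOCKS	(1ULL << 0)
#define SKETCH_COMPRESSED	(1ULL << 1)
#define SKETCH_RAW		(1ULL << 2)

static uint64_t
derive_flags(bool rawok, bool large_block_ok, bool compressok,
    bool ds_uses_large_blocks, bool ds_encrypted)
{
	uint64_t flags = 0;

	/* Raw sends imply large_block_ok. */
	if ((rawok || large_block_ok) && ds_uses_large_blocks)
		flags |= SKETCH_LARGE_BLOCKS;
	/* Raw sends imply compressok. */
	if (compressok || rawok)
		flags |= SKETCH_COMPRESSED;
	/* Raw only matters when the dataset is actually encrypted. */
	if (rawok && ds_encrypted)
		flags |= SKETCH_RAW;
	return (flags);
}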
static dmu_replay_record_t *
create_begin_record(struct dmu_send_params *dspp, objset_t *os,
uint64_t featureflags)
{
dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t),
KM_SLEEP);
drr->drr_type = DRR_BEGIN;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
dsl_dataset_t *to_ds = dspp->to_ds;
drrb->drr_magic = DMU_BACKUP_MAGIC;
drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time;
drrb->drr_type = dmu_objset_type(os);
drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid;
DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM);
DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags);
if (dspp->is_clone)
drrb->drr_flags |= DRR_FLAG_CLONE;
if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET)
drrb->drr_flags |= DRR_FLAG_CI_DATA;
if (zfs_send_set_freerecords_bit)
drrb->drr_flags |= DRR_FLAG_FREERECORDS;
drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_SPILL_BLOCK;
if (dspp->savedok) {
drrb->drr_toguid = dspp->saved_guid;
strlcpy(drrb->drr_toname, dspp->saved_toname,
sizeof (drrb->drr_toname));
} else {
dsl_dataset_name(to_ds, drrb->drr_toname);
if (!to_ds->ds_is_snapshot) {
(void) strlcat(drrb->drr_toname, "@--head--",
sizeof (drrb->drr_toname));
}
}
return (drr);
}
static void
setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os,
dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok)
{
VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
to_arg->error_code = 0;
to_arg->cancel = B_FALSE;
to_arg->os = to_os;
to_arg->fromtxg = fromtxg;
to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA;
if (rawok)
to_arg->flags |= TRAVERSE_NO_DECRYPT;
if (zfs_send_corrupt_data)
to_arg->flags |= TRAVERSE_HARD;
to_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_from_thread(struct redact_list_thread_arg *from_arg,
redaction_list_t *from_rl, dmu_sendstatus_t *dssp)
{
VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
from_arg->error_code = 0;
from_arg->cancel = B_FALSE;
from_arg->rl = from_rl;
from_arg->mark_redact = B_FALSE;
from_arg->num_blocks_visited = &dssp->dss_blocks;
/*
* If from_rl is NULL, redact_list_thread just enqueues an eos marker and
* exits immediately.
*/
(void) thread_create(NULL, 0, redact_list_thread, from_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg,
struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp)
{
if (dspp->redactbook == NULL)
return;
rlt_arg->cancel = B_FALSE;
VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
rlt_arg->error_code = 0;
rlt_arg->mark_redact = B_TRUE;
rlt_arg->rl = rl;
rlt_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_merge_thread(struct send_merge_thread_arg *smt_arg,
struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg,
objset_t *os)
{
VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
smt_arg->cancel = B_FALSE;
smt_arg->error = 0;
smt_arg->from_arg = from_arg;
smt_arg->to_arg = to_arg;
if (dspp->redactbook != NULL)
smt_arg->redact_arg = rlt_arg;
smt_arg->os = os;
(void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc,
TS_RUN, minclsyspri);
}
static void
setup_reader_thread(struct send_reader_thread_arg *srt_arg,
struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg,
uint64_t featureflags)
{
VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff,
MAX(zfs_send_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
srt_arg->smta = smt_arg;
srt_arg->issue_reads = !dspp->dso->dso_dryrun;
srt_arg->featureflags = featureflags;
(void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static int
setup_resume_points(struct dmu_send_params *dspp,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg,
struct redact_list_thread_arg *rlt_arg,
struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os,
redaction_list_t *redact_rl, nvlist_t *nvl)
{
(void) smt_arg;
dsl_dataset_t *to_ds = dspp->to_ds;
int err = 0;
uint64_t obj = 0;
uint64_t blkid = 0;
if (resuming) {
obj = dspp->resumeobj;
dmu_object_info_t to_doi;
err = dmu_object_info(os, obj, &to_doi);
if (err != 0)
return (err);
blkid = dspp->resumeoff / to_doi.doi_data_block_size;
}
/*
* If we're resuming a redacted send, we can skip to the appropriate
* point in the redaction bookmark by binary searching through it.
*/
if (redact_rl != NULL) {
SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid);
}
SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid);
if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) {
uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj;
/*
* Note: If the resume point is in an object whose
* blocksize is different in the from vs to snapshots,
* we will have divided by the "wrong" blocksize.
* However, in this case fromsnap's send_cb() will
* detect that the blocksize has changed and therefore
* ignore this object.
*
* If we're resuming a send from a redaction bookmark,
* we still cannot accidentally suggest blocks behind
* the to_ds. In addition, we know that any blocks in
* the object in the to_ds will have to be sent, since
* the size changed. Therefore, we can't cause any harm
* this way either.
*/
SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid);
}
if (resuming) {
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj);
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff);
}
return (0);
}
static dmu_sendstatus_t *
setup_send_progress(struct dmu_send_params *dspp)
{
dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP);
dssp->dss_outfd = dspp->outfd;
dssp->dss_off = dspp->off;
dssp->dss_proc = curproc;
mutex_enter(&dspp->to_ds->ds_sendstream_lock);
list_insert_head(&dspp->to_ds->ds_sendstreams, dssp);
mutex_exit(&dspp->to_ds->ds_sendstream_lock);
return (dssp);
}
/*
* Actually do the bulk of the work in a zfs send.
*
* The idea is that we want to do a send from ancestor_zb to to_ds. We also
* want to not send any data that has been modified by all the datasets in
* redactsnaparr, and store the list of blocks that are redacted in this way in
* a bookmark named redactbook, created on the to_ds. We do this by creating
* several worker threads, whose function is described below.
*
* There are three cases.
* The first case is a redacted zfs send. In this case there are 5 threads.
* The first thread is the to_ds traversal thread: it calls dataset_traverse on
* the to_ds and finds all the blocks that have changed since ancestor_zb (if
* it's a full send, that's all blocks in the dataset). It then sends those
* blocks on to the send merge thread. The redact list thread takes the data
* from the redaction bookmark and sends those blocks on to the send merge
* thread. The send merge thread takes the data from the to_ds traversal
* thread, and combines it with the redaction records from the redact list
* thread. If a block appears in both the to_ds's data and the redaction data,
* the send merge thread will mark it as redacted and send it on to the prefetch
* thread. Otherwise, the send merge thread will send the block on to the
* prefetch thread unchanged. The prefetch thread will issue prefetch reads for
* any data that isn't redacted, and then send the data on to the main thread.
* The main thread behaves the same as in a normal send case, issuing demand
* reads for data blocks and sending out records over the network
*
* The graphic below diagrams the flow of data in the case of a redacted zfs
* send. Each box represents a thread, and each line represents the flow of
* data.
*
* Records from the |
* redaction bookmark |
* +--------------------+ | +---------------------------+
* | | v | Send Merge Thread |
* | Redact List Thread +----------> Apply redaction marks to |
* | | | records as specified by |
* +--------------------+ | redaction ranges |
* +----^---------------+------+
* | | Merged data
* | |
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since |
* ancestor_zb +------------v----+
* | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The second case is an incremental send from a redaction bookmark. The to_ds
* traversal thread and the main thread behave the same as in the redacted
* send case. The new thread is the from bookmark traversal thread. It
* iterates over the redaction list in the redaction bookmark, and enqueues
* records for each block that was redacted in the original send. The send
* merge thread now has to merge the data from the two threads. For details
* about that process, see the header comment of send_merge_thread(). Any data
* it decides to send on will be prefetched by the prefetch thread. Note that
* you can perform a redacted send from a redaction bookmark; in that case,
* the data flow behaves very similarly to the flow in the redacted send case,
* except with the addition of the bookmark traversal thread iterating over the
* redaction bookmark. The send_merge_thread also has to take on the
* responsibility of merging the redact list thread's records, the bookmark
* traversal thread's records, and the to_ds records.
*
* +---------------------+
* | |
* | Redact List Thread +--------------+
* | | |
* +---------------------+ |
* Blocks in redaction list | Ranges modified by every secure snap
* of from bookmark | (or EOS if not redacted)
* |
* +---------------------+ | +----v----------------------+
* | bookmark Traversal | v | Send Merge Thread |
* | Thread (finds +---------> Merges bookmark, rlt, and |
* | candidate blocks) | | to_ds send records |
* +---------------------+ +----^---------------+------+
* | | Merged data
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since +------------v----+
* ancestor_zb | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The final case is a simple zfs full or incremental send. The to_ds traversal
* thread behaves the same as always. The redact list thread is never started.
* The send merge thread takes all the blocks that the to_ds traversal thread
* sends it and hands them to the prefetch thread, which prefetches the data
* and sends the blocks on to the main thread.
* The main thread sends the data over the wire.
*
* To keep performance acceptable, we want to prefetch the data in the worker
* threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH
* feature built into traverse_dataset, the combining and deletion of records
* due to redaction and sends from redaction bookmarks mean that we could
* issue many unnecessary prefetches. As a result, we only prefetch data
* after we've determined that the record is not going to be redacted. To
* prevent the prefetching from getting too far ahead of the main thread, the
* blocking queues that are used for communication are capped not by the
* number of entries in the queue, but by the sum of the size of the
* prefetches associated with them. The limit on the amount of data that the
* thread can prefetch beyond what the main thread has reached is controlled
* by the global variable zfs_send_queue_length. In addition, to prevent poor
* performance in the beginning of a send, we also limit the distance ahead
* that the traversal threads can be. That distance is controlled by the
* zfs_send_no_prefetch_queue_length tunable.
*
* Note: Releases dp using the specified tag.
*/
static int
dmu_send_impl(struct dmu_send_params *dspp)
{
objset_t *os;
dmu_replay_record_t *drr;
dmu_sendstatus_t *dssp;
dmu_send_cookie_t dsc = {0};
int err;
uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg;
uint64_t featureflags = 0;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *rlt_arg;
struct send_merge_thread_arg *smt_arg;
struct send_reader_thread_arg *srt_arg;
struct send_range *range;
redaction_list_t *from_rl = NULL;
redaction_list_t *redact_rl = NULL;
boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0);
boolean_t book_resuming = resuming;
dsl_dataset_t *to_ds = dspp->to_ds;
zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb;
dsl_pool_t *dp = dspp->dp;
const void *tag = dspp->tag;
err = dmu_objset_from_ds(to_ds, &os);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If this is a non-raw send of an encrypted ds, we can ensure that
* the objset_phys_t is authenticated. This is safe because this is
* either a snapshot or we have owned the dataset, ensuring that
* it can't be modified.
*/
if (!dspp->rawok && os->os_encrypted &&
arc_is_unauthenticated(os->os_phys_buf)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT,
ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
err = arc_untransform(os->os_phys_buf, os->os_spa,
&zb, B_FALSE);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
}
if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If we're doing a redacted send, hold the bookmark's redaction list.
*/
if (dspp->redactbook != NULL) {
err = dsl_redaction_list_hold_obj(dp,
dspp->redactbook->zbm_redaction_obj, FTAG,
&redact_rl);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, redact_rl, FTAG);
}
/*
* If we're sending from a redaction bookmark, hold the redaction list
* so that we can consider sending the redacted blocks.
*/
if (ancestor_zb->zbm_redaction_obj != 0) {
err = dsl_redaction_list_hold_obj(dp,
ancestor_zb->zbm_redaction_obj, FTAG, &from_rl);
if (err != 0) {
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, from_rl, FTAG);
}
dsl_dataset_long_hold(to_ds, FTAG);
from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP);
to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP);
rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP);
smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP);
srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP);
drr = create_begin_record(dspp, os, featureflags);
dssp = setup_send_progress(dspp);
dsc.dsc_drr = drr;
dsc.dsc_dso = dspp->dso;
dsc.dsc_os = os;
dsc.dsc_off = dspp->off;
dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid;
dsc.dsc_fromtxg = fromtxg;
dsc.dsc_pending_op = PENDING_NONE;
dsc.dsc_featureflags = featureflags;
dsc.dsc_resume_object = dspp->resumeobj;
dsc.dsc_resume_offset = dspp->resumeoff;
dsl_pool_rele(dp, tag);
void *payload = NULL;
size_t payload_len = 0;
nvlist_t *nvl = fnvlist_alloc();
/*
* If we're doing a redacted send, we include the snapshots we're
* redacted with respect to so that the target system knows what send
* streams can be correctly received on top of this dataset. If we're
* instead sending a redacted dataset, we include the snapshots that the
* dataset was created with respect to.
*/
if (dspp->redactbook != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS,
redact_rl->rl_phys->rlp_snaps,
redact_rl->rl_phys->rlp_num_snaps);
} else if (dsl_dataset_feature_is_active(to_ds,
SPA_FEATURE_REDACTED_DATASETS)) {
uint64_t *tods_guids;
uint64_t length;
VERIFY(dsl_dataset_get_uint64_array_feature(to_ds,
SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids));
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids,
length);
}
/*
* If we're sending from a redaction bookmark, then we should retrieve
* the guids of that bookmark so we can send them over the wire.
*/
if (from_rl != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
from_rl->rl_phys->rlp_snaps,
from_rl->rl_phys->rlp_num_snaps);
}
/*
* If the snapshot we're sending from is redacted, include the redaction
* list in the stream.
*/
if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) {
ASSERT3P(from_rl, ==, NULL);
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps);
if (dspp->numfromredactsnaps > 0) {
kmem_free(dspp->fromredactsnaps,
dspp->numfromredactsnaps * sizeof (uint64_t));
dspp->fromredactsnaps = NULL;
}
}
if (resuming || book_resuming) {
err = setup_resume_points(dspp, to_arg, from_arg,
rlt_arg, smt_arg, resuming, os, redact_rl, nvl);
if (err != 0)
goto out;
}
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
uint64_t ivset_guid = ancestor_zb->zbm_ivset_guid;
nvlist_t *keynvl = NULL;
ASSERT(os->os_encrypted);
err = dsl_crypto_populate_key_nvlist(os, ivset_guid,
&keynvl);
if (err != 0) {
fnvlist_free(nvl);
goto out;
}
fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
fnvlist_free(keynvl);
}
if (!nvlist_empty(nvl)) {
payload = fnvlist_pack(nvl, &payload_len);
drr->drr_payloadlen = payload_len;
}
fnvlist_free(nvl);
err = dump_record(&dsc, payload, payload_len);
fnvlist_pack_free(payload, payload_len);
if (err != 0) {
err = dsc.dsc_err;
goto out;
}
setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok);
setup_from_thread(from_arg, from_rl, dssp);
setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp);
setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os);
setup_reader_thread(srt_arg, dspp, smt_arg, featureflags);
range = bqueue_dequeue(&srt_arg->q);
while (err == 0 && !range->eos_marker) {
err = do_dump(&dsc, range);
range = get_next_range(&srt_arg->q, range);
if (issig(JUSTLOOKING) && issig(FORREAL))
err = SET_ERROR(EINTR);
}
/*
* If we hit an error or are interrupted, cancel our worker threads and
* clear the queue of any pending records. The threads will pass the
* cancel up the tree of worker threads, and each one will clean up any
* pending records before exiting.
*/
if (err != 0) {
srt_arg->cancel = B_TRUE;
while (!range->eos_marker) {
range = get_next_range(&srt_arg->q, range);
}
}
range_free(range);
bqueue_destroy(&srt_arg->q);
bqueue_destroy(&smt_arg->q);
if (dspp->redactbook != NULL)
bqueue_destroy(&rlt_arg->q);
bqueue_destroy(&to_arg->q);
bqueue_destroy(&from_arg->q);
if (err == 0 && srt_arg->error != 0)
err = srt_arg->error;
if (err != 0)
goto out;
if (dsc.dsc_pending_op != PENDING_NONE)
if (dump_record(&dsc, NULL, 0) != 0)
err = SET_ERROR(EINTR);
if (err != 0) {
if (err == EINTR && dsc.dsc_err != 0)
err = dsc.dsc_err;
goto out;
}
/*
* Send the DRR_END record if this is not a saved stream.
* Otherwise, the omitted DRR_END record will signal to
* the receive side that the stream is incomplete.
*/
if (!dspp->savedok) {
memset(drr, 0, sizeof (dmu_replay_record_t));
drr->drr_type = DRR_END;
drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc;
drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid;
if (dump_record(&dsc, NULL, 0) != 0)
err = dsc.dsc_err;
}
out:
mutex_enter(&to_ds->ds_sendstream_lock);
list_remove(&to_ds->ds_sendstreams, dssp);
mutex_exit(&to_ds->ds_sendstream_lock);
VERIFY(err != 0 || (dsc.dsc_sent_begin &&
(dsc.dsc_sent_end || dspp->savedok)));
kmem_free(drr, sizeof (dmu_replay_record_t));
kmem_free(dssp, sizeof (dmu_sendstatus_t));
kmem_free(from_arg, sizeof (*from_arg));
kmem_free(to_arg, sizeof (*to_arg));
kmem_free(rlt_arg, sizeof (*rlt_arg));
kmem_free(smt_arg, sizeof (*smt_arg));
kmem_free(srt_arg, sizeof (*srt_arg));
dsl_dataset_long_rele(to_ds, FTAG);
if (from_rl != NULL) {
dsl_redaction_list_long_rele(from_rl, FTAG);
dsl_redaction_list_rele(from_rl, FTAG);
}
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
return (err);
}
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
boolean_t rawok, boolean_t savedok, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err;
dsl_dataset_t *fromds;
ds_hold_flags_t dsflags;
struct dmu_send_params dspp = {0};
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.rawok = rawok;
dspp.savedok = savedok;
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
err = dsl_pool_hold(pool, FTAG, &dspp.dp);
if (err != 0)
return (err);
err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (fromsnap != 0) {
err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags,
FTAG, &fromds);
if (err != 0) {
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
dspp.ancestor_zb.zbm_creation_txg =
dsl_dataset_phys(fromds)->ds_creation_txg;
dspp.ancestor_zb.zbm_creation_time =
dsl_dataset_phys(fromds)->ds_creation_time;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(dspp.dp->dp_meta_objset,
fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
&dspp.ancestor_zb.zbm_ivset_guid);
}
/* See dmu_send for the reasons behind this. */
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(fromds,
SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size = dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
memcpy(dspp.fromredactsnaps, fromredact, size);
}
boolean_t is_before =
dsl_dataset_is_before(dspp.to_ds, fromds, 0);
dspp.is_clone = (dspp.to_ds->ds_dir !=
fromds->ds_dir);
dsl_dataset_rele(fromds, FTAG);
if (!is_before) {
dsl_pool_rele(dspp.dp, FTAG);
err = SET_ERROR(EXDEV);
} else {
err = dmu_send_impl(&dspp);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
if (dspp.fromredactsnaps)
kmem_free(dspp.fromredactsnaps,
dspp.numfromredactsnaps * sizeof (uint64_t));
dsl_dataset_rele(dspp.to_ds, FTAG);
return (err);
}
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff,
const char *redactbook, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err = 0;
ds_hold_flags_t dsflags;
boolean_t owned = B_FALSE;
dsl_dataset_t *fromds = NULL;
zfs_bookmark_phys_t book = {0};
struct dmu_send_params dspp = {0};
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
dspp.tosnap = tosnap;
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.resumeobj = resumeobj;
dspp.resumeoff = resumeoff;
dspp.rawok = rawok;
dspp.savedok = savedok;
if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
return (SET_ERROR(EINVAL));
err = dsl_pool_hold(tosnap, FTAG, &dspp.dp);
if (err != 0)
return (err);
if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) {
/*
* We are sending a filesystem or volume. Ensure
* that it doesn't change by owning the dataset.
*/
if (savedok) {
/*
* We are looking for the dataset that represents the
* partially received send stream. If this stream was
* received as a new snapshot of an existing dataset,
* this will be saved in a hidden clone named
* "<pool>/<dataset>/%recv". Otherwise, the stream
* will be saved in the live dataset itself. In
* either case we need to use dsl_dataset_own_force()
* because the stream is marked as inconsistent,
* which would normally make it unavailable to be
* owned.
*/
char *name = kmem_asprintf("%s/%s", tosnap,
recv_clone_name);
err = dsl_dataset_own_force(dspp.dp, name, dsflags,
FTAG, &dspp.to_ds);
if (err == ENOENT) {
err = dsl_dataset_own_force(dspp.dp, tosnap,
dsflags, FTAG, &dspp.to_ds);
}
if (err == 0) {
+ owned = B_TRUE;
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1,
&dspp.saved_guid);
}
if (err == 0) {
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TONAME, 1,
sizeof (dspp.saved_toname),
dspp.saved_toname);
}
- if (err != 0)
+ /* Only disown if there was an error in the lookups */
+ if (owned && (err != 0))
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
kmem_strfree(name);
} else {
err = dsl_dataset_own(dspp.dp, tosnap, dsflags,
FTAG, &dspp.to_ds);
+ if (err == 0)
+ owned = B_TRUE;
}
- owned = B_TRUE;
} else {
err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
}
if (err != 0) {
+ /* Note: dsl dataset is not owned at this point */
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (redactbook != NULL) {
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, tosnap, sizeof (path));
char *at = strchr(path, '@');
if (at == NULL) {
err = EINVAL;
} else {
(void) snprintf(at, sizeof (path) - (at - path), "#%s",
redactbook);
err = dsl_bookmark_lookup(dspp.dp, path,
NULL, &book);
dspp.redactbook = &book;
}
}
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
if (fromsnap != NULL) {
zfs_bookmark_phys_t *zb = &dspp.ancestor_zb;
int fsnamelen;
if (strpbrk(tosnap, "@#") != NULL)
fsnamelen = strpbrk(tosnap, "@#") - tosnap;
else
fsnamelen = strlen(tosnap);
/*
* If the fromsnap is in a different filesystem, then
* mark the send stream as a clone.
*/
if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
(fromsnap[fsnamelen] != '@' &&
fromsnap[fsnamelen] != '#')) {
dspp.is_clone = B_TRUE;
}
if (strchr(fromsnap, '@') != NULL) {
err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG,
&fromds);
if (err != 0) {
ASSERT3P(fromds, ==, NULL);
} else {
/*
* We need to make a deep copy of the redact
* snapshots of the from snapshot, because the
* array will be freed when we evict from_ds.
*/
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(
fromds, SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps =
NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size =
dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size,
KM_SLEEP);
memcpy(dspp.fromredactsnaps, fromredact,
size);
}
if (!dsl_dataset_is_before(dspp.to_ds, fromds,
0)) {
err = SET_ERROR(EXDEV);
} else {
zb->zbm_creation_txg =
dsl_dataset_phys(fromds)->
ds_creation_txg;
zb->zbm_creation_time =
dsl_dataset_phys(fromds)->
ds_creation_time;
zb->zbm_guid =
dsl_dataset_phys(fromds)->ds_guid;
zb->zbm_redaction_obj = 0;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(
dspp.dp->dp_meta_objset,
fromds->ds_object,
DS_FIELD_IVSET_GUID, 8, 1,
&zb->zbm_ivset_guid);
}
}
dsl_dataset_rele(fromds, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds,
zb);
if (err == EXDEV && zb->zbm_redaction_obj != 0 &&
zb->zbm_guid ==
dsl_dataset_phys(dspp.to_ds)->ds_guid)
err = 0;
}
if (err == 0) {
/* dmu_send_impl will call dsl_pool_rele for us. */
err = dmu_send_impl(&dspp);
} else {
if (dspp.fromredactsnaps)
kmem_free(dspp.fromredactsnaps,
dspp.numfromredactsnaps *
sizeof (uint64_t));
dsl_pool_rele(dspp.dp, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
int err = 0;
uint64_t size;
/*
* Assume that space (both on-disk and in-stream) is dominated by
* data. We will adjust for indirect blocks and the copies property,
* but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
*/
uint64_t recordsize;
uint64_t record_count;
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
/* Assume all (uncompressed) blocks are recordsize. */
if (zfs_override_estimate_recordsize != 0) {
recordsize = zfs_override_estimate_recordsize;
} else if (os->os_phys->os_type == DMU_OST_ZVOL) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
} else {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
}
if (err != 0)
return (err);
record_count = uncompressed / recordsize;
/*
* If we're estimating a send size for a compressed stream, use the
* compressed data size to estimate the stream size. Otherwise, use the
* uncompressed data size.
*/
size = stream_compressed ? compressed : uncompressed;
/*
* Subtract out approximate space used by indirect blocks.
* Assume most space is used by data blocks (non-indirect, non-dnode).
* Assume no ditto blocks or internal fragmentation.
*
* Therefore, space used by indirect blocks is sizeof(blkptr_t) per
* block.
*/
size -= record_count * sizeof (blkptr_t);
/* Add in the space for the record associated with each block. */
size += record_count * sizeof (dmu_replay_record_t);
*sizep = size;
return (0);
}
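/*
 * Illustrative, stand-alone worked example (not taken from this file) of the
 * adjustment above: for 1 GiB of uncompressed data in 128 KiB records, the
 * estimate subtracts one block pointer and adds one replay-record header per
 * record. The two struct sizes below are assumed for illustration only; the
 * real code uses sizeof (blkptr_t) and sizeof (dmu_replay_record_t).
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t uncompressed = 1ULL << 30;	/* 1 GiB of data */
	uint64_t recordsize = 128ULL << 10;	/* 128 KiB records */
	uint64_t record_count = uncompressed / recordsize;	/* 8192 */
	uint64_t bp_size = 128;		/* assumed sizeof (blkptr_t) */
	uint64_t drr_size = 312;	/* assumed sizeof (dmu_replay_record_t) */

	uint64_t size = uncompressed;
	size -= record_count * bp_size;		/* indirect-block overhead */
	size += record_count * drr_size;	/* per-record stream overhead */

	/* 1073741824 - 1048576 + 2555904 = 1075249152 bytes. */
	printf("estimated stream size: %llu bytes\n",
	    (unsigned long long)size);
	return (0);
}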
int
dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds,
zfs_bookmark_phys_t *frombook, boolean_t stream_compressed,
boolean_t saved, uint64_t *sizep)
{
int err;
dsl_dataset_t *ds = origds;
uint64_t uncomp, comp;
ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool));
ASSERT(fromds == NULL || frombook == NULL);
/*
* If this is a saved send we may actually be sending
* from the %recv clone used for resuming.
*/
if (saved) {
objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset;
uint64_t guid;
char dsname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_name(origds, dsname);
(void) strcat(dsname, "/");
(void) strlcat(dsname, recv_clone_name, sizeof (dsname));
err = dsl_dataset_hold(origds->ds_dir->dd_pool,
dsname, FTAG, &ds);
if (err != ENOENT && err != 0) {
return (err);
} else if (err == ENOENT) {
ds = origds;
}
/* check that this dataset has partially received data */
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1, &guid);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
}
/* tosnap must be a snapshot or the target of a saved send */
if (!ds->ds_is_snapshot && ds == origds)
return (SET_ERROR(EINVAL));
if (fromds != NULL) {
uint64_t used;
if (!fromds->ds_is_snapshot) {
err = SET_ERROR(EINVAL);
goto out;
}
if (!dsl_dataset_is_before(ds, fromds, 0)) {
err = SET_ERROR(EXDEV);
goto out;
}
err = dsl_dataset_space_written(fromds, ds, &used, &comp,
&uncomp);
if (err != 0)
goto out;
} else if (frombook != NULL) {
uint64_t used;
err = dsl_dataset_space_written_bookmark(frombook, ds, &used,
&comp, &uncomp);
if (err != 0)
goto out;
} else {
uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
}
err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
stream_compressed, sizep);
/*
* Add the size of the BEGIN and END records to the estimate.
*/
*sizep += 2 * sizeof (dmu_replay_record_t);
out:
if (ds != origds)
dsl_dataset_rele(ds, FTAG);
return (err);
}
ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
"Allow sending corrupt data");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, UINT, ZMOD_RW,
"Maximum send queue length");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
"Send unmodified spill blocks");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, UINT, ZMOD_RW,
"Maximum send queue length for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, UINT, ZMOD_RW,
"Send queue fill fraction");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, UINT, ZMOD_RW,
"Send queue fill fraction for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, UINT, ZMOD_RW,
"Override block size estimate with fixed size");
diff --git a/sys/contrib/openzfs/module/zfs/dmu_tx.c b/sys/contrib/openzfs/module/zfs/dmu_tx.c
index c4e274bd4c42..0eb8c17e331a 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_tx.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_tx.c
@@ -1,1579 +1,1578 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>
typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
uint64_t arg1, uint64_t arg2);
dmu_tx_stats_t dmu_tx_stats = {
{ "dmu_tx_assigned", KSTAT_DATA_UINT64 },
{ "dmu_tx_delay", KSTAT_DATA_UINT64 },
{ "dmu_tx_error", KSTAT_DATA_UINT64 },
{ "dmu_tx_suspended", KSTAT_DATA_UINT64 },
{ "dmu_tx_group", KSTAT_DATA_UINT64 },
{ "dmu_tx_memory_reserve", KSTAT_DATA_UINT64 },
{ "dmu_tx_memory_reclaim", KSTAT_DATA_UINT64 },
{ "dmu_tx_dirty_throttle", KSTAT_DATA_UINT64 },
{ "dmu_tx_dirty_delay", KSTAT_DATA_UINT64 },
{ "dmu_tx_dirty_over_max", KSTAT_DATA_UINT64 },
{ "dmu_tx_dirty_frees_delay", KSTAT_DATA_UINT64 },
{ "dmu_tx_wrlog_delay", KSTAT_DATA_UINT64 },
{ "dmu_tx_quota", KSTAT_DATA_UINT64 },
};
static kstat_t *dmu_tx_ksp;
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
tx->tx_dir = dd;
if (dd != NULL)
tx->tx_pool = dd->dd_pool;
list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
offsetof(dmu_tx_hold_t, txh_node));
list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
offsetof(dmu_tx_callback_t, dcb_node));
tx->tx_start = gethrtime();
return (tx);
}
dmu_tx_t *
dmu_tx_create(objset_t *os)
{
dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
tx->tx_objset = os;
return (tx);
}
dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
dmu_tx_t *tx = dmu_tx_create_dd(NULL);
TXG_VERIFY(dp->dp_spa, txg);
tx->tx_pool = dp;
tx->tx_txg = txg;
tx->tx_anyobj = TRUE;
return (tx);
}
int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
return (tx->tx_anyobj);
}
int
dmu_tx_private_ok(dmu_tx_t *tx)
{
return (tx->tx_anyobj);
}
static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
uint64_t arg1, uint64_t arg2)
{
dmu_tx_hold_t *txh;
if (dn != NULL) {
(void) zfs_refcount_add(&dn->dn_holds, tx);
if (tx->tx_txg != 0) {
mutex_enter(&dn->dn_mtx);
/*
* dn->dn_assigned_txg == tx->tx_txg doesn't pose a
* problem, but there's no way for it to happen (for
* now, at least).
*/
ASSERT(dn->dn_assigned_txg == 0);
dn->dn_assigned_txg = tx->tx_txg;
(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
mutex_exit(&dn->dn_mtx);
}
}
txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
txh->txh_tx = tx;
txh->txh_dnode = dn;
zfs_refcount_create(&txh->txh_space_towrite);
zfs_refcount_create(&txh->txh_memory_tohold);
txh->txh_type = type;
txh->txh_arg1 = arg1;
txh->txh_arg2 = arg2;
list_insert_tail(&tx->tx_holds, txh);
return (txh);
}
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
dnode_t *dn = NULL;
dmu_tx_hold_t *txh;
int err;
if (object != DMU_NEW_OBJECT) {
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0) {
tx->tx_err = err;
return (NULL);
}
}
txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
if (dn != NULL)
dnode_rele(dn, FTAG);
return (txh);
}
void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
/*
* If we're syncing, they can manipulate any object anyhow, and
* the hold on the dnode_t can cause problems.
*/
if (!dmu_tx_is_syncing(tx))
(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}
/*
* This function reads specified data from disk. The specified data will
* be needed to perform the transaction -- i.e, it will be read after
* we do dmu_tx_assign(). There are two reasons that we read the data now
* (before dmu_tx_assign()):
*
* 1. Reading it now has potentially better performance. The transaction
* has not yet been assigned, so the TXG is not held open, and also the
* caller typically has less locks held when calling dmu_tx_hold_*() than
* after the transaction has been assigned. This reduces the lock (and txg)
* hold times, thus reducing lock contention.
*
* 2. It is easier for callers (primarily the ZPL) to handle i/o errors
* that are detected before they start making changes to the DMU state
* (i.e. now). Once the transaction has been assigned, and some DMU
* state has been changed, it can be difficult to recover from an i/o
* error (e.g. to undo the changes already made in memory at the DMU
* layer). Typically code to do so does not exist in the caller -- it
* assumes that the data has already been cached and thus i/o errors are
* not possible.
*
* It has been observed that the i/o initiated here can be a performance
* problem, and it appears to be optional, because we don't look at the
* data which is read. However, removing this read would only serve to
* move the work elsewhere (after the dmu_tx_assign()), where it may
* have a greater impact on performance (in addition to the impact on
* fault tolerance noted above).
*/
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
int err;
dmu_buf_impl_t *db;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
db = dbuf_hold_level(dn, level, blkid, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL)
return (SET_ERROR(EIO));
/*
* PARTIAL_FIRST allows caching for uncacheable blocks. It will
* be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
*/
err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
(level == 0 ? DB_RF_PARTIAL_FIRST : 0));
dbuf_rele(db, FTAG);
return (err);
}
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
dnode_t *dn = txh->txh_dnode;
int err = 0;
if (len == 0)
return;
(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
if (dn == NULL)
return;
/*
* For i/o error checking, read the blocks that will be needed
* to perform the write: the first and last level-0 blocks (if
* they are not aligned, i.e. if they are partial-block writes),
* and all the level-1 blocks.
*/
if (dn->dn_maxblkid == 0) {
if (off < dn->dn_datablksz &&
(off > 0 || len < dn->dn_datablksz)) {
err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
} else {
zio_t *zio = zio_root(dn->dn_objset->os_spa,
NULL, NULL, ZIO_FLAG_CANFAIL);
/* first level-0 block */
uint64_t start = off >> dn->dn_datablkshift;
if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
err = dmu_tx_check_ioerr(zio, dn, 0, start);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
/* last level-0 block */
uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
if (end != start && end <= dn->dn_maxblkid &&
P2PHASE(off + len, dn->dn_datablksz)) {
err = dmu_tx_check_ioerr(zio, dn, 0, end);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
/* level-1 blocks */
if (dn->dn_nlevels > 1) {
int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
for (uint64_t i = (start >> shft) + 1;
i < end >> shft; i++) {
err = dmu_tx_check_ioerr(zio, dn, 1, i);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
}
err = zio_wait(zio);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
}
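A worked example of the block selection above, with hypothetical values: a 200 KiB write at offset 100 KiB into an object with 128 KiB data blocks (dn_datablkshift = 17, dn_indblkshift = 17) gives start = 0 and end = 2. Block 0 is read because the write starts mid-block, block 2 is read because it ends mid-block, and the level-1 loop from (start >> 10) + 1 up to (but not including) end >> 10 covers no blocks, since both ends fall under the same level-1 indirect block.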
static void
dmu_tx_count_append(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
dnode_t *dn = txh->txh_dnode;
int err = 0;
if (len == 0)
return;
(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);
if (dn == NULL)
return;
/*
* For i/o error checking, read the first level-0 block that will be
* needed to perform the append (only if the write is not block-aligned,
* i.e. a partial-block write); no additional blocks are read.
*/
if (dn->dn_maxblkid == 0) {
if (off < dn->dn_datablksz &&
(off > 0 || len < dn->dn_datablksz)) {
err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
} else {
zio_t *zio = zio_root(dn->dn_objset->os_spa,
NULL, NULL, ZIO_FLAG_CANFAIL);
/* first level-0 block */
uint64_t start = off >> dn->dn_datablkshift;
if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
err = dmu_tx_check_ioerr(zio, dn, 0, start);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
err = zio_wait(zio);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
}
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
(void) zfs_refcount_add_many(&txh->txh_space_towrite,
DNODE_MIN_SIZE, FTAG);
}
void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
ASSERT3U(len, <=, DMU_MAX_ACCESS);
ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_WRITE, off, len);
if (txh != NULL) {
dmu_tx_count_write(txh, off, len);
dmu_tx_count_dnode(txh);
}
}
void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
ASSERT3U(len, <=, DMU_MAX_ACCESS);
ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
if (txh != NULL) {
dmu_tx_count_write(txh, off, len);
dmu_tx_count_dnode(txh);
}
}
/*
* Should be used when appending to an object and the exact offset is unknown.
* The write must occur at or beyond the specified offset. Only the L0 block
* at provided offset will be prefetched.
*/
void
dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
ASSERT3U(len, <=, DMU_MAX_ACCESS);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_APPEND, off, DMU_OBJECT_END);
if (txh != NULL) {
dmu_tx_count_append(txh, off, len);
dmu_tx_count_dnode(txh);
}
}
void
dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
ASSERT3U(len, <=, DMU_MAX_ACCESS);
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END);
if (txh != NULL) {
dmu_tx_count_append(txh, off, len);
dmu_tx_count_dnode(txh);
}
}
/*
* This function marks the transaction as being a "net free". The end
* result is that refquotas will be disabled for this transaction, and
* this transaction will be able to use half of the pool space overhead
* (see dsl_pool_adjustedsize()). Therefore this function should only
* be called for transactions that we expect will not cause a net increase
* in the amount of space used (but it's OK if that is occasionally not true).
*/
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
tx->tx_netfree = B_TRUE;
}
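A minimal sketch of how a caller typically uses this for a bulk free ("os" and "object" are placeholder names, error handling abbreviated):
dmu_tx_t *tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
dmu_tx_mark_netfree(tx);		/* freeing, so skip refquota enforcement */
int error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
	dmu_tx_abort(tx);
	return (error);
}
/* ... perform the free ... */
dmu_tx_commit(tx);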
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
dmu_tx_t *tx = txh->txh_tx;
dnode_t *dn = txh->txh_dnode;
int err;
ASSERT(tx->tx_txg == 0);
if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
return;
if (len == DMU_OBJECT_END)
len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;
/*
* For i/o error checking, we read the first and last level-0
* blocks if they are not aligned, and all the level-1 blocks.
*
* Note: dbuf_free_range() assumes that we have not instantiated
* any level-0 dbufs that will be completely freed. Therefore we must
* exercise care to not read or count the first and last blocks
* if they are blocksize-aligned.
*/
if (dn->dn_datablkshift == 0) {
if (off != 0 || len < dn->dn_datablksz)
dmu_tx_count_write(txh, 0, dn->dn_datablksz);
} else {
/* first block will be modified if it is not aligned */
if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
dmu_tx_count_write(txh, off, 1);
/* last block will be modified if it is not aligned */
if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
dmu_tx_count_write(txh, off + len, 1);
}
/*
* Check level-1 blocks.
*/
if (dn->dn_nlevels > 1) {
int shift = dn->dn_datablkshift + dn->dn_indblkshift -
SPA_BLKPTRSHIFT;
uint64_t start = off >> shift;
uint64_t end = (off + len) >> shift;
ASSERT(dn->dn_indblkshift != 0);
/*
* dnode_reallocate() can result in an object with indirect
* blocks having an odd data block size. In this case,
* just check the single block.
*/
if (dn->dn_datablkshift == 0)
start = end = 0;
zio_t *zio = zio_root(tx->tx_pool->dp_spa,
NULL, NULL, ZIO_FLAG_CANFAIL);
for (uint64_t i = start; i <= end; i++) {
uint64_t ibyte = i << shift;
err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
i = ibyte >> shift;
if (err == ESRCH || i > end)
break;
if (err != 0) {
tx->tx_err = err;
(void) zio_wait(zio);
return;
}
(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
1 << dn->dn_indblkshift, FTAG);
err = dmu_tx_check_ioerr(zio, dn, 1, i);
if (err != 0) {
tx->tx_err = err;
(void) zio_wait(zio);
return;
}
}
err = zio_wait(zio);
if (err != 0) {
tx->tx_err = err;
return;
}
}
}
void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
dmu_tx_hold_t *txh;
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_FREE, off, len);
if (txh != NULL) {
dmu_tx_count_dnode(txh);
dmu_tx_count_free(txh, off, len);
}
}
void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
dmu_tx_hold_t *txh;
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
if (txh != NULL) {
dmu_tx_count_dnode(txh);
dmu_tx_count_free(txh, off, len);
}
}
static void
dmu_tx_count_clone(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
/*
* Reuse dmu_tx_count_free(); it does exactly what we need for clone.
*/
dmu_tx_count_free(txh, off, len);
}
void
dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len);
if (txh != NULL) {
dmu_tx_count_dnode(txh);
dmu_tx_count_clone(txh, off, len);
}
}
static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
dmu_tx_t *tx = txh->txh_tx;
dnode_t *dn = txh->txh_dnode;
int err;
extern int zap_micro_max_size;
ASSERT(tx->tx_txg == 0);
dmu_tx_count_dnode(txh);
/*
* Modifying an almost-full microzap is around the worst case (128KB).
*
* If it is a fat zap, the worst case would be 7*16KB=112KB:
* - 3 blocks overwritten: target leaf, ptrtbl block, header block
* - 4 new blocks written if adding:
* - 2 blocks for possibly split leaves,
* - 2 grown ptrtbl blocks
*/
(void) zfs_refcount_add_many(&txh->txh_space_towrite,
zap_micro_max_size, FTAG);
if (dn == NULL)
return;
ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
if (dn->dn_maxblkid == 0 || name == NULL) {
/*
* This is a microzap (only one block), or we don't know
* the name. Check the first block for i/o errors.
*/
err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
if (err != 0) {
tx->tx_err = err;
}
} else {
/*
* Access the name so that we'll check for i/o errors to
* the leaf blocks, etc. We ignore ENOENT, as this name
* may not yet exist.
*/
err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
if (err == EIO || err == ECKSUM || err == ENXIO) {
tx->tx_err = err;
}
}
}
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_ZAP, add, (uintptr_t)name);
if (txh != NULL)
dmu_tx_hold_zap_impl(txh, name);
}
void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
ASSERT(dn != NULL);
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
if (txh != NULL)
dmu_tx_hold_zap_impl(txh, name);
}
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
dmu_tx_hold_t *txh;
ASSERT(tx->tx_txg == 0);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_BONUS, 0, 0);
if (txh)
dmu_tx_count_dnode(txh);
}
void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
if (txh)
dmu_tx_count_dnode(txh);
}
void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
dmu_tx_hold_t *txh;
ASSERT(tx->tx_txg == 0);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
DMU_NEW_OBJECT, THT_SPACE, space, 0);
if (txh) {
(void) zfs_refcount_add_many(
&txh->txh_space_towrite, space, FTAG);
}
}
#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
boolean_t match_object = B_FALSE;
boolean_t match_offset = B_FALSE;
DB_DNODE_ENTER(db);
dnode_t *dn = DB_DNODE(db);
ASSERT(tx->tx_txg != 0);
ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
ASSERT3U(dn->dn_object, ==, db->db.db_object);
if (tx->tx_anyobj) {
DB_DNODE_EXIT(db);
return;
}
/* XXX No checking on the meta dnode for now */
if (db->db.db_object == DMU_META_DNODE_OBJECT) {
DB_DNODE_EXIT(db);
return;
}
for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
txh = list_next(&tx->tx_holds, txh)) {
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
match_object = TRUE;
if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
int datablkshift = dn->dn_datablkshift ?
dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
int shift = datablkshift + epbs * db->db_level;
uint64_t beginblk = shift >= 64 ? 0 :
(txh->txh_arg1 >> shift);
uint64_t endblk = shift >= 64 ? 0 :
((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
uint64_t blkid = db->db_blkid;
/* XXX txh_arg2 better not be zero... */
dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
txh->txh_type, (u_longlong_t)beginblk,
(u_longlong_t)endblk);
switch (txh->txh_type) {
case THT_WRITE:
if (blkid >= beginblk && blkid <= endblk)
match_offset = TRUE;
/*
* We will let this hold work for the bonus
* or spill buffer so that we don't need to
* hold it when creating a new object.
*/
if (blkid == DMU_BONUS_BLKID ||
blkid == DMU_SPILL_BLKID)
match_offset = TRUE;
/*
* They might have to increase nlevels,
* thus dirtying the new TLIBs. Or they
* might have to change the block size,
* thus dirtying the new lvl=0 blk=0.
*/
if (blkid == 0)
match_offset = TRUE;
break;
case THT_APPEND:
if (blkid >= beginblk && (blkid <= endblk ||
txh->txh_arg2 == DMU_OBJECT_END))
match_offset = TRUE;
/*
* THT_WRITE used for bonus and spill blocks.
*/
ASSERT(blkid != DMU_BONUS_BLKID &&
blkid != DMU_SPILL_BLKID);
/*
* They might have to increase nlevels,
* thus dirtying the new TLIBs. Or they
* might have to change the block size,
* thus dirtying the new lvl=0 blk=0.
*/
if (blkid == 0)
match_offset = TRUE;
break;
case THT_FREE:
/*
* We will dirty all the level 1 blocks in
* the free range and perhaps the first and
* last level 0 block.
*/
if (blkid >= beginblk && (blkid <= endblk ||
txh->txh_arg2 == DMU_OBJECT_END))
match_offset = TRUE;
break;
case THT_SPILL:
if (blkid == DMU_SPILL_BLKID)
match_offset = TRUE;
break;
case THT_BONUS:
if (blkid == DMU_BONUS_BLKID)
match_offset = TRUE;
break;
case THT_ZAP:
match_offset = TRUE;
break;
case THT_NEWOBJECT:
match_object = TRUE;
break;
case THT_CLONE:
if (blkid >= beginblk && blkid <= endblk)
match_offset = TRUE;
break;
default:
cmn_err(CE_PANIC, "bad txh_type %d",
txh->txh_type);
}
}
if (match_object && match_offset) {
DB_DNODE_EXIT(db);
return;
}
}
DB_DNODE_EXIT(db);
panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
(u_longlong_t)db->db.db_object, db->db_level,
(u_longlong_t)db->db_blkid);
}
#endif
/*
* If we can't do 10 iops, something is wrong. Let us go ahead
* and hit zfs_dirty_data_max.
*/
static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */
/*
* We delay transactions when we've determined that the backend storage
* isn't able to accommodate the rate of incoming writes.
*
* If there is already a transaction waiting, we delay relative to when
* that transaction finishes waiting. This way the calculated min_time
* is independent of the number of threads concurrently executing
* transactions.
*
* If we are the only waiter, wait relative to when the transaction
* started, rather than the current time. This credits the transaction for
* "time already served", e.g. reading indirect blocks.
*
* The minimum time for a transaction to take is calculated as:
* min_time = scale * (dirty - min) / (max - dirty)
* min_time is then capped at zfs_delay_max_ns.
*
* The delay has two degrees of freedom that can be adjusted via tunables.
* The percentage of dirty data at which we start to delay is defined by
* zfs_delay_min_dirty_percent. This should typically be at or above
* zfs_vdev_async_write_active_max_dirty_percent so that we only start to
* delay after writing at full speed has failed to keep up with the incoming
* write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
* speaking, this variable determines the amount of delay at the midpoint of
* the curve.
*
* delay
* 10ms +-------------------------------------------------------------*+
* | *|
* 9ms + *+
* | *|
* 8ms + *+
* | * |
* 7ms + * +
* | * |
* 6ms + * +
* | * |
* 5ms + * +
* | * |
* 4ms + * +
* | * |
* 3ms + * +
* | * |
* 2ms + (midpoint) * +
* | | ** |
* 1ms + v *** +
* | zfs_delay_scale ----------> ******** |
* 0 +-------------------------------------*********----------------+
* 0% <- zfs_dirty_data_max -> 100%
*
* Note that since the delay is added to the outstanding time remaining on the
* most recent transaction, the delay is effectively the inverse of IOPS.
* Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
* was chosen such that small changes in the amount of accumulated dirty data
* in the first 3/4 of the curve yield relatively small differences in the
* amount of delay.
*
* The effects can be easier to understand when the amount of delay is
* represented on a log scale:
*
* delay
* 100ms +-------------------------------------------------------------++
* + +
* | |
* + *+
* 10ms + *+
* + ** +
* | (midpoint) ** |
* + | ** +
* 1ms + v **** +
* + zfs_delay_scale ----------> ***** +
* | **** |
* + **** +
* 100us + ** +
* + * +
* | * |
* + * +
* 10us + * +
* + +
* | |
* + +
* +--------------------------------------------------------------+
* 0% <- zfs_dirty_data_max -> 100%
*
* Note here that only as the amount of dirty data approaches its limit does
* the delay start to increase rapidly. The goal of a properly tuned system
* should be to keep the amount of dirty data out of that range by first
* ensuring that the appropriate limits are set for the I/O scheduler to reach
* optimal throughput on the backend storage, and then by changing the value
* of zfs_delay_scale to increase the steepness of the curve.
*/
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
dsl_pool_t *dp = tx->tx_pool;
uint64_t delay_min_bytes, wrlog;
hrtime_t wakeup, tx_time = 0, now;
/* Calculate minimum transaction time for the dirty data amount. */
delay_min_bytes =
zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
if (dirty > delay_min_bytes) {
/*
* The caller has already waited until we are under the max.
* We make them pass us the amount of dirty data so we don't
* have to handle the case of it being >= the max, which
* could cause a divide-by-zero if it's == the max.
*/
ASSERT3U(dirty, <, zfs_dirty_data_max);
tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
(zfs_dirty_data_max - dirty);
}
/* Calculate minimum transaction time for the TX_WRITE log size. */
wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
delay_min_bytes =
zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
if (wrlog >= zfs_wrlog_data_max) {
tx_time = zfs_delay_max_ns;
} else if (wrlog > delay_min_bytes) {
tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
(zfs_wrlog_data_max - wrlog), tx_time);
}
if (tx_time == 0)
return;
tx_time = MIN(tx_time, zfs_delay_max_ns);
now = gethrtime();
if (now > tx->tx_start + tx_time)
return;
DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
uint64_t, tx_time);
mutex_enter(&dp->dp_lock);
wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
dp->dp_last_wakeup = wakeup;
mutex_exit(&dp->dp_lock);
zfs_sleep_until(wakeup);
}
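To put numbers on the curve described above (assuming the common defaults zfs_delay_scale = 500000 and zfs_delay_min_dirty_percent = 60, and a hypothetical zfs_dirty_data_max of 4 GiB):
delay_min_bytes = 4 GiB * 60 / 100 = 2.4 GiB
at dirty = 3.2 GiB (80% of the max, the midpoint of the delayed range):
tx_time = 500000 * (3.2 GiB - 2.4 GiB) / (4 GiB - 3.2 GiB) = 500000 ns = 500 us
which matches the ~2000 IOPS midpoint noted in the comment; tx_time is then capped at zfs_delay_max_ns (100 ms).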
/*
* This routine attempts to assign the transaction to a transaction group.
* To do so, we must determine if there is sufficient free space on disk.
*
* If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
* on it), then it is assumed that there is sufficient free space,
* unless there's insufficient slop space in the pool (see the comment
* above spa_slop_shift in spa_misc.c).
*
* If it is not a "netfree" transaction, then if the data already on disk
* is over the allowed usage (e.g. quota), this will fail with EDQUOT or
* ENOSPC. Otherwise, if the current rough estimate of pending changes,
* plus the rough estimate of this transaction's changes, may exceed the
* allowed usage, then this will fail with ERESTART, which will cause the
* caller to wait for the pending changes to be written to disk (by waiting
* for the next TXG to open), and then check the space usage again.
*
* The rough estimate of pending changes is comprised of the sum of:
*
* - this transaction's holds' txh_space_towrite
*
* - dd_tempreserved[], which is the sum of in-flight transactions'
* holds' txh_space_towrite (i.e. those transactions that have called
* dmu_tx_assign() but not yet called dmu_tx_commit()).
*
* - dd_space_towrite[], which is the amount of dirtied dbufs.
*
* Note that all of these values are inflated by spa_get_worst_case_asize(),
* which means that we may get ERESTART well before we are actually in danger
* of running out of space, but this also mitigates any small inaccuracies
* in the rough estimate (e.g. txh_space_towrite doesn't take into account
* indirect blocks, and dd_space_towrite[] doesn't take into account changes
* to the MOS).
*
* Note that due to this algorithm, it is possible to exceed the allowed
* usage by one transaction. Also, as we approach the allowed usage,
* we will allow a very limited amount of changes into each TXG, thus
* decreasing performance.
*/
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
spa_t *spa = tx->tx_pool->dp_spa;
ASSERT0(tx->tx_txg);
if (tx->tx_err) {
DMU_TX_STAT_BUMP(dmu_tx_error);
return (tx->tx_err);
}
if (spa_suspended(spa)) {
DMU_TX_STAT_BUMP(dmu_tx_suspended);
/*
* If the user has indicated a blocking failure mode
* then return ERESTART which will block in dmu_tx_wait().
* Otherwise, return EIO so that an error can get
* propagated back to the VOP calls.
*
* Note that we always honor the txg_how flag regardless
* of the failuremode setting.
*/
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
!(txg_how & TXG_WAIT))
return (SET_ERROR(EIO));
return (SET_ERROR(ERESTART));
}
if (!tx->tx_dirty_delayed &&
dsl_pool_need_wrlog_delay(tx->tx_pool)) {
tx->tx_wait_dirty = B_TRUE;
DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
return (SET_ERROR(ERESTART));
}
if (!tx->tx_dirty_delayed &&
dsl_pool_need_dirty_delay(tx->tx_pool)) {
tx->tx_wait_dirty = B_TRUE;
DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
return (SET_ERROR(ERESTART));
}
tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
tx->tx_needassign_txh = NULL;
/*
* NB: No error returns are allowed after txg_hold_open, but
* before processing the dnode holds, due to the
* dmu_tx_unassign() logic.
*/
uint64_t towrite = 0;
uint64_t tohold = 0;
for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
txh = list_next(&tx->tx_holds, txh)) {
dnode_t *dn = txh->txh_dnode;
if (dn != NULL) {
/*
* This thread can't hold the dn_struct_rwlock
* while assigning the tx, because this can lead to
* deadlock. Specifically, if this dnode is already
* assigned to an earlier txg, this thread may need
* to wait for that txg to sync (the ERESTART case
* below). The other thread that has assigned this
* dnode to an earlier txg prevents this txg from
* syncing until its tx can complete (calling
* dmu_tx_commit()), but it may need to acquire the
* dn_struct_rwlock to do so (e.g. via
* dmu_buf_hold*()).
*
* Note that this thread can't hold the lock for
* read either, but the rwlock doesn't record
* enough information to make that assertion.
*/
ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));
mutex_enter(&dn->dn_mtx);
if (dn->dn_assigned_txg == tx->tx_txg - 1) {
mutex_exit(&dn->dn_mtx);
tx->tx_needassign_txh = txh;
DMU_TX_STAT_BUMP(dmu_tx_group);
return (SET_ERROR(ERESTART));
}
if (dn->dn_assigned_txg == 0)
dn->dn_assigned_txg = tx->tx_txg;
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
mutex_exit(&dn->dn_mtx);
}
towrite += zfs_refcount_count(&txh->txh_space_towrite);
tohold += zfs_refcount_count(&txh->txh_memory_tohold);
}
/* needed allocation: worst-case estimate of write space */
uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
/* calculate memory footprint estimate */
uint64_t memory = towrite + tohold;
if (tx->tx_dir != NULL && asize != 0) {
int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
if (err != 0)
return (err);
}
DMU_TX_STAT_BUMP(dmu_tx_assigned);
return (0);
}
static void
dmu_tx_unassign(dmu_tx_t *tx)
{
if (tx->tx_txg == 0)
return;
txg_rele_to_quiesce(&tx->tx_txgh);
/*
* Walk the transaction's hold list, removing the hold on the
* associated dnode, and notifying waiters if the refcount drops to 0.
*/
for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
txh && txh != tx->tx_needassign_txh;
txh = list_next(&tx->tx_holds, txh)) {
dnode_t *dn = txh->txh_dnode;
if (dn == NULL)
continue;
mutex_enter(&dn->dn_mtx);
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
dn->dn_assigned_txg = 0;
cv_broadcast(&dn->dn_notxholds);
}
mutex_exit(&dn->dn_mtx);
}
txg_rele_to_sync(&tx->tx_txgh);
tx->tx_lasttried_txg = tx->tx_txg;
tx->tx_txg = 0;
}
/*
* Assign tx to a transaction group; txg_how is a bitmask:
*
* If TXG_WAIT is set and the currently open txg is full, this function
* will wait until there's a new txg. This should be used when no locks
* are being held. With this bit set, this function will only fail if
* we're truly out of space (or over quota).
*
* If TXG_WAIT is *not* set and we can't assign into the currently open
* txg without blocking, this function will return immediately with
* ERESTART. This should be used whenever locks are being held. On an
* ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
* and try again.
*
* If TXG_NOTHROTTLE is set, this indicates that this tx should not be
* delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
* details on the throttle). This is used by the VFS operations, after
* they have already called dmu_tx_wait() (though most likely on a
* different tx).
*
* It is guaranteed that subsequent successful calls to dmu_tx_assign()
* will assign the tx to monotonically increasing txgs. Of course this is
* not strong monotonicity, because the same txg can be returned multiple
* times in a row. This guarantee holds both for subsequent calls from
* one thread and for multiple threads. For example, it is impossible to
* observe the following sequence of events:
*
* Thread 1 Thread 2
*
* dmu_tx_assign(T1, ...)
* 1 <- dmu_tx_get_txg(T1)
* dmu_tx_assign(T2, ...)
* 2 <- dmu_tx_get_txg(T2)
* dmu_tx_assign(T3, ...)
* 1 <- dmu_tx_get_txg(T3)
*/
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
int err;
ASSERT(tx->tx_txg == 0);
ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
ASSERT(!dsl_pool_sync_context(tx->tx_pool));
/* If we might wait, we must not hold the config lock. */
IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));
if ((txg_how & TXG_NOTHROTTLE))
tx->tx_dirty_delayed = B_TRUE;
while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
dmu_tx_unassign(tx);
if (err != ERESTART || !(txg_how & TXG_WAIT))
return (err);
dmu_tx_wait(tx);
}
txg_rele_to_quiesce(&tx->tx_txgh);
return (0);
}
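A minimal sketch of the caller pattern this contract implies ("os", "object", "off", and "len" are placeholder names; error handling abbreviated):
dmu_tx_t *tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, object, off, len);	/* declare what will be dirtied */
int error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
	/* with TXG_WAIT this means truly out of space or over quota */
	dmu_tx_abort(tx);
	return (error);
}
/* ... modify the held ranges, e.g. via dmu_write(..., tx) ... */
dmu_tx_commit(tx);
Callers that hold locks instead pass 0 for txg_how and, on ERESTART, drop their locks, call dmu_tx_wait(), abort the tx, and retry, as described above.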
void
dmu_tx_wait(dmu_tx_t *tx)
{
spa_t *spa = tx->tx_pool->dp_spa;
dsl_pool_t *dp = tx->tx_pool;
hrtime_t before;
ASSERT(tx->tx_txg == 0);
ASSERT(!dsl_pool_config_held(tx->tx_pool));
before = gethrtime();
if (tx->tx_wait_dirty) {
uint64_t dirty;
/*
* dmu_tx_try_assign() has determined that we need to wait
* because we've consumed much or all of the dirty buffer
* space.
*/
mutex_enter(&dp->dp_lock);
if (dp->dp_dirty_total >= zfs_dirty_data_max)
DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
while (dp->dp_dirty_total >= zfs_dirty_data_max)
cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
dirty = dp->dp_dirty_total;
mutex_exit(&dp->dp_lock);
dmu_tx_delay(tx, dirty);
tx->tx_wait_dirty = B_FALSE;
/*
* Note: setting tx_dirty_delayed only has effect if the
* caller used TXG_WAIT. Otherwise they are going to
* destroy this tx and try again. The common case,
* zfs_write(), uses TXG_WAIT.
*/
tx->tx_dirty_delayed = B_TRUE;
} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
/*
* If the pool is suspended we need to wait until it
* is resumed. Note that it's possible that the pool
* has become active after this thread has tried to
* obtain a tx. If that's the case then tx_lasttried_txg
* would not have been set.
*/
txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
} else if (tx->tx_needassign_txh) {
dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
mutex_enter(&dn->dn_mtx);
while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
mutex_exit(&dn->dn_mtx);
tx->tx_needassign_txh = NULL;
} else {
/*
* If we have a lot of dirty data just wait until we sync
* out a TXG at which point we'll hopefully have synced
* a portion of the changes.
*/
txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
}
spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}
static void
dmu_tx_destroy(dmu_tx_t *tx)
{
dmu_tx_hold_t *txh;
while ((txh = list_head(&tx->tx_holds)) != NULL) {
dnode_t *dn = txh->txh_dnode;
list_remove(&tx->tx_holds, txh);
zfs_refcount_destroy_many(&txh->txh_space_towrite,
zfs_refcount_count(&txh->txh_space_towrite));
zfs_refcount_destroy_many(&txh->txh_memory_tohold,
zfs_refcount_count(&txh->txh_memory_tohold));
kmem_free(txh, sizeof (dmu_tx_hold_t));
if (dn != NULL)
dnode_rele(dn, tx);
}
list_destroy(&tx->tx_callbacks);
list_destroy(&tx->tx_holds);
kmem_free(tx, sizeof (dmu_tx_t));
}
void
dmu_tx_commit(dmu_tx_t *tx)
{
ASSERT(tx->tx_txg != 0);
/*
* Go through the transaction's hold list and remove holds on
* associated dnodes, notifying waiters if no holds remain.
*/
for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
txh = list_next(&tx->tx_holds, txh)) {
dnode_t *dn = txh->txh_dnode;
if (dn == NULL)
continue;
mutex_enter(&dn->dn_mtx);
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
dn->dn_assigned_txg = 0;
cv_broadcast(&dn->dn_notxholds);
}
mutex_exit(&dn->dn_mtx);
}
if (tx->tx_tempreserve_cookie)
dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
if (!list_is_empty(&tx->tx_callbacks))
txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
if (tx->tx_anyobj == FALSE)
txg_rele_to_sync(&tx->tx_txgh);
dmu_tx_destroy(tx);
}
void
dmu_tx_abort(dmu_tx_t *tx)
{
ASSERT(tx->tx_txg == 0);
/*
* Call any registered callbacks with an error code.
*/
if (!list_is_empty(&tx->tx_callbacks))
dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));
dmu_tx_destroy(tx);
}
uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
ASSERT(tx->tx_txg != 0);
return (tx->tx_txg);
}
dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
ASSERT(tx->tx_pool != NULL);
return (tx->tx_pool);
}
void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
dmu_tx_callback_t *dcb;
dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
dcb->dcb_func = func;
dcb->dcb_data = data;
list_insert_tail(&tx->tx_callbacks, dcb);
}
/*
* Call all the commit callbacks on a list, with a given error code.
*/
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
dmu_tx_callback_t *dcb;
- while ((dcb = list_tail(cb_list)) != NULL) {
- list_remove(cb_list, dcb);
+ while ((dcb = list_remove_tail(cb_list)) != NULL) {
dcb->dcb_func(dcb->dcb_data, error);
kmem_free(dcb, sizeof (dmu_tx_callback_t));
}
}
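A brief sketch of how the commit callback interface above is used (the callback name is hypothetical, and arg stands for whatever per-transaction state the caller tracks):
static void
example_commit_cb(void *arg, int error)
{
	/*
	 * error is typically 0 once the txg has synced, or ECANCELED if
	 * the transaction was aborted (see dmu_tx_abort() above).
	 */
	/* release or update the per-transaction state held in arg */
}

/* after dmu_tx_assign() succeeds: */
dmu_tx_callback_register(tx, example_commit_cb, arg);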
/*
* Interface to hold a bunch of attributes.
* Used for creating new files.
* attrsize is the total size of all attributes
* to be added during object creation
*
* For updating/adding a single attribute dmu_tx_hold_sa() should be used.
*/
/*
* Hold the necessary attribute name for attribute registration.
* It should be a very rare case where this is needed. If it does
* happen, it will only happen on the first write to the file system.
*/
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
if (!sa->sa_need_attr_registration)
return;
for (int i = 0; i != sa->sa_num_attrs; i++) {
if (!sa->sa_attr_table[i].sa_registered) {
if (sa->sa_reg_attr_obj)
dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
B_TRUE, sa->sa_attr_table[i].sa_name);
else
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
B_TRUE, sa->sa_attr_table[i].sa_name);
}
}
}
void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
dmu_tx_hold_t *txh;
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
THT_SPILL, 0, 0);
if (txh != NULL)
(void) zfs_refcount_add_many(&txh->txh_space_towrite,
SPA_OLD_MAXBLOCKSIZE, FTAG);
}
void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
sa_os_t *sa = tx->tx_objset->os_sa;
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
if (tx->tx_objset->os_sa->sa_master_obj == 0)
return;
if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
} else {
dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
}
dmu_tx_sa_registration_hold(sa, tx);
if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
return;
(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
THT_SPILL, 0, 0);
}
/*
* Hold SA attribute
*
* dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
*
* may_grow indicates that the object's variable-sized attributes may
* grow, in which case the layout attribute ZAP and (possibly) the
* spill block must also be held.
*/
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
uint64_t object;
sa_os_t *sa = tx->tx_objset->os_sa;
ASSERT(hdl != NULL);
object = sa_handle_object(hdl);
dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
DB_DNODE_ENTER(db);
dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
DB_DNODE_EXIT(db);
if (tx->tx_objset->os_sa->sa_master_obj == 0)
return;
if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
}
dmu_tx_sa_registration_hold(sa, tx);
if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
ASSERT(tx->tx_txg == 0);
dmu_tx_hold_spill(tx, object);
} else {
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn->dn_have_spill) {
ASSERT(tx->tx_txg == 0);
dmu_tx_hold_spill(tx, object);
}
DB_DNODE_EXIT(db);
}
}
void
dmu_tx_init(void)
{
dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dmu_tx_ksp != NULL) {
dmu_tx_ksp->ks_data = &dmu_tx_stats;
kstat_install(dmu_tx_ksp);
}
}
void
dmu_tx_fini(void)
{
if (dmu_tx_ksp != NULL) {
kstat_delete(dmu_tx_ksp);
dmu_tx_ksp = NULL;
}
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_append);
EXPORT_SYMBOL(dmu_tx_hold_append_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
index ffc012e6c217..b70459380c24 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
@@ -1,586 +1,585 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/arc_impl.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>
#include <sys/wmsum.h>
/*
* This tunable disables predictive prefetch. Note that it leaves "prescient"
* prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
* prescient prefetch never issues i/os that end up not being needed,
* so it can't hurt performance.
*/
static int zfs_prefetch_disable = B_FALSE;
/* max # of streams per zfetch */
static unsigned int zfetch_max_streams = 8;
/* min time before stream reclaim */
static unsigned int zfetch_min_sec_reap = 1;
/* max time before stream delete */
static unsigned int zfetch_max_sec_reap = 2;
/* min bytes to prefetch per stream (default 4MB) */
static unsigned int zfetch_min_distance = 4 * 1024 * 1024;
/* max bytes to prefetch per stream (default 64MB) */
unsigned int zfetch_max_distance = 64 * 1024 * 1024;
/* max bytes to prefetch indirects for per stream (default 64MB) */
unsigned int zfetch_max_idistance = 64 * 1024 * 1024;
/* max number of bytes in an array_read in which we allow prefetching (1MB) */
uint64_t zfetch_array_rd_sz = 1024 * 1024;
typedef struct zfetch_stats {
kstat_named_t zfetchstat_hits;
kstat_named_t zfetchstat_misses;
kstat_named_t zfetchstat_max_streams;
kstat_named_t zfetchstat_io_issued;
kstat_named_t zfetchstat_io_active;
} zfetch_stats_t;
static zfetch_stats_t zfetch_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "max_streams", KSTAT_DATA_UINT64 },
{ "io_issued", KSTAT_DATA_UINT64 },
{ "io_active", KSTAT_DATA_UINT64 },
};
struct {
wmsum_t zfetchstat_hits;
wmsum_t zfetchstat_misses;
wmsum_t zfetchstat_max_streams;
wmsum_t zfetchstat_io_issued;
aggsum_t zfetchstat_io_active;
} zfetch_sums;
#define ZFETCHSTAT_BUMP(stat) \
wmsum_add(&zfetch_sums.stat, 1)
#define ZFETCHSTAT_ADD(stat, val) \
wmsum_add(&zfetch_sums.stat, val)
static kstat_t *zfetch_ksp;
static int
zfetch_kstats_update(kstat_t *ksp, int rw)
{
zfetch_stats_t *zs = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
zs->zfetchstat_hits.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_hits);
zs->zfetchstat_misses.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_misses);
zs->zfetchstat_max_streams.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_max_streams);
zs->zfetchstat_io_issued.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_io_issued);
zs->zfetchstat_io_active.value.ui64 =
aggsum_value(&zfetch_sums.zfetchstat_io_active);
return (0);
}
void
zfetch_init(void)
{
wmsum_init(&zfetch_sums.zfetchstat_hits, 0);
wmsum_init(&zfetch_sums.zfetchstat_misses, 0);
wmsum_init(&zfetch_sums.zfetchstat_max_streams, 0);
wmsum_init(&zfetch_sums.zfetchstat_io_issued, 0);
aggsum_init(&zfetch_sums.zfetchstat_io_active, 0);
zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zfetch_ksp != NULL) {
zfetch_ksp->ks_data = &zfetch_stats;
zfetch_ksp->ks_update = zfetch_kstats_update;
kstat_install(zfetch_ksp);
}
}
void
zfetch_fini(void)
{
if (zfetch_ksp != NULL) {
kstat_delete(zfetch_ksp);
zfetch_ksp = NULL;
}
wmsum_fini(&zfetch_sums.zfetchstat_hits);
wmsum_fini(&zfetch_sums.zfetchstat_misses);
wmsum_fini(&zfetch_sums.zfetchstat_max_streams);
wmsum_fini(&zfetch_sums.zfetchstat_io_issued);
ASSERT0(aggsum_value(&zfetch_sums.zfetchstat_io_active));
aggsum_fini(&zfetch_sums.zfetchstat_io_active);
}
/*
* This takes a pointer to a zfetch structure and a dnode. It performs the
* necessary setup for the zfetch structure, grokking data from the
* associated dnode.
*/
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
if (zf == NULL)
return;
zf->zf_dnode = dno;
zf->zf_numstreams = 0;
list_create(&zf->zf_stream, sizeof (zstream_t),
offsetof(zstream_t, zs_node));
mutex_init(&zf->zf_lock, NULL, MUTEX_DEFAULT, NULL);
}
static void
dmu_zfetch_stream_fini(zstream_t *zs)
{
ASSERT(!list_link_active(&zs->zs_node));
zfs_refcount_destroy(&zs->zs_callers);
zfs_refcount_destroy(&zs->zs_refs);
kmem_free(zs, sizeof (*zs));
}
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
ASSERT(MUTEX_HELD(&zf->zf_lock));
list_remove(&zf->zf_stream, zs);
zf->zf_numstreams--;
membar_producer();
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
}
/*
* Clean up state associated with a zfetch structure (e.g. destroy the
* streams). This doesn't free the zfetch_t itself; that's left to the caller.
*/
void
dmu_zfetch_fini(zfetch_t *zf)
{
zstream_t *zs;
mutex_enter(&zf->zf_lock);
while ((zs = list_head(&zf->zf_stream)) != NULL)
dmu_zfetch_stream_remove(zf, zs);
mutex_exit(&zf->zf_lock);
list_destroy(&zf->zf_stream);
mutex_destroy(&zf->zf_lock);
zf->zf_dnode = NULL;
}
/*
* If there aren't too many active streams already, create one more.
* In the process, delete (reusing the first one found) any stream that has
* had no hits for zfetch_max_sec_reap. If needed, reuse the oldest stream
* that has had no hits for at least zfetch_min_sec_reap, or that has never
* had one.
* The "blkid" argument is the next block that we expect this stream to access.
*/
static void
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{
zstream_t *zs, *zs_next, *zs_old = NULL;
hrtime_t now = gethrtime(), t;
ASSERT(MUTEX_HELD(&zf->zf_lock));
/*
* Delete too old streams, reusing the first found one.
*/
t = now - SEC2NSEC(zfetch_max_sec_reap);
for (zs = list_head(&zf->zf_stream); zs != NULL; zs = zs_next) {
zs_next = list_next(&zf->zf_stream, zs);
/*
* Skip if still active; a refcount of 1 means only the zf_stream
* list reference remains.
*/
if (zfs_refcount_count(&zs->zs_refs) != 1)
continue;
if (zs->zs_atime > t)
continue;
if (zs_old)
dmu_zfetch_stream_remove(zf, zs);
else
zs_old = zs;
}
if (zs_old) {
zs = zs_old;
goto reuse;
}
/*
* The maximum number of streams is normally zfetch_max_streams,
* but for small files we lower it such that it's at least possible
* for all the streams to be non-overlapping.
*/
uint32_t max_streams = MAX(1, MIN(zfetch_max_streams,
zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
zfetch_max_distance));
if (zf->zf_numstreams >= max_streams) {
t = now - SEC2NSEC(zfetch_min_sec_reap);
for (zs = list_head(&zf->zf_stream); zs != NULL;
zs = list_next(&zf->zf_stream, zs)) {
if (zfs_refcount_count(&zs->zs_refs) != 1)
continue;
if (zs->zs_atime > t)
continue;
if (zs_old == NULL || zs->zs_atime < zs_old->zs_atime)
zs_old = zs;
}
if (zs_old) {
zs = zs_old;
goto reuse;
}
ZFETCHSTAT_BUMP(zfetchstat_max_streams);
return;
}
zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
zs->zs_fetch = zf;
zfs_refcount_create(&zs->zs_callers);
zfs_refcount_create(&zs->zs_refs);
/* One reference for zf_stream. */
zfs_refcount_add(&zs->zs_refs, NULL);
zf->zf_numstreams++;
list_insert_head(&zf->zf_stream, zs);
reuse:
zs->zs_blkid = blkid;
zs->zs_pf_dist = 0;
zs->zs_pf_start = blkid;
zs->zs_pf_end = blkid;
zs->zs_ipf_dist = 0;
zs->zs_ipf_start = blkid;
zs->zs_ipf_end = blkid;
/* Allow immediate stream reuse until first hit. */
zs->zs_atime = now - SEC2NSEC(zfetch_min_sec_reap);
zs->zs_missed = B_FALSE;
zs->zs_more = B_FALSE;
}
static void
dmu_zfetch_done(void *arg, uint64_t level, uint64_t blkid, boolean_t io_issued)
{
zstream_t *zs = arg;
if (io_issued && level == 0 && blkid < zs->zs_blkid)
zs->zs_more = B_TRUE;
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
aggsum_add(&zfetch_sums.zfetchstat_io_active, -1);
}
/*
* This is the predictive prefetch entry point. dmu_zfetch_prepare()
* associates the dnode access specified by the blkid and nblks arguments
* with a prefetch stream, predicts further accesses based on that stream's
* statistics, and returns the stream pointer on success. That pointer must
* later be passed to dmu_zfetch_run() to initiate the speculative prefetch
* for the stream and release it. dmu_zfetch() is a wrapper for simple cases
* when a window between prediction and prefetch initiation is not needed.
* The fetch_data argument specifies whether actual data blocks should be
* fetched:
* FALSE -- prefetch only indirect blocks for predicted data blocks;
* TRUE -- prefetch predicted data blocks plus following indirect blocks.
*/
zstream_t *
dmu_zfetch_prepare(zfetch_t *zf, uint64_t blkid, uint64_t nblks,
boolean_t fetch_data, boolean_t have_lock)
{
zstream_t *zs;
spa_t *spa = zf->zf_dnode->dn_objset->os_spa;
if (zfs_prefetch_disable)
return (NULL);
/*
* If we haven't yet loaded the indirect vdevs' mappings, we
* can only read from blocks that we carefully ensure are on
* concrete vdevs (or previously-loaded indirect vdevs). So we
* can't allow the predictive prefetcher to attempt reads of other
* blocks (e.g. of the MOS's dnode object).
*/
if (!spa_indirect_vdevs_loaded(spa))
return (NULL);
/*
* As a fast path for small (single-block) files, ignore access
* to the first block.
*/
if (!have_lock && blkid == 0)
return (NULL);
if (!have_lock)
rw_enter(&zf->zf_dnode->dn_struct_rwlock, RW_READER);
/*
* A fast path for small files for which no prefetch will
* happen.
*/
uint64_t maxblkid = zf->zf_dnode->dn_maxblkid;
if (maxblkid < 2) {
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
return (NULL);
}
mutex_enter(&zf->zf_lock);
/*
* Find matching prefetch stream. Depending on whether the accesses
* are block-aligned, the first block of the new access may either follow
* the last block of the previous access, or be equal to it.
*/
for (zs = list_head(&zf->zf_stream); zs != NULL;
zs = list_next(&zf->zf_stream, zs)) {
if (blkid == zs->zs_blkid) {
break;
} else if (blkid + 1 == zs->zs_blkid) {
blkid++;
nblks--;
break;
}
}
/*
* If the file is ending, remove the matching stream if found.
* If not found then it is too late to create a new one now.
*/
uint64_t end_of_access_blkid = blkid + nblks;
if (end_of_access_blkid >= maxblkid) {
if (zs != NULL)
dmu_zfetch_stream_remove(zf, zs);
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
return (NULL);
}
/* Exit if we already prefetched this block before. */
if (nblks == 0) {
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
return (NULL);
}
if (zs == NULL) {
/*
* This access is not part of any existing stream. Create
* a new stream for it.
*/
dmu_zfetch_stream_create(zf, end_of_access_blkid);
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
ZFETCHSTAT_BUMP(zfetchstat_misses);
return (NULL);
}
/*
* This access was to a block that we issued a prefetch for on
* behalf of this stream. Calculate further prefetch distances.
*
* Start prefetch from the demand access size (nblks). Double the
* distance on every access up to zfetch_min_distance. After that,
* increase the distance by 1/8 only if needed, up to zfetch_max_distance.
*
* Don't double the distance beyond a single block if we have more
* than ~6% of ARC held by active prefetches. This helps avoid
* running out of RAM on some badly mispredicted read patterns.
*/
unsigned int dbs = zf->zf_dnode->dn_datablkshift;
unsigned int nbytes = nblks << dbs;
unsigned int pf_nblks;
if (fetch_data) {
if (unlikely(zs->zs_pf_dist < nbytes))
zs->zs_pf_dist = nbytes;
else if (zs->zs_pf_dist < zfetch_min_distance &&
(zs->zs_pf_dist < (1 << dbs) ||
aggsum_compare(&zfetch_sums.zfetchstat_io_active,
arc_c_max >> (4 + dbs)) < 0))
zs->zs_pf_dist *= 2;
else if (zs->zs_more)
zs->zs_pf_dist += zs->zs_pf_dist / 8;
zs->zs_more = B_FALSE;
if (zs->zs_pf_dist > zfetch_max_distance)
zs->zs_pf_dist = zfetch_max_distance;
pf_nblks = zs->zs_pf_dist >> dbs;
} else {
pf_nblks = 0;
}
if (zs->zs_pf_start < end_of_access_blkid)
zs->zs_pf_start = end_of_access_blkid;
if (zs->zs_pf_end < end_of_access_blkid + pf_nblks)
zs->zs_pf_end = end_of_access_blkid + pf_nblks;
/*
* Do the same for indirects, starting where we will stop reading
* data blocks (and the indirects that point to them).
*/
if (unlikely(zs->zs_ipf_dist < nbytes))
zs->zs_ipf_dist = nbytes;
else
zs->zs_ipf_dist *= 2;
if (zs->zs_ipf_dist > zfetch_max_idistance)
zs->zs_ipf_dist = zfetch_max_idistance;
pf_nblks = zs->zs_ipf_dist >> dbs;
if (zs->zs_ipf_start < zs->zs_pf_end)
zs->zs_ipf_start = zs->zs_pf_end;
if (zs->zs_ipf_end < zs->zs_pf_end + pf_nblks)
zs->zs_ipf_end = zs->zs_pf_end + pf_nblks;
zs->zs_blkid = end_of_access_blkid;
/* Protect the stream from reclamation. */
zs->zs_atime = gethrtime();
zfs_refcount_add(&zs->zs_refs, NULL);
/* Count concurrent callers. */
zfs_refcount_add(&zs->zs_callers, NULL);
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
ZFETCHSTAT_BUMP(zfetchstat_hits);
return (zs);
}
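To illustrate the growth of zs_pf_dist computed above (using the defaults declared at the top of this file, zfetch_min_distance = 4 MiB and zfetch_max_distance = 64 MiB, and a hypothetical 128 KiB recordsize): a stream of 128 KiB demand reads starts at zs_pf_dist = 128 KiB and doubles on every hit (256 KiB, 512 KiB, 1 MiB, 2 MiB, 4 MiB) as long as active prefetches hold less than ~6% of ARC; once past zfetch_min_distance it grows only by 1/8 per hit, and only when zs_more shows the prefetch did not stay ahead of the reader, until it is clamped at zfetch_max_distance.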
void
dmu_zfetch_run(zstream_t *zs, boolean_t missed, boolean_t have_lock)
{
zfetch_t *zf = zs->zs_fetch;
int64_t pf_start, pf_end, ipf_start, ipf_end;
int epbs, issued;
if (missed)
zs->zs_missed = missed;
/*
* Postpone the prefetch if there are more concurrent callers.
* It happens when multiple requests are waiting for the same
* indirect block. The last one will run the prefetch for all.
*/
if (zfs_refcount_remove(&zs->zs_callers, NULL) != 0) {
/* Drop reference taken in dmu_zfetch_prepare(). */
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
return;
}
mutex_enter(&zf->zf_lock);
if (zs->zs_missed) {
pf_start = zs->zs_pf_start;
pf_end = zs->zs_pf_start = zs->zs_pf_end;
} else {
pf_start = pf_end = 0;
}
ipf_start = zs->zs_ipf_start;
ipf_end = zs->zs_ipf_start = zs->zs_ipf_end;
mutex_exit(&zf->zf_lock);
ASSERT3S(pf_start, <=, pf_end);
ASSERT3S(ipf_start, <=, ipf_end);
epbs = zf->zf_dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
ipf_start = P2ROUNDUP(ipf_start, 1 << epbs) >> epbs;
ipf_end = P2ROUNDUP(ipf_end, 1 << epbs) >> epbs;
ASSERT3S(ipf_start, <=, ipf_end);
issued = pf_end - pf_start + ipf_end - ipf_start;
if (issued > 1) {
/* More references on top of taken in dmu_zfetch_prepare(). */
- for (int i = 0; i < issued - 1; i++)
- zfs_refcount_add(&zs->zs_refs, NULL);
+ zfs_refcount_add_few(&zs->zs_refs, issued - 1, NULL);
} else if (issued == 0) {
/* Some other thread has done our work, so drop the ref. */
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
return;
}
aggsum_add(&zfetch_sums.zfetchstat_io_active, issued);
if (!have_lock)
rw_enter(&zf->zf_dnode->dn_struct_rwlock, RW_READER);
issued = 0;
for (int64_t blk = pf_start; blk < pf_end; blk++) {
issued += dbuf_prefetch_impl(zf->zf_dnode, 0, blk,
ZIO_PRIORITY_ASYNC_READ, 0, dmu_zfetch_done, zs);
}
for (int64_t iblk = ipf_start; iblk < ipf_end; iblk++) {
issued += dbuf_prefetch_impl(zf->zf_dnode, 1, iblk,
ZIO_PRIORITY_ASYNC_READ, 0, dmu_zfetch_done, zs);
}
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
if (issued)
ZFETCHSTAT_ADD(zfetchstat_io_issued, issued);
}
void
dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data,
boolean_t missed, boolean_t have_lock)
{
zstream_t *zs;
zs = dmu_zfetch_prepare(zf, blkid, nblks, fetch_data, have_lock);
if (zs)
dmu_zfetch_run(zs, missed, have_lock);
}
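/*
 * A minimal usage sketch (hypothetical helper, not part of this file) of the
 * prepare/run split above: a reader prepares the stream before issuing its
 * demand read, then runs the prefetch afterwards, reporting whether the
 * demand read missed the cache.
 */
static void
dmu_zfetch_usage_sketch(zfetch_t *zf, uint64_t blkid, uint64_t nblks)
{
	zstream_t *zs;

	/* Extend or create the stream; may return NULL if nothing to do. */
	zs = dmu_zfetch_prepare(zf, blkid, nblks, B_TRUE, B_FALSE);

	/* ... the caller issues its demand read of blkid..blkid+nblks here ... */

	/* Issue the prefetch; B_TRUE assumes the demand read was a cache miss. */
	if (zs != NULL)
		dmu_zfetch_run(zs, B_TRUE, B_FALSE);
}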
ZFS_MODULE_PARAM(zfs_prefetch, zfs_prefetch_, disable, INT, ZMOD_RW,
"Disable all ZFS prefetching");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_streams, UINT, ZMOD_RW,
"Max number of streams per zfetch");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, min_sec_reap, UINT, ZMOD_RW,
"Min time before stream reclaim");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_sec_reap, UINT, ZMOD_RW,
"Max time before stream delete");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, min_distance, UINT, ZMOD_RW,
"Min bytes to prefetch per stream");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_distance, UINT, ZMOD_RW,
"Max bytes to prefetch per stream");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_idistance, UINT, ZMOD_RW,
"Max bytes to prefetch indirects for per stream");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, array_rd_sz, U64, ZMOD_RW,
"Number of bytes in a array_read");
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dataset.c b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
index 14e7ced4007c..d6db61729223 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dataset.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
@@ -1,5015 +1,5014 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 RackTop Systems.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2016, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020 The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_bookmark.h>
#include <sys/policy.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
#include <sys/zio_checksum.h>
/*
* The SPA supports block sizes up to 16MB. However, very large blocks
* can have an impact on i/o latency (e.g. tying up a spinning disk for
* ~300ms), and also potentially on the memory allocator. Therefore,
* we did not allow the recordsize to be set larger than zfs_max_recordsize
* (former default: 1MB). Larger blocks could be created by changing this
* tunable, and pools with larger blocks could always be imported and used,
* regardless of this setting.
*
* We do, however, still limit it by default to 1M on x86_32, because Linux's
* 3/1 memory split doesn't leave much room for 16M chunks.
*/
#ifdef _ILP32
uint_t zfs_max_recordsize = 1 * 1024 * 1024;
#else
uint_t zfs_max_recordsize = 16 * 1024 * 1024;
#endif
static int zfs_allow_redacted_dataset_mount = 0;
int zfs_snapshot_history_enabled = 1;
#define SWITCH64(x, y) \
{ \
uint64_t __tmp = (x); \
(x) = (y); \
(y) = __tmp; \
}
#define DS_REF_MAX (1ULL << 62)
static void dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds,
uint64_t obj, dmu_tx_t *tx);
static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds,
dmu_tx_t *tx);
static void unload_zfeature(dsl_dataset_t *ds, spa_feature_t f);
extern uint_t spa_asize_inflation;
static zil_header_t zero_zil;
/*
* Figure out how much of this delta should be propagated to the dsl_dir
* layer. If there's a refreservation, that space has already been
* partially accounted for in our ancestors.
*/
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
dsl_dataset_phys_t *ds_phys;
uint64_t old_bytes, new_bytes;
if (ds->ds_reserved == 0)
return (delta);
ds_phys = dsl_dataset_phys(ds);
old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
return (new_bytes - old_bytes);
}
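/*
 * Worked example (illustrative numbers): with ds_reserved = 100 and
 * ds_unique_bytes = 90, a delta of +30 gives old_bytes = MAX(90, 100) = 100
 * and new_bytes = MAX(120, 100) = 120, so only 20 of the 30 bytes are
 * propagated to the dsl_dir; the first 10 were already covered by the
 * refreservation.
 */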
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int used = bp_get_dsize_sync(spa, bp);
int compressed = BP_GET_PSIZE(bp);
int uncompressed = BP_GET_UCSIZE(bp);
int64_t delta;
spa_feature_t f;
dprintf_bp(bp, "ds=%p", ds);
ASSERT(dmu_tx_is_syncing(tx));
/* It could have been compressed away to nothing */
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return;
ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
if (ds == NULL) {
dsl_pool_mos_diduse_space(tx->tx_pool,
used, compressed, uncompressed);
return;
}
ASSERT3U(bp->blk_birth, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_lock);
delta = parent_delta(ds, used);
dsl_dataset_phys(ds)->ds_referenced_bytes += used;
dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
dsl_dataset_phys(ds)->ds_unique_bytes += used;
if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
ds->ds_feature_activation[SPA_FEATURE_LARGE_BLOCKS] =
(void *)B_TRUE;
}
f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
if (f != SPA_FEATURE_NONE) {
ASSERT3S(spa_feature_table[f].fi_type, ==,
ZFEATURE_TYPE_BOOLEAN);
ds->ds_feature_activation[f] = (void *)B_TRUE;
}
f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
if (f != SPA_FEATURE_NONE) {
ASSERT3S(spa_feature_table[f].fi_type, ==,
ZFEATURE_TYPE_BOOLEAN);
ds->ds_feature_activation[f] = (void *)B_TRUE;
}
/*
* Track block for livelist, but ignore embedded blocks because
* they do not need to be freed.
*/
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg &&
!(BP_IS_EMBEDDED(bp))) {
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_allocs, bp);
}
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_transfer_space(ds->ds_dir, delta,
compressed, uncompressed, used,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
}
/*
* Called when the specified segment has been remapped, and is thus no
* longer referenced in the head dataset. The vdev must be indirect.
*
* If the segment is referenced by a snapshot, put it on the remap deadlist.
* Otherwise, add this segment to the obsolete spacemap.
*/
void
dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev, uint64_t offset,
uint64_t size, uint64_t birth, dmu_tx_t *tx)
{
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(birth <= tx->tx_txg);
ASSERT(!ds->ds_is_snapshot);
if (birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
} else {
blkptr_t fakebp;
dva_t *dva = &fakebp.blk_dva[0];
ASSERT(ds != NULL);
mutex_enter(&ds->ds_remap_deadlist_lock);
if (!dsl_dataset_remap_deadlist_exists(ds)) {
dsl_dataset_create_remap_deadlist(ds, tx);
}
mutex_exit(&ds->ds_remap_deadlist_lock);
BP_ZERO(&fakebp);
fakebp.blk_birth = birth;
DVA_SET_VDEV(dva, vdev);
DVA_SET_OFFSET(dva, offset);
DVA_SET_ASIZE(dva, size);
dsl_deadlist_insert(&ds->ds_remap_deadlist, &fakebp, B_FALSE,
tx);
}
}
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
boolean_t async)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int used = bp_get_dsize_sync(spa, bp);
int compressed = BP_GET_PSIZE(bp);
int uncompressed = BP_GET_UCSIZE(bp);
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(bp->blk_birth <= tx->tx_txg);
if (ds == NULL) {
dsl_free(tx->tx_pool, tx->tx_txg, bp);
dsl_pool_mos_diduse_space(tx->tx_pool,
-used, -compressed, -uncompressed);
return (used);
}
ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
ASSERT(!ds->ds_is_snapshot);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
/*
* Track block for livelist, but ignore embedded blocks because
* they do not need to be freed.
*/
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg &&
!(BP_IS_EMBEDDED(bp))) {
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_frees, bp);
}
if (bp->blk_birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
int64_t delta;
dprintf_bp(bp, "freeing ds=%llu", (u_longlong_t)ds->ds_object);
dsl_free(tx->tx_pool, tx->tx_txg, bp);
mutex_enter(&ds->ds_lock);
ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
!DS_UNIQUE_IS_ACCURATE(ds));
delta = parent_delta(ds, -used);
dsl_dataset_phys(ds)->ds_unique_bytes -= used;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_transfer_space(ds->ds_dir,
delta, -compressed, -uncompressed, -used,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
} else {
dprintf_bp(bp, "putting on dead list: %s", "");
if (async) {
/*
* We are here as part of zio's write done callback,
* which means we're a zio interrupt thread. We can't
* call dsl_deadlist_insert() now because it may block
* waiting for I/O. Instead, put bp on the deferred
* queue and let dsl_pool_sync() finish the job.
*/
bplist_append(&ds->ds_pending_deadlist, bp);
} else {
dsl_deadlist_insert(&ds->ds_deadlist, bp, B_FALSE, tx);
}
ASSERT3U(ds->ds_prev->ds_object, ==,
dsl_dataset_phys(ds)->ds_prev_snap_obj);
ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0);
/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object && bp->blk_birth >
dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
mutex_enter(&ds->ds_prev->ds_lock);
dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used;
mutex_exit(&ds->ds_prev->ds_lock);
}
if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
dsl_dir_transfer_space(ds->ds_dir, used,
DD_USED_HEAD, DD_USED_SNAP, tx);
}
}
dsl_bookmark_block_killed(ds, bp, tx);
mutex_enter(&ds->ds_lock);
ASSERT3U(dsl_dataset_phys(ds)->ds_referenced_bytes, >=, used);
dsl_dataset_phys(ds)->ds_referenced_bytes -= used;
ASSERT3U(dsl_dataset_phys(ds)->ds_compressed_bytes, >=, compressed);
dsl_dataset_phys(ds)->ds_compressed_bytes -= compressed;
ASSERT3U(dsl_dataset_phys(ds)->ds_uncompressed_bytes, >=, uncompressed);
dsl_dataset_phys(ds)->ds_uncompressed_bytes -= uncompressed;
mutex_exit(&ds->ds_lock);
return (used);
}
struct feature_type_uint64_array_arg {
uint64_t length;
uint64_t *array;
};
static void
unload_zfeature(dsl_dataset_t *ds, spa_feature_t f)
{
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN:
break;
case ZFEATURE_TYPE_UINT64_ARRAY:
{
struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
kmem_free(ftuaa->array, ftuaa->length * sizeof (uint64_t));
kmem_free(ftuaa, sizeof (*ftuaa));
break;
}
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
}
}
static int
load_zfeature(objset_t *mos, dsl_dataset_t *ds, spa_feature_t f)
{
int err = 0;
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN:
err = zap_contains(mos, ds->ds_object,
spa_feature_table[f].fi_guid);
if (err == 0) {
ds->ds_feature[f] = (void *)B_TRUE;
} else {
ASSERT3U(err, ==, ENOENT);
err = 0;
}
break;
case ZFEATURE_TYPE_UINT64_ARRAY:
{
uint64_t int_size, num_int;
uint64_t *data;
err = zap_length(mos, ds->ds_object,
spa_feature_table[f].fi_guid, &int_size, &num_int);
if (err != 0) {
ASSERT3U(err, ==, ENOENT);
err = 0;
break;
}
ASSERT3U(int_size, ==, sizeof (uint64_t));
data = kmem_alloc(int_size * num_int, KM_SLEEP);
VERIFY0(zap_lookup(mos, ds->ds_object,
spa_feature_table[f].fi_guid, int_size, num_int, data));
struct feature_type_uint64_array_arg *ftuaa =
kmem_alloc(sizeof (*ftuaa), KM_SLEEP);
ftuaa->length = num_int;
ftuaa->array = data;
ds->ds_feature[f] = ftuaa;
break;
}
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
}
return (err);
}
/*
* We have to release the fsid synchronously or we risk that a subsequent
* mount of the same dataset will fail to unique_insert the fsid. This
* failure would manifest itself as the fsid of this dataset changing
* between mounts, which makes NFS clients quite unhappy.
*/
static void
dsl_dataset_evict_sync(void *dbu)
{
dsl_dataset_t *ds = dbu;
ASSERT(ds->ds_owner == NULL);
unique_remove(ds->ds_fsid_guid);
}
static void
dsl_dataset_evict_async(void *dbu)
{
dsl_dataset_t *ds = dbu;
ASSERT(ds->ds_owner == NULL);
ds->ds_dbuf = NULL;
if (ds->ds_objset != NULL)
dmu_objset_evict(ds->ds_objset);
if (ds->ds_prev) {
dsl_dataset_rele(ds->ds_prev, ds);
ds->ds_prev = NULL;
}
dsl_bookmark_fini_ds(ds);
bplist_destroy(&ds->ds_pending_deadlist);
if (dsl_deadlist_is_open(&ds->ds_deadlist))
dsl_deadlist_close(&ds->ds_deadlist);
if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
dsl_deadlist_close(&ds->ds_remap_deadlist);
if (ds->ds_dir)
dsl_dir_async_rele(ds->ds_dir, ds);
ASSERT(!list_link_active(&ds->ds_synced_link));
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (dsl_dataset_feature_is_active(ds, f))
unload_zfeature(ds, f);
}
list_destroy(&ds->ds_prop_cbs);
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
mutex_destroy(&ds->ds_remap_deadlist_lock);
zfs_refcount_destroy(&ds->ds_longholds);
rrw_destroy(&ds->ds_bp_rwlock);
kmem_free(ds, sizeof (dsl_dataset_t));
}
int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
dsl_dataset_phys_t *headphys;
int err;
dmu_buf_t *headdbuf;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
if (ds->ds_snapname[0])
return (0);
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0)
return (0);
err = dmu_bonus_hold(mos, dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
FTAG, &headdbuf);
if (err != 0)
return (err);
headphys = headdbuf->db_data;
err = zap_value_search(dp->dp_meta_objset,
headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
if (err != 0 && zfs_recover == B_TRUE) {
err = 0;
(void) snprintf(ds->ds_snapname, sizeof (ds->ds_snapname),
"SNAPOBJ=%llu-ERR=%d",
(unsigned long long)ds->ds_object, err);
}
dmu_buf_rele(headdbuf, FTAG);
return (err);
}
int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
matchtype_t mt = 0;
int err;
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
err = zap_lookup_norm(mos, snapobj, name, 8, 1,
value, mt, NULL, 0, NULL);
if (err == ENOTSUP && (mt & MT_NORMALIZE))
err = zap_lookup(mos, snapobj, name, 8, 1, value);
return (err);
}
int
dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
boolean_t adj_cnt)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
matchtype_t mt = 0;
int err;
dsl_dir_snap_cmtime_update(ds->ds_dir, tx);
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
err = zap_remove_norm(mos, snapobj, name, mt, tx);
if (err == ENOTSUP && (mt & MT_NORMALIZE))
err = zap_remove(mos, snapobj, name, tx);
if (err == 0 && adj_cnt)
dsl_fs_ss_count_adjust(ds->ds_dir, -1,
DD_FIELD_SNAPSHOT_COUNT, tx);
return (err);
}
boolean_t
dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, const void *tag)
{
dmu_buf_t *dbuf = ds->ds_dbuf;
boolean_t result = B_FALSE;
if (dbuf != NULL && dmu_buf_try_add_ref(dbuf, dp->dp_meta_objset,
ds->ds_object, DMU_BONUS_BLKID, tag)) {
if (ds == dmu_buf_get_user(dbuf))
result = B_TRUE;
else
dmu_buf_rele(dbuf, tag);
}
return (result);
}
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, const void *tag,
dsl_dataset_t **dsp)
{
objset_t *mos = dp->dp_meta_objset;
dmu_buf_t *dbuf;
dsl_dataset_t *ds;
int err;
dmu_object_info_t doi;
ASSERT(dsl_pool_config_held(dp));
err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
if (err != 0)
return (err);
/* Make sure dsobj has the correct object type. */
dmu_object_info_from_db(dbuf, &doi);
if (doi.doi_bonus_type != DMU_OT_DSL_DATASET) {
dmu_buf_rele(dbuf, tag);
return (SET_ERROR(EINVAL));
}
ds = dmu_buf_get_user(dbuf);
if (ds == NULL) {
dsl_dataset_t *winner = NULL;
ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
ds->ds_dbuf = dbuf;
ds->ds_object = dsobj;
ds->ds_is_snapshot = dsl_dataset_phys(ds)->ds_num_children != 0;
list_link_init(&ds->ds_synced_link);
err = dsl_dir_hold_obj(dp, dsl_dataset_phys(ds)->ds_dir_obj,
NULL, ds, &ds->ds_dir);
if (err != 0) {
kmem_free(ds, sizeof (dsl_dataset_t));
dmu_buf_rele(dbuf, tag);
return (err);
}
mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_remap_deadlist_lock,
NULL, MUTEX_DEFAULT, NULL);
rrw_init(&ds->ds_bp_rwlock, B_FALSE);
zfs_refcount_create(&ds->ds_longholds);
bplist_create(&ds->ds_pending_deadlist);
list_create(&ds->ds_sendstreams, sizeof (dmu_sendstatus_t),
offsetof(dmu_sendstatus_t, dss_link));
list_create(&ds->ds_prop_cbs, sizeof (dsl_prop_cb_record_t),
offsetof(dsl_prop_cb_record_t, cbr_ds_node));
if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
spa_feature_t f;
for (f = 0; f < SPA_FEATURES; f++) {
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET))
continue;
err = load_zfeature(mos, ds, f);
}
}
if (!ds->ds_is_snapshot) {
ds->ds_snapname[0] = '\0';
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds, &ds->ds_prev);
}
if (err != 0)
goto after_dsl_bookmark_fini;
err = dsl_bookmark_init_ds(ds);
} else {
if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
err = dsl_dataset_get_snapname(ds);
if (err == 0 &&
dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
err = zap_count(
ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_userrefs_obj,
&ds->ds_userrefs);
}
}
if (err == 0 && !ds->ds_is_snapshot) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
&ds->ds_reserved);
if (err == 0) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
&ds->ds_quota);
}
} else {
ds->ds_reserved = ds->ds_quota = 0;
}
if (err == 0 && ds->ds_dir->dd_crypto_obj != 0 &&
ds->ds_is_snapshot &&
zap_contains(mos, dsobj, DS_FIELD_IVSET_GUID) != 0) {
dp->dp_spa->spa_errata =
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
dsl_deadlist_open(&ds->ds_deadlist,
mos, dsl_dataset_phys(ds)->ds_deadlist_obj);
uint64_t remap_deadlist_obj =
dsl_dataset_get_remap_deadlist_object(ds);
if (remap_deadlist_obj != 0) {
dsl_deadlist_open(&ds->ds_remap_deadlist, mos,
remap_deadlist_obj);
}
dmu_buf_init_user(&ds->ds_dbu, dsl_dataset_evict_sync,
dsl_dataset_evict_async, &ds->ds_dbuf);
if (err == 0)
winner = dmu_buf_set_user_ie(dbuf, &ds->ds_dbu);
if (err != 0 || winner != NULL) {
dsl_deadlist_close(&ds->ds_deadlist);
if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
dsl_deadlist_close(&ds->ds_remap_deadlist);
dsl_bookmark_fini_ds(ds);
after_dsl_bookmark_fini:
if (ds->ds_prev)
dsl_dataset_rele(ds->ds_prev, ds);
dsl_dir_rele(ds->ds_dir, ds);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (dsl_dataset_feature_is_active(ds, f))
unload_zfeature(ds, f);
}
list_destroy(&ds->ds_prop_cbs);
list_destroy(&ds->ds_sendstreams);
bplist_destroy(&ds->ds_pending_deadlist);
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
mutex_destroy(&ds->ds_remap_deadlist_lock);
zfs_refcount_destroy(&ds->ds_longholds);
rrw_destroy(&ds->ds_bp_rwlock);
kmem_free(ds, sizeof (dsl_dataset_t));
if (err != 0) {
dmu_buf_rele(dbuf, tag);
return (err);
}
ds = winner;
} else {
ds->ds_fsid_guid =
unique_insert(dsl_dataset_phys(ds)->ds_fsid_guid);
if (ds->ds_fsid_guid !=
dsl_dataset_phys(ds)->ds_fsid_guid) {
zfs_dbgmsg("ds_fsid_guid changed from "
"%llx to %llx for pool %s dataset id %llu",
(long long)
dsl_dataset_phys(ds)->ds_fsid_guid,
(long long)ds->ds_fsid_guid,
spa_name(dp->dp_spa),
(u_longlong_t)dsobj);
}
}
}
ASSERT3P(ds->ds_dbuf, ==, dbuf);
ASSERT3P(dsl_dataset_phys(ds), ==, dbuf->db_data);
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0 ||
spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
*dsp = ds;
return (0);
}
int
dsl_dataset_create_key_mapping(dsl_dataset_t *ds)
{
dsl_dir_t *dd = ds->ds_dir;
if (dd->dd_crypto_obj == 0)
return (0);
return (spa_keystore_create_mapping(dd->dd_pool->dp_spa,
ds, ds, &ds->ds_key_mapping));
}
int
dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp)
{
int err;
err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
if (err != 0)
return (err);
ASSERT3P(*dsp, !=, NULL);
if (flags & DS_HOLD_FLAG_DECRYPT) {
err = dsl_dataset_create_key_mapping(*dsp);
if (err != 0)
dsl_dataset_rele(*dsp, tag);
}
return (err);
}
int
dsl_dataset_hold_flags(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
const void *tag, dsl_dataset_t **dsp)
{
dsl_dir_t *dd;
const char *snapname;
uint64_t obj;
int err = 0;
dsl_dataset_t *ds;
err = dsl_dir_hold(dp, name, FTAG, &dd, &snapname);
if (err != 0)
return (err);
ASSERT(dsl_pool_config_held(dp));
obj = dsl_dir_phys(dd)->dd_head_dataset_obj;
if (obj != 0)
err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag, &ds);
else
err = SET_ERROR(ENOENT);
/* we may be looking for a snapshot */
if (err == 0 && snapname != NULL) {
dsl_dataset_t *snap_ds;
if (*snapname++ != '@') {
dsl_dataset_rele_flags(ds, flags, tag);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(ENOENT));
}
dprintf("looking for snapshot '%s'\n", snapname);
err = dsl_dataset_snap_lookup(ds, snapname, &obj);
if (err == 0) {
err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag,
&snap_ds);
}
dsl_dataset_rele_flags(ds, flags, tag);
if (err == 0) {
mutex_enter(&snap_ds->ds_lock);
if (snap_ds->ds_snapname[0] == 0)
(void) strlcpy(snap_ds->ds_snapname, snapname,
sizeof (snap_ds->ds_snapname));
mutex_exit(&snap_ds->ds_lock);
ds = snap_ds;
}
}
if (err == 0)
*dsp = ds;
dsl_dir_rele(dd, FTAG);
return (err);
}
int
dsl_dataset_hold(dsl_pool_t *dp, const char *name, const void *tag,
dsl_dataset_t **dsp)
{
return (dsl_dataset_hold_flags(dp, name, 0, tag, dsp));
}
static int
dsl_dataset_own_obj_impl(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
const void *tag, boolean_t override, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_obj_flags(dp, dsobj, flags, tag, dsp);
if (err != 0)
return (err);
if (!dsl_dataset_tryown(*dsp, tag, override)) {
dsl_dataset_rele_flags(*dsp, flags, tag);
*dsp = NULL;
return (SET_ERROR(EBUSY));
}
return (0);
}
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_FALSE, dsp));
}
int
dsl_dataset_own_obj_force(dsl_pool_t *dp, uint64_t dsobj,
ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_TRUE, dsp));
}
static int
dsl_dataset_own_impl(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
const void *tag, boolean_t override, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_flags(dp, name, flags, tag, dsp);
if (err != 0)
return (err);
if (!dsl_dataset_tryown(*dsp, tag, override)) {
dsl_dataset_rele_flags(*dsp, flags, tag);
return (SET_ERROR(EBUSY));
}
return (0);
}
int
dsl_dataset_own_force(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_impl(dp, name, flags, tag, B_TRUE, dsp));
}
int
dsl_dataset_own(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_impl(dp, name, flags, tag, B_FALSE, dsp));
}
/*
* See the comment above dsl_pool_hold() for details. In summary, a long
* hold is used to prevent destruction of a dataset while the pool hold
* is dropped, allowing other concurrent operations (e.g. spa_sync()).
*
* The dataset and pool must be held when this function is called. After it
* is called, the pool hold may be released while the dataset is still held
* and accessed.
*/
void
dsl_dataset_long_hold(dsl_dataset_t *ds, const void *tag)
{
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
(void) zfs_refcount_add(&ds->ds_longholds, tag);
}
void
dsl_dataset_long_rele(dsl_dataset_t *ds, const void *tag)
{
(void) zfs_refcount_remove(&ds->ds_longholds, tag);
}
/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_dataset_long_held(dsl_dataset_t *ds)
{
return (!zfs_refcount_is_zero(&ds->ds_longholds));
}
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
if (ds == NULL) {
(void) strlcpy(name, "mos", ZFS_MAX_DATASET_NAME_LEN);
} else {
dsl_dir_name(ds->ds_dir, name);
VERIFY0(dsl_dataset_get_snapname(ds));
if (ds->ds_snapname[0]) {
VERIFY3U(strlcat(name, "@", ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
/*
* We use a "recursive" mutex so that we
* can call dprintf_ds() with ds_lock held.
*/
if (!MUTEX_HELD(&ds->ds_lock)) {
mutex_enter(&ds->ds_lock);
VERIFY3U(strlcat(name, ds->ds_snapname,
ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
mutex_exit(&ds->ds_lock);
} else {
VERIFY3U(strlcat(name, ds->ds_snapname,
ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
}
}
}
}
int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
VERIFY0(dsl_dataset_get_snapname(ds));
mutex_enter(&ds->ds_lock);
int len = strlen(ds->ds_snapname);
mutex_exit(&ds->ds_lock);
/* add '@' if ds is a snap */
if (len > 0)
len++;
len += dsl_dir_namelen(ds->ds_dir);
return (len);
}
void
dsl_dataset_rele(dsl_dataset_t *ds, const void *tag)
{
dmu_buf_rele(ds->ds_dbuf, tag);
}
void
dsl_dataset_remove_key_mapping(dsl_dataset_t *ds)
{
dsl_dir_t *dd = ds->ds_dir;
if (dd == NULL || dd->dd_crypto_obj == 0)
return;
(void) spa_keystore_remove_mapping(dd->dd_pool->dp_spa,
ds->ds_object, ds);
}
void
dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags,
const void *tag)
{
if (flags & DS_HOLD_FLAG_DECRYPT)
dsl_dataset_remove_key_mapping(ds);
dsl_dataset_rele(ds, tag);
}
void
dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, const void *tag)
{
ASSERT3P(ds->ds_owner, ==, tag);
ASSERT(ds->ds_dbuf != NULL);
mutex_enter(&ds->ds_lock);
ds->ds_owner = NULL;
mutex_exit(&ds->ds_lock);
dsl_dataset_long_rele(ds, tag);
dsl_dataset_rele_flags(ds, flags, tag);
}
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, const void *tag, boolean_t override)
{
boolean_t gotit = FALSE;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
mutex_enter(&ds->ds_lock);
if (ds->ds_owner == NULL && (override || !(DS_IS_INCONSISTENT(ds) ||
(dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS) &&
!zfs_allow_redacted_dataset_mount)))) {
ds->ds_owner = tag;
dsl_dataset_long_hold(ds, tag);
gotit = TRUE;
}
mutex_exit(&ds->ds_lock);
return (gotit);
}
boolean_t
dsl_dataset_has_owner(dsl_dataset_t *ds)
{
boolean_t rv;
mutex_enter(&ds->ds_lock);
rv = (ds->ds_owner != NULL);
mutex_exit(&ds->ds_lock);
return (rv);
}
static boolean_t
zfeature_active(spa_feature_t f, void *arg)
{
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN: {
boolean_t val = (boolean_t)(uintptr_t)arg;
ASSERT(val == B_FALSE || val == B_TRUE);
return (val);
}
case ZFEATURE_TYPE_UINT64_ARRAY:
/*
* In this case, arg is a uint64_t array. The feature is active
* if the array is non-null.
*/
return (arg != NULL);
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
return (B_FALSE);
}
}
boolean_t
dsl_dataset_feature_is_active(dsl_dataset_t *ds, spa_feature_t f)
{
return (zfeature_active(f, ds->ds_feature[f]));
}
/*
* The buffers passed out by this function are references to internal buffers;
* they should not be freed by callers of this function, and they should not be
* used after the dataset has been released.
*/
boolean_t
dsl_dataset_get_uint64_array_feature(dsl_dataset_t *ds, spa_feature_t f,
uint64_t *outlength, uint64_t **outp)
{
VERIFY(spa_feature_table[f].fi_type & ZFEATURE_TYPE_UINT64_ARRAY);
if (!dsl_dataset_feature_is_active(ds, f)) {
return (B_FALSE);
}
struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
*outp = ftuaa->array;
*outlength = ftuaa->length;
return (B_TRUE);
}
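/*
 * A minimal usage sketch (hypothetical helper, not part of this file): the
 * array returned below is borrowed from the dataset, so it is neither freed
 * here nor used after the dataset is released.
 */
static void
dsl_dataset_print_redaction_snaps_sketch(dsl_dataset_t *ds)
{
	uint64_t length, *snaps;

	if (dsl_dataset_get_uint64_array_feature(ds,
	    SPA_FEATURE_REDACTED_DATASETS, &length, &snaps)) {
		for (uint64_t i = 0; i < length; i++)
			zfs_dbgmsg("redaction snap obj %llu",
			    (u_longlong_t)snaps[i]);
	}
}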
void
dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, void *arg,
dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
uint64_t zero = 0;
VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
spa_feature_incr(spa, f, tx);
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN:
ASSERT3S((boolean_t)(uintptr_t)arg, ==, B_TRUE);
VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
sizeof (zero), 1, &zero, tx));
break;
case ZFEATURE_TYPE_UINT64_ARRAY:
{
struct feature_type_uint64_array_arg *ftuaa = arg;
VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
sizeof (uint64_t), ftuaa->length, ftuaa->array, tx));
break;
}
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
}
}
static void
dsl_dataset_deactivate_feature_impl(dsl_dataset_t *ds, spa_feature_t f,
dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
uint64_t dsobj = ds->ds_object;
VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
VERIFY0(zap_remove(mos, dsobj, spa_feature_table[f].fi_guid, tx));
spa_feature_decr(spa, f, tx);
ds->ds_feature[f] = NULL;
}
void
dsl_dataset_deactivate_feature(dsl_dataset_t *ds, spa_feature_t f, dmu_tx_t *tx)
{
unload_zfeature(ds, f);
dsl_dataset_deactivate_feature_impl(ds, f, tx);
}
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
dsl_crypto_params_t *dcp, uint64_t flags, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
dmu_buf_t *dbuf;
dsl_dataset_phys_t *dsphys;
uint64_t dsobj;
objset_t *mos = dp->dp_meta_objset;
if (origin == NULL)
origin = dp->dp_origin_snap;
ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = dd->dd_object;
dsphys->ds_flags = flags;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_snapnames_zapobj =
zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
DMU_OT_NONE, 0, tx);
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
if (origin == NULL) {
dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
} else {
dsl_dataset_t *ohds; /* head of the origin snapshot */
dsphys->ds_prev_snap_obj = origin->ds_object;
dsphys->ds_prev_snap_txg =
dsl_dataset_phys(origin)->ds_creation_txg;
dsphys->ds_referenced_bytes =
dsl_dataset_phys(origin)->ds_referenced_bytes;
dsphys->ds_compressed_bytes =
dsl_dataset_phys(origin)->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes =
dsl_dataset_phys(origin)->ds_uncompressed_bytes;
rrw_enter(&origin->ds_bp_rwlock, RW_READER, FTAG);
dsphys->ds_bp = dsl_dataset_phys(origin)->ds_bp;
rrw_exit(&origin->ds_bp_rwlock, FTAG);
/*
* Inherit flags that describe the dataset's contents
* (INCONSISTENT) or properties (Case Insensitive).
*/
dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags &
(DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (zfeature_active(f, origin->ds_feature[f])) {
dsl_dataset_activate_feature(dsobj, f,
origin->ds_feature[f], tx);
}
}
dmu_buf_will_dirty(origin->ds_dbuf, tx);
dsl_dataset_phys(origin)->ds_num_children++;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(origin->ds_dir)->dd_head_dataset_obj,
FTAG, &ohds));
dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
dsl_dataset_rele(ohds, FTAG);
if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
if (dsl_dataset_phys(origin)->ds_next_clones_obj == 0) {
dsl_dataset_phys(origin)->ds_next_clones_obj =
zap_create(mos,
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(mos,
dsl_dataset_phys(origin)->ds_next_clones_obj,
dsobj, tx));
}
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_phys(dd)->dd_origin_obj = origin->ds_object;
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
dsl_dir_phys(origin->ds_dir)->dd_clones =
zap_create(mos,
DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(mos,
dsl_dir_phys(origin->ds_dir)->dd_clones,
dsobj, tx));
}
}
/* handle encryption */
dsl_dataset_create_crypt_sync(dsobj, dd, origin, dcp, tx);
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
dmu_buf_rele(dbuf, FTAG);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_phys(dd)->dd_head_dataset_obj = dsobj;
return (dsobj);
}
static void
dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
if (memcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
dsl_pool_t *dp = ds->ds_dir->dd_pool;
zio_t *zio;
memset(&os->os_zil_header, 0, sizeof (os->os_zil_header));
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dsl_dataset_sync(ds, zio, tx);
VERIFY0(zio_wait(zio));
dsl_dataset_sync_done(ds, tx);
}
}
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
dsl_dataset_t *origin, uint64_t flags, cred_t *cr,
dsl_crypto_params_t *dcp, dmu_tx_t *tx)
{
dsl_pool_t *dp = pdd->dd_pool;
uint64_t dsobj, ddobj;
dsl_dir_t *dd;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(lastname[0] != '@');
/*
* Filesystems will eventually have their origin set to dp_origin_snap,
* but that's taken care of in dsl_dataset_create_sync_dd. When
* creating a filesystem, this function is called with origin equal to
* NULL.
*/
if (origin != NULL)
ASSERT3P(origin, !=, dp->dp_origin_snap);
ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
VERIFY0(dsl_dir_hold_obj(dp, ddobj, lastname, FTAG, &dd));
dsobj = dsl_dataset_create_sync_dd(dd, origin, dcp,
flags & ~DS_CREATE_FLAG_NODIRTY, tx);
dsl_deleg_set_create_perms(dd, tx, cr);
/*
* If we are creating a clone and the livelist feature is enabled,
* add the entry DD_FIELD_LIVELIST to ZAP.
*/
if (origin != NULL &&
spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LIVELIST)) {
objset_t *mos = dd->dd_pool->dp_meta_objset;
dsl_dir_zapify(dd, tx);
uint64_t obj = dsl_deadlist_alloc(mos, tx);
VERIFY0(zap_add(mos, dd->dd_object, DD_FIELD_LIVELIST,
sizeof (uint64_t), 1, &obj, tx));
spa_feature_incr(dp->dp_spa, SPA_FEATURE_LIVELIST, tx);
}
/*
* Since we're creating a new node we know it's a leaf, so we can
* initialize the counts if the limit feature is active.
*/
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
uint64_t cnt = 0;
objset_t *os = dd->dd_pool->dp_meta_objset;
dsl_dir_zapify(dd, tx);
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (cnt), 1, &cnt, tx));
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (cnt), 1, &cnt, tx));
}
dsl_dir_rele(dd, FTAG);
/*
* If we are creating a clone, make sure we zero out any stale
* data from the origin snapshot's zil header.
*/
if (origin != NULL && !(flags & DS_CREATE_FLAG_NODIRTY)) {
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
dsl_dataset_zero_zil(ds, tx);
dsl_dataset_rele(ds, FTAG);
}
return (dsobj);
}
/*
* The unique space in the head dataset can be calculated by subtracting
* the space used in the most recent snapshot, that is still being used
* in this file system, from the space currently in use. To figure out
* the space in the most recent snapshot still in use, we need to take
* the total space used in the snapshot and subtract out the space that
* has been freed up since the snapshot was taken.
*/
void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
uint64_t mrs_used;
uint64_t dlused, dlcomp, dluncomp;
ASSERT(!ds->ds_is_snapshot);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0)
mrs_used = dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes;
else
mrs_used = 0;
dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
ASSERT3U(dlused, <=, mrs_used);
dsl_dataset_phys(ds)->ds_unique_bytes =
dsl_dataset_phys(ds)->ds_referenced_bytes - (mrs_used - dlused);
if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
SPA_VERSION_UNIQUE_ACCURATE)
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
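/*
 * Worked example (illustrative numbers): if the head references 1000 bytes,
 * the most recent snapshot referenced 800 bytes (mrs_used), and 200 of those
 * have since been freed into the deadlist (dlused), then the snapshot still
 * shares 800 - 200 = 600 bytes with the head, so ds_unique_bytes becomes
 * 1000 - 600 = 400.
 */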
void
dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t count __maybe_unused;
int err;
ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2);
err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
obj, tx);
/*
* The err should not be ENOENT, but a bug in a previous version
* of the code could cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a missing entry.
* If we knew that the pool was created after
* SPA_VERSION_NEXT_CLONES, we could assert that it isn't
* ENOENT. However, at least we can check that we don't have
* too many entries in the next_clones_obj even after failing to
* remove this one.
*/
if (err != ENOENT)
VERIFY0(err);
ASSERT0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
&count));
ASSERT3U(count, <=, dsl_dataset_phys(ds)->ds_num_children - 2);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
return (&dsl_dataset_phys(ds)->ds_bp);
}
spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp;
if (ds == NULL) /* this is the meta-objset */
return;
ASSERT(ds->ds_objset != NULL);
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0)
panic("dirtying snapshot!");
/* Must not dirty a dataset in the same txg where it got snapshotted. */
ASSERT3U(tx->tx_txg, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
dp = ds->ds_dir->dd_pool;
if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
objset_t *os = ds->ds_objset;
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, ds);
/* if this dataset is encrypted, grab a reference to the DCK */
if (ds->ds_dir->dd_crypto_obj != 0 &&
!os->os_raw_receive &&
!os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_add_ref(ds->ds_key_mapping, ds);
}
}
}
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t asize;
if (!dmu_tx_is_syncing(tx))
return (0);
/*
* If there's an fs-only reservation, any blocks that might become
* owned by the snapshot dataset must be accommodated by space
* outside of the reservation.
*/
ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
asize = MIN(dsl_dataset_phys(ds)->ds_unique_bytes, ds->ds_reserved);
if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
return (SET_ERROR(ENOSPC));
/*
* Propagate any reserved space for this snapshot to other
* snapshot checks in this sync group.
*/
if (asize > 0)
dsl_dir_willuse_space(ds->ds_dir, asize, tx);
return (0);
}
int
dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr, proc_t *proc)
{
int error;
uint64_t value;
ds->ds_trysnap_txg = tx->tx_txg;
if (!dmu_tx_is_syncing(tx))
return (0);
/*
* We don't allow multiple snapshots of the same txg. If there
* is already one, try again.
*/
if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg)
return (SET_ERROR(EAGAIN));
/*
* Check for conflicting snapshot name.
*/
error = dsl_dataset_snap_lookup(ds, snapname, &value);
if (error == 0)
return (SET_ERROR(EEXIST));
if (error != ENOENT)
return (error);
/*
* We don't allow taking snapshots of inconsistent datasets, such as
* those into which we are currently receiving. However, if we are
* creating this snapshot as part of a receive, this check will be
* executed atomically with respect to the completion of the receive
* itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
* case we ignore this, knowing it will be fixed up for us shortly in
* dmu_recv_end_sync().
*/
if (!recv && DS_IS_INCONSISTENT(ds))
return (SET_ERROR(EBUSY));
/*
* Skip the check for temporary snapshots or if we have already checked
* the counts in dsl_dataset_snapshot_check. This means we really only
* check the count here when we're receiving a stream.
*/
if (cnt != 0 && cr != NULL) {
error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr, proc);
if (error != 0)
return (error);
}
error = dsl_dataset_snapshot_reserve_space(ds, tx);
if (error != 0)
return (error);
return (0);
}
int
dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_arg_t *ddsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
int rv = 0;
/*
* Pre-compute how many total new snapshots will be created for each
* level in the tree and below. This is needed for validating the
* snapshot limit when either taking a recursive snapshot or when
* taking multiple snapshots.
*
* The problem is that the counts are not actually adjusted when
* we are checking, only when we finally sync. For a single snapshot,
* this is easy, the count will increase by 1 at each node up the tree,
* but it's more complicated for the recursive/multiple snapshot case.
*
* The dsl_fs_ss_limit_check function does recursively check the count
* at each level up the tree but since it is validating each snapshot
* independently we need to be sure that we are validating the complete
* count for the entire set of snapshots. We do this by rolling up the
* counts for each component of the name into an nvlist and then
* checking each of those cases with the aggregated count.
*
* This approach properly handles not only the recursive snapshot
* case (where we get all of those on the ddsa_snaps list) but also
* the sibling case (e.g. snapshot a/b and a/c so that we will also
* validate the limit on 'a' using a count of 2).
*
* We validate the snapshot names in the third loop and only report
* name errors once.
*/
if (dmu_tx_is_syncing(tx)) {
char *nm;
nvlist_t *cnt_track = NULL;
cnt_track = fnvlist_alloc();
nm = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/* Rollup aggregated counts into the cnt_track list */
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL;
pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
char *pdelim;
uint64_t val;
(void) strlcpy(nm, nvpair_name(pair), MAXPATHLEN);
pdelim = strchr(nm, '@');
if (pdelim == NULL)
continue;
*pdelim = '\0';
do {
if (nvlist_lookup_uint64(cnt_track, nm,
&val) == 0) {
/* update existing entry */
fnvlist_add_uint64(cnt_track, nm,
val + 1);
} else {
/* add to list */
fnvlist_add_uint64(cnt_track, nm, 1);
}
pdelim = strrchr(nm, '/');
if (pdelim != NULL)
*pdelim = '\0';
} while (pdelim != NULL);
}
kmem_free(nm, MAXPATHLEN);
/* Check aggregated counts at each level */
for (pair = nvlist_next_nvpair(cnt_track, NULL);
pair != NULL; pair = nvlist_next_nvpair(cnt_track, pair)) {
int error = 0;
const char *name;
uint64_t cnt = 0;
dsl_dataset_t *ds;
name = nvpair_name(pair);
cnt = fnvpair_value_uint64(pair);
ASSERT(cnt > 0);
error = dsl_dataset_hold(dp, name, FTAG, &ds);
if (error == 0) {
error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
ZFS_PROP_SNAPSHOT_LIMIT, NULL,
ddsa->ddsa_cr, ddsa->ddsa_proc);
dsl_dataset_rele(ds, FTAG);
}
if (error != 0) {
if (ddsa->ddsa_errors != NULL)
fnvlist_add_int32(ddsa->ddsa_errors,
name, error);
rv = error;
/* only report one error for this check */
break;
}
}
nvlist_free(cnt_track);
}
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
int error = 0;
dsl_dataset_t *ds;
const char *name, *atp = NULL;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
name = nvpair_name(pair);
if (strlen(name) >= ZFS_MAX_DATASET_NAME_LEN)
error = SET_ERROR(ENAMETOOLONG);
if (error == 0) {
atp = strchr(name, '@');
if (atp == NULL)
error = SET_ERROR(EINVAL);
if (error == 0)
(void) strlcpy(dsname, name, atp - name + 1);
}
if (error == 0)
error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
if (error == 0) {
/* passing 0/NULL skips dsl_fs_ss_limit_check */
error = dsl_dataset_snapshot_check_impl(ds,
atp + 1, tx, B_FALSE, 0, NULL, NULL);
dsl_dataset_rele(ds, FTAG);
}
if (error != 0) {
if (ddsa->ddsa_errors != NULL) {
fnvlist_add_int32(ddsa->ddsa_errors,
name, error);
}
rv = error;
}
}
return (rv);
}
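/*
 * Worked example: snapshotting a/b@today and a/c@today in one request adds
 * cnt_track entries a/b = 1, a/c = 1, and a = 2, so the snapshot limit on
 * 'a' is checked once against the aggregated count of 2 rather than twice
 * against 1.
 */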
void
dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dmu_buf_t *dbuf;
dsl_dataset_phys_t *dsphys;
uint64_t dsobj, crtxg;
objset_t *mos = dp->dp_meta_objset;
objset_t *os __maybe_unused;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
/*
* If we are on an old pool, the zil must not be active, in which
* case it will be zeroed. Usually zil_suspend() accomplishes this.
*/
ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
dmu_objset_from_ds(ds, &os) != 0 ||
memcmp(&os->os_phys->os_zil_header, &zero_zil,
sizeof (zero_zil)) == 0);
/* Should not snapshot a dirty dataset. */
ASSERT(!txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
ds, tx->tx_txg));
dsl_fs_ss_count_adjust(ds->ds_dir, 1, DD_FIELD_SNAPSHOT_COUNT, tx);
/*
* The origin's ds_creation_txg has to be < TXG_INITIAL
*/
if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
crtxg = 1;
else
crtxg = tx->tx_txg;
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
memset(dsphys, 0, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = ds->ds_dir->dd_object;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_prev_snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsphys->ds_prev_snap_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsphys->ds_next_snap_obj = ds->ds_object;
dsphys->ds_num_children = 1;
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = crtxg;
dsphys->ds_deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
dsphys->ds_referenced_bytes = dsl_dataset_phys(ds)->ds_referenced_bytes;
dsphys->ds_compressed_bytes = dsl_dataset_phys(ds)->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes =
dsl_dataset_phys(ds)->ds_uncompressed_bytes;
dsphys->ds_flags = dsl_dataset_phys(ds)->ds_flags;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsphys->ds_bp = dsl_dataset_phys(ds)->ds_bp;
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dmu_buf_rele(dbuf, FTAG);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (zfeature_active(f, ds->ds_feature[f])) {
dsl_dataset_activate_feature(dsobj, f,
ds->ds_feature[f], tx);
}
}
ASSERT3U(ds->ds_prev != 0, ==,
dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
if (ds->ds_prev) {
uint64_t next_clones_obj =
dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj;
ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object ||
dsl_dataset_phys(ds->ds_prev)->ds_num_children > 1);
if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
dsl_dataset_phys(ds->ds_prev)->ds_creation_txg);
dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj = dsobj;
} else if (next_clones_obj != 0) {
dsl_dataset_remove_from_next_clones(ds->ds_prev,
dsphys->ds_next_snap_obj, tx);
VERIFY0(zap_add_int(mos,
next_clones_obj, dsobj, tx));
}
}
/*
* If we have a reference-reservation on this dataset, we will
* need to increase the amount of refreservation being charged
* since our unique space is going to zero.
*/
if (ds->ds_reserved) {
int64_t delta;
ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
delta = MIN(dsl_dataset_phys(ds)->ds_unique_bytes,
ds->ds_reserved);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
delta, 0, 0, tx);
}
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_deadlist_obj =
dsl_deadlist_clone(&ds->ds_deadlist, UINT64_MAX,
dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_open(&ds->ds_deadlist, mos,
dsl_dataset_phys(ds)->ds_deadlist_obj);
dsl_deadlist_add_key(&ds->ds_deadlist,
dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
dsl_bookmark_snapshotted(ds, tx);
if (dsl_dataset_remap_deadlist_exists(ds)) {
uint64_t remap_deadlist_obj =
dsl_dataset_get_remap_deadlist_object(ds);
/*
* Move the remap_deadlist to the snapshot. The head
* will create a new remap deadlist on demand, from
* dsl_dataset_block_remapped().
*/
dsl_dataset_unset_remap_deadlist_object(ds, tx);
dsl_deadlist_close(&ds->ds_remap_deadlist);
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_add(mos, dsobj, DS_FIELD_REMAP_DEADLIST,
sizeof (remap_deadlist_obj), 1, &remap_deadlist_obj, tx));
}
/*
* Create an ivset guid for this snapshot if the dataset is
* encrypted. This may be overridden by a raw receive. A
* previous implementation of this code did not have this
* field as part of the on-disk format for ZFS encryption
* (see errata #4). As part of the remediation for this
* issue, we ask the user to enable the bookmark_v2 feature
* which is now a dependency of the encryption feature. We
* use this as a heuristic to determine when the user has
* elected to correct any datasets created with the old code.
* As a result, we only do this step if the bookmark_v2
* feature is enabled, which limits the number of states a
* given pool / dataset can be in with regard to correcting
* the issue.
*/
if (ds->ds_dir->dd_crypto_obj != 0 &&
spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2)) {
uint64_t ivset_guid = unique_create();
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_add(mos, dsobj, DS_FIELD_IVSET_GUID,
sizeof (ivset_guid), 1, &ivset_guid, tx));
}
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, <, tx->tx_txg);
dsl_dataset_phys(ds)->ds_prev_snap_obj = dsobj;
dsl_dataset_phys(ds)->ds_prev_snap_txg = crtxg;
dsl_dataset_phys(ds)->ds_unique_bytes = 0;
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
VERIFY0(zap_add(mos, dsl_dataset_phys(ds)->ds_snapnames_zapobj,
snapname, 8, 1, &dsobj, tx));
if (ds->ds_prev)
dsl_dataset_rele(ds->ds_prev, ds);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, ds, &ds->ds_prev));
dsl_scan_ds_snapshotted(ds, tx);
dsl_dir_snap_cmtime_update(ds->ds_dir, tx);
if (zfs_snapshot_history_enabled)
spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, " ");
}
void
dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_arg_t *ddsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
dsl_dataset_t *ds;
const char *name, *atp;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
name = nvpair_name(pair);
atp = strchr(name, '@');
(void) strlcpy(dsname, name, atp - name + 1);
VERIFY0(dsl_dataset_hold(dp, dsname, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, atp + 1, tx);
if (ddsa->ddsa_props != NULL) {
dsl_props_set_sync_impl(ds->ds_prev,
ZPROP_SRC_LOCAL, ddsa->ddsa_props, tx);
}
dsl_dataset_rele(ds, FTAG);
}
}
/*
* The snapshots must all be in the same pool.
* All-or-nothing: if there are any failures, nothing will be modified.
*/
int
dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
{
dsl_dataset_snapshot_arg_t ddsa;
nvpair_t *pair;
boolean_t needsuspend;
int error;
spa_t *spa;
const char *firstname;
nvlist_t *suspended = NULL;
pair = nvlist_next_nvpair(snaps, NULL);
if (pair == NULL)
return (0);
firstname = nvpair_name(pair);
error = spa_open(firstname, &spa, FTAG);
if (error != 0)
return (error);
needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
spa_close(spa, FTAG);
if (needsuspend) {
suspended = fnvlist_alloc();
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
char fsname[ZFS_MAX_DATASET_NAME_LEN];
const char *snapname = nvpair_name(pair);
const char *atp;
void *cookie;
atp = strchr(snapname, '@');
if (atp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
(void) strlcpy(fsname, snapname, atp - snapname + 1);
error = zil_suspend(fsname, &cookie);
if (error != 0)
break;
fnvlist_add_uint64(suspended, fsname,
(uintptr_t)cookie);
}
}
ddsa.ddsa_snaps = snaps;
ddsa.ddsa_props = props;
ddsa.ddsa_errors = errors;
ddsa.ddsa_cr = CRED();
ddsa.ddsa_proc = curproc;
if (error == 0) {
error = dsl_sync_task(firstname, dsl_dataset_snapshot_check,
dsl_dataset_snapshot_sync, &ddsa,
fnvlist_num_pairs(snaps) * 3, ZFS_SPACE_CHECK_NORMAL);
}
if (suspended != NULL) {
for (pair = nvlist_next_nvpair(suspended, NULL); pair != NULL;
pair = nvlist_next_nvpair(suspended, pair)) {
zil_resume((void *)(uintptr_t)
fnvpair_value_uint64(pair));
}
fnvlist_free(suspended);
}
if (error == 0) {
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
zvol_create_minor(nvpair_name(pair));
}
}
return (error);
}
typedef struct dsl_dataset_snapshot_tmp_arg {
const char *ddsta_fsname;
const char *ddsta_snapname;
minor_t ddsta_cleanup_minor;
const char *ddsta_htag;
} dsl_dataset_snapshot_tmp_arg_t;
static int
dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
error = dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds);
if (error != 0)
return (error);
/* NULL cred means no limit check for tmp snapshot */
error = dsl_dataset_snapshot_check_impl(ds, ddsta->ddsta_snapname,
tx, B_FALSE, 0, NULL, NULL);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
B_TRUE, tx);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_snapshot_tmp_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, ddsta->ddsta_snapname, tx);
dsl_dataset_user_hold_sync_one(ds->ds_prev, ddsta->ddsta_htag,
ddsta->ddsta_cleanup_minor, gethrestime_sec(), tx);
dsl_destroy_snapshot_sync_impl(ds->ds_prev, B_TRUE, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
minor_t cleanup_minor, const char *htag)
{
dsl_dataset_snapshot_tmp_arg_t ddsta;
int error;
spa_t *spa;
boolean_t needsuspend;
void *cookie;
ddsta.ddsta_fsname = fsname;
ddsta.ddsta_snapname = snapname;
ddsta.ddsta_cleanup_minor = cleanup_minor;
ddsta.ddsta_htag = htag;
error = spa_open(fsname, &spa, FTAG);
if (error != 0)
return (error);
needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
spa_close(spa, FTAG);
if (needsuspend) {
error = zil_suspend(fsname, &cookie);
if (error != 0)
return (error);
}
error = dsl_sync_task(fsname, dsl_dataset_snapshot_tmp_check,
dsl_dataset_snapshot_tmp_sync, &ddsta, 3, ZFS_SPACE_CHECK_RESERVED);
if (needsuspend)
zil_resume(cookie);
return (error);
}
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(ds->ds_objset != NULL);
ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);
/*
* in case we had to change ds_fsid_guid when we opened it,
* sync it out now.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_fsid_guid = ds->ds_fsid_guid;
if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_OBJECT, 8, 1,
&ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_OFFSET, 8, 1,
&ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_BYTES, 8, 1,
&ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));
ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;
}
dmu_objset_sync(ds->ds_objset, zio, tx);
}
/*
* Check if the percentage of blocks shared between the clone and the
* snapshot (as opposed to those that are clone only) is below a certain
* threshold.
*/
static boolean_t
dsl_livelist_should_disable(dsl_dataset_t *ds)
{
uint64_t used, referenced;
int percent_shared;
used = dsl_dir_get_usedds(ds->ds_dir);
referenced = dsl_get_referenced(ds);
if (referenced == 0)
return (B_FALSE);
percent_shared = (100 * (referenced - used)) / referenced;
if (percent_shared <= zfs_livelist_min_percent_shared)
return (B_TRUE);
return (B_FALSE);
}
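/*
* Worked example (figures assumed, not from the original change): with
* used == 9000 and referenced == 10000 (both in bytes), percent_shared ==
* (100 * (10000 - 9000)) / 10000 == 10.  If zfs_livelist_min_percent_shared
* is, say, 75, the function returns B_TRUE and the livelist is disabled,
* since tracking frees stops paying off once the clone has diverged this
* far from its origin.
*/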
/*
* Check if it is possible to combine two livelist entries into one.
* This is the case if the combined number of 'live' blkptrs (ALLOCs that
* don't have a matching FREE) is under the maximum sublist size.
* We check this by subtracting twice the total number of frees from the total
* number of blkptrs. FREEs are counted twice because each FREE blkptr
* will cancel out an ALLOC blkptr when the livelist is processed.
*/
static boolean_t
dsl_livelist_should_condense(dsl_deadlist_entry_t *first,
dsl_deadlist_entry_t *next)
{
uint64_t total_free = first->dle_bpobj.bpo_phys->bpo_num_freed +
next->dle_bpobj.bpo_phys->bpo_num_freed;
uint64_t total_entries = first->dle_bpobj.bpo_phys->bpo_num_blkptrs +
next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
if ((total_entries - (2 * total_free)) < zfs_livelist_max_entries)
return (B_TRUE);
return (B_FALSE);
}
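/*
* Worked example (figures assumed): if the two sublists hold 600 and 500
* blkptrs with 200 and 150 FREEs respectively, total_entries == 1100 and
* total_free == 350, so the number of surviving ALLOCs is
* 1100 - 2 * 350 == 400.  The pair is condensed only if that figure is
* below zfs_livelist_max_entries.
*/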
typedef struct try_condense_arg {
spa_t *spa;
dsl_dataset_t *ds;
} try_condense_arg_t;
/*
* Iterate over the livelist entries, searching for a pair to condense.
* A nonzero return value means stop, 0 means keep looking.
*/
static int
dsl_livelist_try_condense(void *arg, dsl_deadlist_entry_t *first)
{
try_condense_arg_t *tca = arg;
spa_t *spa = tca->spa;
dsl_dataset_t *ds = tca->ds;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
dsl_deadlist_entry_t *next;
/* The condense thread has not yet been created at import */
if (spa->spa_livelist_condense_zthr == NULL)
return (1);
/* A condense is already in progress */
if (spa->spa_to_condense.ds != NULL)
return (1);
next = AVL_NEXT(&ll->dl_tree, &first->dle_node);
/* The livelist has only one entry - don't condense it */
if (next == NULL)
return (1);
/* Next is the newest entry - don't condense it */
if (AVL_NEXT(&ll->dl_tree, &next->dle_node) == NULL)
return (1);
/* This pair is not ready to condense but keep looking */
if (!dsl_livelist_should_condense(first, next))
return (0);
/*
* Add a ref to prevent the dataset from being evicted while
* the condense zthr or synctask are running. Ref will be
* released at the end of the condense synctask
*/
dmu_buf_add_ref(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = ds;
spa->spa_to_condense.first = first;
spa->spa_to_condense.next = next;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
zthr_wakeup(spa->spa_livelist_condense_zthr);
return (1);
}
static void
dsl_flush_pending_livelist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_dir_t *dd = ds->ds_dir;
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
dsl_deadlist_entry_t *last = dsl_deadlist_last(&dd->dd_livelist);
/* Check if we need to add a new sub-livelist */
if (last == NULL) {
/* The livelist is empty */
dsl_deadlist_add_key(&dd->dd_livelist,
tx->tx_txg - 1, tx);
} else if (spa_sync_pass(spa) == 1) {
/*
* Check if the newest entry is full. If it is, make a new one.
* We only do this once per sync because we could overfill a
* sublist in one sync pass and don't want to add another entry
* for a txg that is already represented. This ensures that
* blkptrs born in the same txg are stored in the same sublist.
*/
bpobj_t bpobj = last->dle_bpobj;
uint64_t all = bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t free = bpobj.bpo_phys->bpo_num_freed;
uint64_t alloc = all - free;
if (alloc > zfs_livelist_max_entries) {
dsl_deadlist_add_key(&dd->dd_livelist,
tx->tx_txg - 1, tx);
}
}
/* Insert each entry into the on-disk livelist */
bplist_iterate(&dd->dd_pending_allocs,
dsl_deadlist_insert_alloc_cb, &dd->dd_livelist, tx);
bplist_iterate(&dd->dd_pending_frees,
dsl_deadlist_insert_free_cb, &dd->dd_livelist, tx);
/* Attempt to condense every pair of adjacent entries */
try_condense_arg_t arg = {
.spa = spa,
.ds = ds
};
dsl_deadlist_iterate(&dd->dd_livelist, dsl_livelist_try_condense,
&arg);
}
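/*
* Worked example (figures assumed): if the newest sublist records 12000
* blkptrs of which 2500 are FREEs, alloc == 9500 live entries.  Only when
* that figure exceeds zfs_livelist_max_entries is a new sublist keyed at
* tx->tx_txg - 1, and because the check runs only on sync pass 1, blkptrs
* born in the same txg always land in the same sublist.
*/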
void
dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *os = ds->ds_objset;
bplist_iterate(&ds->ds_pending_deadlist,
dsl_deadlist_insert_alloc_cb, &ds->ds_deadlist, tx);
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist)) {
dsl_flush_pending_livelist(ds, tx);
if (dsl_livelist_should_disable(ds)) {
dsl_dir_remove_livelist(ds->ds_dir, tx, B_TRUE);
}
}
dsl_bookmark_sync_done(ds, tx);
multilist_destroy(&os->os_synced_dnodes);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE;
else
ASSERT0(os->os_next_write_raw[tx->tx_txg & TXG_MASK]);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (zfeature_active(f,
ds->ds_feature_activation[f])) {
if (zfeature_active(f, ds->ds_feature[f]))
continue;
dsl_dataset_activate_feature(ds->ds_object, f,
ds->ds_feature_activation[f], tx);
ds->ds_feature[f] = ds->ds_feature_activation[f];
}
}
ASSERT(!dmu_objset_is_dirty(os, dmu_tx_get_txg(tx)));
}
int
get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val)
{
uint64_t count = 0;
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
/*
* There may be missing entries in ds_next_clones_obj
* due to a bug in a previous version of the code.
* Only trust it if it has the right number of entries.
*/
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
VERIFY0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
&count));
}
if (count != dsl_dataset_phys(ds)->ds_num_children - 1) {
return (SET_ERROR(ENOENT));
}
for (zap_cursor_init(&zc, mos,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_dataset_t *clone;
char buf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
za.za_first_integer, FTAG, &clone));
dsl_dir_name(clone->ds_dir, buf);
fnvlist_add_boolean(val, buf);
dsl_dataset_rele(clone, FTAG);
}
zap_cursor_fini(&zc);
return (0);
}
void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
nvlist_t *propval = fnvlist_alloc();
nvlist_t *val = fnvlist_alloc();
if (get_clones_stat_impl(ds, val) == 0) {
fnvlist_add_nvlist(propval, ZPROP_VALUE, val);
fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
propval);
}
nvlist_free(val);
nvlist_free(propval);
}
static char *
get_receive_resume_token_impl(dsl_dataset_t *ds)
{
if (!dsl_dataset_has_resume_receive_state(ds))
return (NULL);
dsl_pool_t *dp = ds->ds_dir->dd_pool;
char *str;
void *packed;
uint8_t *compressed;
uint64_t val;
nvlist_t *token_nv = fnvlist_alloc();
size_t packed_size, compressed_size;
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "fromguid", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "object", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "offset", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_BYTES, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "bytes", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "toguid", val);
}
char buf[MAXNAMELEN];
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) {
fnvlist_add_string(token_nv, "toname", buf);
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_LARGEBLOCK) == 0) {
fnvlist_add_boolean(token_nv, "largeblockok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_EMBEDOK) == 0) {
fnvlist_add_boolean(token_nv, "embedok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_COMPRESSOK) == 0) {
fnvlist_add_boolean(token_nv, "compressok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_RAWOK) == 0) {
fnvlist_add_boolean(token_nv, "rawok");
}
if (dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS)) {
uint64_t num_redact_snaps = 0;
uint64_t *redact_snaps = NULL;
VERIFY3B(dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &num_redact_snaps,
&redact_snaps), ==, B_TRUE);
fnvlist_add_uint64_array(token_nv, "redact_snaps",
redact_snaps, num_redact_snaps);
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS) == 0) {
uint64_t num_redact_snaps = 0, int_size = 0;
uint64_t *redact_snaps = NULL;
VERIFY0(zap_length(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, &int_size,
&num_redact_snaps));
ASSERT3U(int_size, ==, sizeof (uint64_t));
redact_snaps = kmem_alloc(int_size * num_redact_snaps,
KM_SLEEP);
VERIFY0(zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, int_size,
num_redact_snaps, redact_snaps));
fnvlist_add_uint64_array(token_nv, "book_redact_snaps",
redact_snaps, num_redact_snaps);
kmem_free(redact_snaps, int_size * num_redact_snaps);
}
packed = fnvlist_pack(token_nv, &packed_size);
fnvlist_free(token_nv);
compressed = kmem_alloc(packed_size, KM_SLEEP);
compressed_size = gzip_compress(packed, compressed,
packed_size, packed_size, 6);
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, compressed_size, &cksum);
size_t alloc_size = compressed_size * 2 + 1;
str = kmem_alloc(alloc_size, KM_SLEEP);
for (int i = 0; i < compressed_size; i++) {
size_t offset = i * 2;
(void) snprintf(str + offset, alloc_size - offset,
"%02x", compressed[i]);
}
str[compressed_size * 2] = '\0';
char *propval = kmem_asprintf("%u-%llx-%llx-%s",
ZFS_SEND_RESUME_TOKEN_VERSION,
(longlong_t)cksum.zc_word[0],
(longlong_t)packed_size, str);
kmem_free(packed, packed_size);
kmem_free(str, alloc_size);
kmem_free(compressed, packed_size);
return (propval);
}
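/*
* Token layout, for reference (values illustrative): the result is
* "<version>-<fletcher4 word 0, hex>-<packed nvlist size, hex>-<hex of the
* gzip-compressed packed nvlist>".  The sketch below (a hypothetical,
* userland-style helper guarded out with #if 0, not part of the original
* change) reproduces the hex-encoding step and shows why alloc_size is
* compressed_size * 2 + 1: two hex digits per byte plus a terminating NUL.
*/
#if 0
static void
resume_token_hex_sketch(const unsigned char *buf, unsigned long len,
    char *out /* must hold len * 2 + 1 bytes */)
{
	static const char hexdig[] = "0123456789abcdef";

	for (unsigned long i = 0; i < len; i++) {
		out[2 * i] = hexdig[buf[i] >> 4];
		out[2 * i + 1] = hexdig[buf[i] & 0xf];
	}
	out[2 * len] = '\0';
}
#endif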
/*
* Returns a string that represents the receive resume state token. It should
* be freed with kmem_strfree(). NULL is returned if no resume state is present.
*/
char *
get_receive_resume_token(dsl_dataset_t *ds)
{
/*
* A failed "newfs" (e.g. full) resumable receive leaves
* the stats set on this dataset. Check here for the prop.
*/
char *token = get_receive_resume_token_impl(ds);
if (token != NULL)
return (token);
/*
* A failed incremental resumable receive leaves the
* stats set on our child named "%recv". Check the child
* for the prop.
*/
/* 6 extra bytes for /%recv */
char name[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_t *recv_ds;
dsl_dataset_name(ds, name);
if (strlcat(name, "/", sizeof (name)) < sizeof (name) &&
strlcat(name, recv_clone_name, sizeof (name)) < sizeof (name) &&
dsl_dataset_hold(ds->ds_dir->dd_pool, name, FTAG, &recv_ds) == 0) {
token = get_receive_resume_token_impl(recv_ds);
dsl_dataset_rele(recv_ds, FTAG);
}
return (token);
}
uint64_t
dsl_get_refratio(dsl_dataset_t *ds)
{
uint64_t ratio = dsl_dataset_phys(ds)->ds_compressed_bytes == 0 ? 100 :
(dsl_dataset_phys(ds)->ds_uncompressed_bytes * 100 /
dsl_dataset_phys(ds)->ds_compressed_bytes);
return (ratio);
}
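/*
* Worked example (figures assumed): with ds_uncompressed_bytes == 300000
* and ds_compressed_bytes == 120000 the ratio is 300000 * 100 / 120000 ==
* 250, which userland typically renders as a refratio of 2.50x.  A dataset
* with no compressed bytes reports 100 (1.00x) rather than dividing by
* zero.
*/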
uint64_t
dsl_get_logicalreferenced(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_uncompressed_bytes);
}
uint64_t
dsl_get_compressratio(dsl_dataset_t *ds)
{
if (ds->ds_is_snapshot) {
return (dsl_get_refratio(ds));
} else {
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
uint64_t val = dsl_dir_get_compressratio(dd);
mutex_exit(&dd->dd_lock);
return (val);
}
}
uint64_t
dsl_get_used(dsl_dataset_t *ds)
{
if (ds->ds_is_snapshot) {
return (dsl_dataset_phys(ds)->ds_unique_bytes);
} else {
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
uint64_t val = dsl_dir_get_used(dd);
mutex_exit(&dd->dd_lock);
return (val);
}
}
uint64_t
dsl_get_creation(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_creation_time);
}
uint64_t
dsl_get_creationtxg(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_creation_txg);
}
uint64_t
dsl_get_refquota(dsl_dataset_t *ds)
{
return (ds->ds_quota);
}
uint64_t
dsl_get_refreservation(dsl_dataset_t *ds)
{
return (ds->ds_reserved);
}
uint64_t
dsl_get_guid(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_guid);
}
uint64_t
dsl_get_unique(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_unique_bytes);
}
uint64_t
dsl_get_objsetid(dsl_dataset_t *ds)
{
return (ds->ds_object);
}
uint64_t
dsl_get_userrefs(dsl_dataset_t *ds)
{
return (ds->ds_userrefs);
}
uint64_t
dsl_get_defer_destroy(dsl_dataset_t *ds)
{
return (DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
}
uint64_t
dsl_get_referenced(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_referenced_bytes);
}
uint64_t
dsl_get_numclones(dsl_dataset_t *ds)
{
ASSERT(ds->ds_is_snapshot);
return (dsl_dataset_phys(ds)->ds_num_children - 1);
}
uint64_t
dsl_get_inconsistent(dsl_dataset_t *ds)
{
return ((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT) ?
1 : 0);
}
uint64_t
dsl_get_redacted(dsl_dataset_t *ds)
{
return (dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS));
}
uint64_t
dsl_get_available(dsl_dataset_t *ds)
{
uint64_t refdbytes = dsl_get_referenced(ds);
uint64_t availbytes = dsl_dir_space_available(ds->ds_dir,
NULL, 0, TRUE);
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
availbytes +=
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
}
if (ds->ds_quota != 0) {
/*
* Adjust available bytes according to refquota
*/
if (refdbytes < ds->ds_quota) {
availbytes = MIN(availbytes,
ds->ds_quota - refdbytes);
} else {
availbytes = 0;
}
}
return (availbytes);
}
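/*
* Worked example (figures assumed): suppose the dir reports availbytes ==
* 50G, ds_reserved == 10G, ds_unique_bytes == 4G, ds_quota == 20G and
* refdbytes == 15G.  The unconsumed refreservation adds 10G - 4G == 6G,
* giving 56G, and the refquota then clamps the result to
* MIN(56G, 20G - 15G) == 5G.  Had refdbytes already met or exceeded the
* refquota, available space would be reported as 0.
*/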
int
dsl_get_written(dsl_dataset_t *ds, uint64_t *written)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_dataset_t *prev;
int err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err == 0) {
uint64_t comp, uncomp;
err = dsl_dataset_space_written(prev, ds, written,
&comp, &uncomp);
dsl_dataset_rele(prev, FTAG);
}
return (err);
}
/*
* 'snap' should be a buffer of size ZFS_MAX_DATASET_NAME_LEN.
*/
int
dsl_get_prev_snap(dsl_dataset_t *ds, char *snap)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
if (ds->ds_prev != NULL && ds->ds_prev != dp->dp_origin_snap) {
dsl_dataset_name(ds->ds_prev, snap);
return (0);
} else {
return (SET_ERROR(ENOENT));
}
}
void
dsl_get_redact_snaps(dsl_dataset_t *ds, nvlist_t *propval)
{
uint64_t nsnaps;
uint64_t *snaps;
if (dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &nsnaps, &snaps)) {
fnvlist_add_uint64_array(propval, ZPROP_VALUE, snaps,
nsnaps);
}
}
/*
* Returns the mountpoint property and source for the given dataset in the value
* and source buffers. The value buffer must be at least as large as MAXPATHLEN
* and the source buffer at least as large as ZFS_MAX_DATASET_NAME_LEN.
* Returns 0 on success and an error on failure.
*/
int
dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
char *source)
{
int error;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
/* Retrieve the mountpoint value stored in the zap object */
error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
ZAP_MAXVALUELEN, value, source);
if (error != 0) {
return (error);
}
/*
* Process the dsname and source to find the full mountpoint string.
* Can be skipped for 'legacy' or 'none'.
*/
if (value[0] == '/') {
char *buf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
char *root = buf;
const char *relpath;
/*
* If we inherit the mountpoint, even from a dataset
* with a received value, the source will be the path of
* the dataset we inherit from. If source is
* ZPROP_SOURCE_VAL_RECVD, the received value is not
* inherited.
*/
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
relpath = "";
} else {
ASSERT0(strncmp(dsname, source, strlen(source)));
relpath = dsname + strlen(source);
if (relpath[0] == '/')
relpath++;
}
spa_altroot(dp->dp_spa, root, ZAP_MAXVALUELEN);
/*
* Special case an alternate root of '/'. This will
* avoid having multiple leading slashes in the
* mountpoint path.
*/
if (strcmp(root, "/") == 0)
root++;
/*
* If the mountpoint is '/' then skip over this
* if we are obtaining either an alternate root or
* an inherited mountpoint.
*/
char *mnt = value;
if (value[1] == '\0' && (root[0] != '\0' ||
relpath[0] != '\0'))
mnt = value + 1;
mnt = kmem_strdup(mnt);
if (relpath[0] == '\0') {
(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s",
root, mnt);
} else {
(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s%s%s",
root, mnt, relpath[0] == '@' ? "" : "/",
relpath);
}
kmem_free(buf, ZAP_MAXVALUELEN);
kmem_strfree(mnt);
}
return (0);
}
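/*
* Worked example (names assumed): for dsname == "tank/home/user"
* inheriting mountpoint "/export" from source == "tank/home", relpath
* becomes "user" and, with no altroot, the final value is "/export/user".
* With an altroot of "/mnt" the same dataset resolves to
* "/mnt/export/user", while an altroot of "/" is skipped so the result
* does not begin with "//".
*/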
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO,
dsl_get_refratio(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
dsl_get_logicalreferenced(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
dsl_get_compressratio(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
dsl_get_used(ds));
if (ds->ds_is_snapshot) {
get_clones_stat(ds, nv);
} else {
char buf[ZFS_MAX_DATASET_NAME_LEN];
if (dsl_get_prev_snap(ds, buf) == 0)
dsl_prop_nvlist_add_string(nv, ZFS_PROP_PREV_SNAP,
buf);
dsl_dir_stats(ds->ds_dir, nv);
}
nvlist_t *propval = fnvlist_alloc();
dsl_get_redact_snaps(ds, propval);
fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
propval);
nvlist_free(propval);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE,
dsl_get_available(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED,
dsl_get_referenced(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
dsl_get_creation(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
dsl_get_creationtxg(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
dsl_get_refquota(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
dsl_get_refreservation(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
dsl_get_guid(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
dsl_get_unique(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
dsl_get_objsetid(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
dsl_get_userrefs(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
dsl_get_defer_destroy(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOTS_CHANGED,
dsl_dir_snap_cmtime(ds->ds_dir).tv_sec);
dsl_dataset_crypt_stats(ds, nv);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
uint64_t written;
if (dsl_get_written(ds, &written) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
written);
}
}
if (!dsl_dataset_is_snapshot(ds)) {
char *token = get_receive_resume_token(ds);
if (token != NULL) {
dsl_prop_nvlist_add_string(nv,
ZFS_PROP_RECEIVE_RESUME_TOKEN, token);
kmem_strfree(token);
}
}
}
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
stat->dds_creation_txg = dsl_get_creationtxg(ds);
stat->dds_inconsistent = dsl_get_inconsistent(ds);
stat->dds_guid = dsl_get_guid(ds);
stat->dds_redacted = dsl_get_redacted(ds);
stat->dds_origin[0] = '\0';
if (ds->ds_is_snapshot) {
stat->dds_is_snapshot = B_TRUE;
stat->dds_num_clones = dsl_get_numclones(ds);
} else {
stat->dds_is_snapshot = B_FALSE;
stat->dds_num_clones = 0;
if (dsl_dir_is_clone(ds->ds_dir)) {
dsl_dir_get_origin(ds->ds_dir, stat->dds_origin);
}
}
}
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
return (ds->ds_fsid_guid);
}
void
dsl_dataset_space(dsl_dataset_t *ds,
uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
{
*refdbytesp = dsl_dataset_phys(ds)->ds_referenced_bytes;
*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes)
*availbytesp +=
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
if (ds->ds_quota != 0) {
/*
* Adjust available bytes according to refquota
*/
if (*refdbytesp < ds->ds_quota)
*availbytesp = MIN(*availbytesp,
ds->ds_quota - *refdbytesp);
else
*availbytesp = 0;
}
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
*usedobjsp = BP_GET_FILL(&dsl_dataset_phys(ds)->ds_bp);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
boolean_t
dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
{
dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
uint64_t birth;
ASSERT(dsl_pool_config_held(dp));
if (snap == NULL)
return (B_FALSE);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
birth = dsl_dataset_get_blkptr(ds)->blk_birth;
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (birth > dsl_dataset_phys(snap)->ds_creation_txg) {
objset_t *os, *os_snap;
/*
* It may be that only the ZIL differs, because it was
* reset in the head. Don't count that as being
* modified.
*/
if (dmu_objset_from_ds(ds, &os) != 0)
return (B_TRUE);
if (dmu_objset_from_ds(snap, &os_snap) != 0)
return (B_TRUE);
return (memcmp(&os->os_phys->os_meta_dnode,
&os_snap->os_phys->os_meta_dnode,
sizeof (os->os_phys->os_meta_dnode)) != 0);
}
return (B_FALSE);
}
static int
dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
dsl_dataset_t *hds, void *arg)
{
(void) dp;
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
int error;
uint64_t val;
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
if (error != 0) {
/* ignore nonexistent snapshots */
return (error == ENOENT ? 0 : error);
}
/* new name should not exist */
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
if (error == 0)
error = SET_ERROR(EEXIST);
else if (error == ENOENT)
error = 0;
/* dataset name + 1 for the "@" + the new snapshot name must fit */
if (dsl_dir_namelen(hds->ds_dir) + 1 +
strlen(ddrsa->ddrsa_newsnapname) >= ZFS_MAX_DATASET_NAME_LEN)
error = SET_ERROR(ENAMETOOLONG);
return (error);
}
int
dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
int error;
error = dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds);
if (error != 0)
return (error);
if (ddrsa->ddrsa_recursive) {
error = dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
dsl_dataset_rename_snapshot_check_impl, ddrsa,
DS_FIND_CHILDREN);
} else {
error = dsl_dataset_rename_snapshot_check_impl(dp, hds, ddrsa);
}
dsl_dataset_rele(hds, FTAG);
return (error);
}
static int
dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
dsl_dataset_t *hds, void *arg)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_dataset_t *ds;
uint64_t val;
dmu_tx_t *tx = ddrsa->ddrsa_tx;
int error;
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
ASSERT(error == 0 || error == ENOENT);
if (error == ENOENT) {
/* ignore nonexistent snapshots */
return (0);
}
VERIFY0(dsl_dataset_hold_obj(dp, val, FTAG, &ds));
/* log before we change the name */
spa_history_log_internal_ds(ds, "rename", tx,
"-> @%s", ddrsa->ddrsa_newsnapname);
VERIFY0(dsl_dataset_snap_remove(hds, ddrsa->ddrsa_oldsnapname, tx,
B_FALSE));
mutex_enter(&ds->ds_lock);
(void) strlcpy(ds->ds_snapname, ddrsa->ddrsa_newsnapname,
sizeof (ds->ds_snapname));
mutex_exit(&ds->ds_lock);
VERIFY0(zap_add(dp->dp_meta_objset,
dsl_dataset_phys(hds)->ds_snapnames_zapobj,
ds->ds_snapname, 8, 1, &ds->ds_object, tx));
zvol_rename_minors(dp->dp_spa, ddrsa->ddrsa_oldsnapname,
ddrsa->ddrsa_newsnapname, B_TRUE);
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds));
ddrsa->ddrsa_tx = tx;
if (ddrsa->ddrsa_recursive) {
VERIFY0(dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
dsl_dataset_rename_snapshot_sync_impl, ddrsa,
DS_FIND_CHILDREN));
} else {
VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp, hds, ddrsa));
}
dsl_dataset_rele(hds, FTAG);
}
int
dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive)
{
dsl_dataset_rename_snapshot_arg_t ddrsa;
ddrsa.ddrsa_fsname = fsname;
ddrsa.ddrsa_oldsnapname = oldsnapname;
ddrsa.ddrsa_newsnapname = newsnapname;
ddrsa.ddrsa_recursive = recursive;
return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
dsl_dataset_rename_snapshot_sync, &ddrsa,
1, ZFS_SPACE_CHECK_RESERVED));
}
/*
* If we're doing an ownership handoff, we need to make sure that there is
* only one long hold on the dataset. We're not allowed to change anything here
* so we don't permanently release the long hold or regular hold here. We want
* to do this only when syncing to avoid the dataset unexpectedly going away
* when we release the long hold.
*/
static int
dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
{
boolean_t held = B_FALSE;
if (!dmu_tx_is_syncing(tx))
return (0);
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_activity_lock);
uint64_t holds = zfs_refcount_count(&ds->ds_longholds) -
(owner != NULL ? 1 : 0);
/*
* The value of dd_activity_waiters can change as soon as we drop the
* lock, but we're fine with that; new waiters coming in or old
* waiters leaving doesn't cause problems, since we're going to cancel
* waiters later anyway. The goal of this check is to verify that no
* non-waiters have long-holds, and all new long-holds will be
* prevented because we're holding the pool config as writer.
*/
if (holds != dd->dd_activity_waiters)
held = B_TRUE;
mutex_exit(&dd->dd_activity_lock);
if (held)
return (SET_ERROR(EBUSY));
return (0);
}
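/*
* Worked example (figures assumed): with three long holds on the dataset,
* an owner handing off (owner != NULL) and one activity waiter, holds ==
* 3 - 1 == 2 while dd_activity_waiters == 1, so the check fails with
* EBUSY.  Only when every remaining long hold belongs to a registered
* waiter (holds == dd_activity_waiters) is the handoff allowed to proceed.
*/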
int
dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rollback_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int64_t unused_refres_delta;
int error;
error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
if (error != 0)
return (error);
/* must not be a snapshot */
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
/* must have a most recent snapshot */
if (dsl_dataset_phys(ds)->ds_prev_snap_txg < TXG_INITIAL) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ESRCH));
}
/*
* No rollback to a snapshot created in the current txg, because
* the rollback may dirty the dataset and create blocks that are
* not reachable from the rootbp while having a birth txg that
* falls into the snapshot's range.
*/
if (dmu_tx_is_syncing(tx) &&
dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EAGAIN));
}
/*
* If the expected target snapshot is specified, then check that
* the latest snapshot is it.
*/
if (ddra->ddra_tosnap != NULL) {
dsl_dataset_t *snapds;
/* Check if the target snapshot exists at all. */
error = dsl_dataset_hold(dp, ddra->ddra_tosnap, FTAG, &snapds);
if (error != 0) {
/*
* ESRCH is used to signal that the target snapshot does
* not exist, while ENOENT is used to report that
* the rolled back dataset does not exist.
* ESRCH is also used to cover other cases where the
* target snapshot is not related to the dataset being
* rolled back such as being in a different pool.
*/
if (error == ENOENT || error == EXDEV)
error = SET_ERROR(ESRCH);
dsl_dataset_rele(ds, FTAG);
return (error);
}
ASSERT(snapds->ds_is_snapshot);
/* Check if the snapshot is the latest snapshot indeed. */
if (snapds != ds->ds_prev) {
/*
* Distinguish between the case where the only problem
* is intervening snapshots (EEXIST) vs the snapshot
* not being a valid target for rollback (ESRCH).
*/
if (snapds->ds_dir == ds->ds_dir ||
(dsl_dir_is_clone(ds->ds_dir) &&
dsl_dir_phys(ds->ds_dir)->dd_origin_obj ==
snapds->ds_object)) {
error = SET_ERROR(EEXIST);
} else {
error = SET_ERROR(ESRCH);
}
dsl_dataset_rele(snapds, FTAG);
dsl_dataset_rele(ds, FTAG);
return (error);
}
dsl_dataset_rele(snapds, FTAG);
}
/* must not have any bookmarks after the most recent snapshot */
if (dsl_bookmark_latest_txg(ds) >
dsl_dataset_phys(ds)->ds_prev_snap_txg) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EEXIST));
}
error = dsl_dataset_handoff_check(ds, ddra->ddra_owner, tx);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/*
* Check if the snap we are rolling back to uses more than
* the refquota.
*/
if (ds->ds_quota != 0 &&
dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes > ds->ds_quota) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EDQUOT));
}
/*
* When we do the clone swap, we will temporarily use more space
* due to the refreservation (the head will no longer have any
* unique space, so the entire amount of the refreservation will need
* to be free). We will immediately destroy the clone, freeing
* this space, but the freeing happens over many txg's.
*/
unused_refres_delta = (int64_t)MIN(ds->ds_reserved,
dsl_dataset_phys(ds)->ds_unique_bytes);
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rollback_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds, *clone;
uint64_t cloneobj;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds));
dsl_dataset_name(ds->ds_prev, namebuf);
fnvlist_add_string(ddra->ddra_result, "target", namebuf);
cloneobj = dsl_dataset_create_sync(ds->ds_dir, "%rollback",
ds->ds_prev, DS_CREATE_FLAG_NODIRTY, kcred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(dp, cloneobj, FTAG, &clone));
dsl_dataset_clone_swap_sync_impl(clone, ds, tx);
dsl_dataset_zero_zil(ds, tx);
dsl_destroy_head_sync_impl(clone, tx);
dsl_dataset_rele(clone, FTAG);
dsl_dataset_rele(ds, FTAG);
}
/*
* Rolls back the given filesystem or volume to the most recent snapshot.
* The name of the most recent snapshot will be returned under key "target"
* in the result nvlist.
*
* If owner != NULL:
* - The existing dataset MUST be owned by the specified owner at entry
* - Upon return, dataset will still be held by the same owner, whether we
* succeed or not.
*
* This mode is required any time the existing filesystem is mounted. See
* notes above zfs_suspend_fs() for further details.
*/
int
dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
nvlist_t *result)
{
dsl_dataset_rollback_arg_t ddra;
ddra.ddra_fsname = fsname;
ddra.ddra_tosnap = tosnap;
ddra.ddra_owner = owner;
ddra.ddra_result = result;
return (dsl_sync_task(fsname, dsl_dataset_rollback_check,
dsl_dataset_rollback_sync, &ddra,
1, ZFS_SPACE_CHECK_RESERVED));
}
struct promotenode {
list_node_t link;
dsl_dataset_t *ds;
};
static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
const void *tag);
static void promote_rele(dsl_dataset_promote_arg_t *ddpa, const void *tag);
int
dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_promote_arg_t *ddpa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
struct promotenode *snap;
int err;
uint64_t unused;
uint64_t ss_mv_cnt;
size_t max_snap_len;
boolean_t conflicting_snaps;
err = promote_hold(ddpa, dp, FTAG);
if (err != 0)
return (err);
hds = ddpa->ddpa_clone;
max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;
if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
promote_rele(ddpa, FTAG);
return (SET_ERROR(EXDEV));
}
snap = list_head(&ddpa->shared_snaps);
if (snap == NULL) {
err = SET_ERROR(ENOENT);
goto out;
}
dsl_dataset_t *const origin_ds = snap->ds;
/*
* Encrypted clones share a DSL Crypto Key with their origin's dsl dir.
* When doing a promote we must make sure the encryption root for
* both the target and the target's origin does not change to avoid
* needing to rewrap encryption keys
*/
err = dsl_dataset_promote_crypt_check(hds->ds_dir, origin_ds->ds_dir);
if (err != 0)
goto out;
/*
* Compute and check the amount of space to transfer. Since this is
* so expensive, don't do the preliminary check.
*/
if (!dmu_tx_is_syncing(tx)) {
promote_rele(ddpa, FTAG);
return (0);
}
/* compute origin's new unique space */
snap = list_tail(&ddpa->clone_snaps);
ASSERT(snap != NULL);
ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
origin_ds->ds_object);
dsl_deadlist_space_range(&snap->ds->ds_deadlist,
dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
&ddpa->unique, &unused, &unused);
/*
* Walk the snapshots that we are moving
*
* Compute space to transfer. Consider the incremental changes
* to used by each snapshot:
* (my used) = (prev's used) + (blocks born) - (blocks killed)
* So each snapshot gave birth to:
* (blocks born) = (my used) - (prev's used) + (blocks killed)
* So a sequence would look like:
* (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
* Which simplifies to:
* uN + kN + kN-1 + ... + k1 + k0
* Note however, if we stop before we reach the ORIGIN we get:
* uN + kN + kN-1 + ... + kM - uM-1
*/
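/*
* Worked example of the telescoping sum above (figures assumed): for three
* snapshots with used space u0 = 1G, u1 = 3G, u2 = 4G and killed space
* k0 = 0.2G, k1 = 0.5G, k2 = 0.1G, the blocks born across the chain are
* (4 - 3 + 0.1) + (3 - 1 + 0.5) + (1 - 0 + 0.2) = 4 + 0.1 + 0.5 + 0.2 =
* 4.8G, matching uN + kN + ... + k0.  The loop below accumulates the same
* quantity by starting from the origin's referenced bytes and adding each
* moved snapshot's deadlist space.
*/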
conflicting_snaps = B_FALSE;
ss_mv_cnt = 0;
ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
for (snap = list_head(&ddpa->shared_snaps); snap;
snap = list_next(&ddpa->shared_snaps, snap)) {
uint64_t val, dlused, dlcomp, dluncomp;
dsl_dataset_t *ds = snap->ds;
ss_mv_cnt++;
/*
* If there are long holds, we won't be able to evict
* the objset.
*/
if (dsl_dataset_long_held(ds)) {
err = SET_ERROR(EBUSY);
goto out;
}
/* Check that the snapshot name does not conflict */
VERIFY0(dsl_dataset_get_snapname(ds));
if (strlen(ds->ds_snapname) >= max_snap_len) {
err = SET_ERROR(ENAMETOOLONG);
goto out;
}
err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
if (err == 0) {
fnvlist_add_boolean(ddpa->err_ds,
snap->ds->ds_snapname);
conflicting_snaps = B_TRUE;
} else if (err != ENOENT) {
goto out;
}
/* The very first snapshot does not have a deadlist */
if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
continue;
dsl_deadlist_space(&ds->ds_deadlist,
&dlused, &dlcomp, &dluncomp);
ddpa->used += dlused;
ddpa->comp += dlcomp;
ddpa->uncomp += dluncomp;
}
/*
* Check that bookmarks that are being transferred don't have
* name conflicts.
*/
for (dsl_bookmark_node_t *dbn = avl_first(&origin_ds->ds_bookmarks);
dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
dsl_dataset_phys(origin_ds)->ds_creation_txg;
dbn = AVL_NEXT(&origin_ds->ds_bookmarks, dbn)) {
if (strlen(dbn->dbn_name) >= max_snap_len) {
err = SET_ERROR(ENAMETOOLONG);
goto out;
}
zfs_bookmark_phys_t bm;
err = dsl_bookmark_lookup_impl(ddpa->ddpa_clone,
dbn->dbn_name, &bm);
if (err == 0) {
fnvlist_add_boolean(ddpa->err_ds, dbn->dbn_name);
conflicting_snaps = B_TRUE;
} else if (err == ESRCH) {
err = 0;
}
if (err != 0) {
goto out;
}
}
/*
* In order to return the full list of conflicting snapshots, we check
* whether there was a conflict after traversing all of them.
*/
if (conflicting_snaps) {
err = SET_ERROR(EEXIST);
goto out;
}
/*
* If we are a clone of a clone then we never reached ORIGIN,
* so we need to subtract out the clone origin's used space.
*/
if (ddpa->origin_origin) {
ddpa->used -=
dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
ddpa->comp -=
dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
ddpa->uncomp -=
dsl_dataset_phys(ddpa->origin_origin)->
ds_uncompressed_bytes;
}
/* Check that there is enough space and limit headroom here */
err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
0, ss_mv_cnt, ddpa->used, ddpa->cr, ddpa->proc);
if (err != 0)
goto out;
/*
* Compute the amounts of space that will be used by snapshots
* after the promotion (for both origin and clone). For each,
* it is the amount of space that will be on all of their
* deadlists (that was not born before their new origin).
*/
if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
uint64_t space;
/*
* Note, typically this will not be a clone of a clone,
* so dd_origin_txg will be < TXG_INITIAL, so
* these snaplist_space() -> dsl_deadlist_space_range()
* calls will be fast because they do not have to
* iterate over all bps.
*/
snap = list_head(&ddpa->origin_snaps);
if (snap == NULL) {
err = SET_ERROR(ENOENT);
goto out;
}
err = snaplist_space(&ddpa->shared_snaps,
snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
if (err != 0)
goto out;
err = snaplist_space(&ddpa->clone_snaps,
snap->ds->ds_dir->dd_origin_txg, &space);
if (err != 0)
goto out;
ddpa->cloneusedsnap += space;
}
if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
DD_FLAG_USED_BREAKDOWN) {
err = snaplist_space(&ddpa->origin_snaps,
dsl_dataset_phys(origin_ds)->ds_creation_txg,
&ddpa->originusedsnap);
if (err != 0)
goto out;
}
out:
promote_rele(ddpa, FTAG);
return (err);
}
void
dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_promote_arg_t *ddpa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
struct promotenode *snap;
dsl_dataset_t *origin_ds;
dsl_dataset_t *origin_head;
dsl_dir_t *dd;
dsl_dir_t *odd = NULL;
uint64_t oldnext_obj;
int64_t delta;
ASSERT(nvlist_empty(ddpa->err_ds));
VERIFY0(promote_hold(ddpa, dp, FTAG));
hds = ddpa->ddpa_clone;
ASSERT0(dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE);
snap = list_head(&ddpa->shared_snaps);
origin_ds = snap->ds;
dd = hds->ds_dir;
snap = list_head(&ddpa->origin_snaps);
origin_head = snap->ds;
/*
* We need to explicitly open odd, since origin_ds's dd will be
* changing.
*/
VERIFY0(dsl_dir_hold_obj(dp, origin_ds->ds_dir->dd_object,
NULL, FTAG, &odd));
dsl_dataset_promote_crypt_sync(hds->ds_dir, odd, tx);
/* change origin's next snap */
dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
oldnext_obj = dsl_dataset_phys(origin_ds)->ds_next_snap_obj;
snap = list_tail(&ddpa->clone_snaps);
ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
origin_ds->ds_object);
dsl_dataset_phys(origin_ds)->ds_next_snap_obj = snap->ds->ds_object;
/* change the origin's next clone */
if (dsl_dataset_phys(origin_ds)->ds_next_clones_obj) {
dsl_dataset_remove_from_next_clones(origin_ds,
snap->ds->ds_object, tx);
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dataset_phys(origin_ds)->ds_next_clones_obj,
oldnext_obj, tx));
}
/* change origin */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
ASSERT3U(dsl_dir_phys(dd)->dd_origin_obj, ==, origin_ds->ds_object);
dsl_dir_phys(dd)->dd_origin_obj = dsl_dir_phys(odd)->dd_origin_obj;
dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
dmu_buf_will_dirty(odd->dd_dbuf, tx);
dsl_dir_phys(odd)->dd_origin_obj = origin_ds->ds_object;
origin_head->ds_dir->dd_origin_txg =
dsl_dataset_phys(origin_ds)->ds_creation_txg;
/* change dd_clone entries */
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(odd)->dd_clones, hds->ds_object, tx));
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
hds->ds_object, tx));
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
origin_head->ds_object, tx));
if (dsl_dir_phys(dd)->dd_clones == 0) {
dsl_dir_phys(dd)->dd_clones =
zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES,
DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones, origin_head->ds_object, tx));
}
/*
* Move bookmarks to this dir.
*/
dsl_bookmark_node_t *dbn_next;
for (dsl_bookmark_node_t *dbn = avl_first(&origin_head->ds_bookmarks);
dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
dsl_dataset_phys(origin_ds)->ds_creation_txg;
dbn = dbn_next) {
dbn_next = AVL_NEXT(&origin_head->ds_bookmarks, dbn);
avl_remove(&origin_head->ds_bookmarks, dbn);
VERIFY0(zap_remove(dp->dp_meta_objset,
origin_head->ds_bookmarks_obj, dbn->dbn_name, tx));
dsl_bookmark_node_add(hds, dbn, tx);
}
dsl_bookmark_next_changed(hds, origin_ds, tx);
/* move snapshots to this dir */
for (snap = list_head(&ddpa->shared_snaps); snap;
snap = list_next(&ddpa->shared_snaps, snap)) {
dsl_dataset_t *ds = snap->ds;
/*
* Property callbacks are registered to a particular
* dsl_dir. Since ours is changing, evict the objset
* so that they will be unregistered from the old dsl_dir.
*/
if (ds->ds_objset) {
dmu_objset_evict(ds->ds_objset);
ds->ds_objset = NULL;
}
/* move snap name entry */
VERIFY0(dsl_dataset_get_snapname(ds));
VERIFY0(dsl_dataset_snap_remove(origin_head,
ds->ds_snapname, tx, B_TRUE));
VERIFY0(zap_add(dp->dp_meta_objset,
dsl_dataset_phys(hds)->ds_snapnames_zapobj, ds->ds_snapname,
8, 1, &ds->ds_object, tx));
dsl_fs_ss_count_adjust(hds->ds_dir, 1,
DD_FIELD_SNAPSHOT_COUNT, tx);
/* change containing dsl_dir */
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ASSERT3U(dsl_dataset_phys(ds)->ds_dir_obj, ==, odd->dd_object);
dsl_dataset_phys(ds)->ds_dir_obj = dd->dd_object;
ASSERT3P(ds->ds_dir, ==, odd);
dsl_dir_rele(ds->ds_dir, ds);
VERIFY0(dsl_dir_hold_obj(dp, dd->dd_object,
NULL, ds, &ds->ds_dir));
/* move any clone references */
if (dsl_dataset_phys(ds)->ds_next_clones_obj &&
spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_dataset_t *cnds;
uint64_t o;
if (za.za_first_integer == oldnext_obj) {
/*
* We've already moved the
* origin's reference.
*/
continue;
}
VERIFY0(dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &cnds));
o = dsl_dir_phys(cnds->ds_dir)->
dd_head_dataset_obj;
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(odd)->dd_clones, o, tx));
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones, o, tx));
dsl_dataset_rele(cnds, FTAG);
}
zap_cursor_fini(&zc);
}
ASSERT(!dsl_prop_hascb(ds));
}
/*
* Change space accounting.
* Note, ddpa->*usedsnap and dd_used_breakdown[SNAP] will either
* both be valid, or both be 0 (resulting in delta == 0). This
* is true for each of {clone,origin} independently.
*/
delta = ddpa->cloneusedsnap -
dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP];
ASSERT3S(delta, >=, 0);
ASSERT3U(ddpa->used, >=, delta);
dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
dsl_dir_diduse_space(dd, DD_USED_HEAD,
ddpa->used - delta, ddpa->comp, ddpa->uncomp, tx);
delta = ddpa->originusedsnap -
dsl_dir_phys(odd)->dd_used_breakdown[DD_USED_SNAP];
ASSERT3S(delta, <=, 0);
ASSERT3U(ddpa->used, >=, -delta);
dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
dsl_dir_diduse_space(odd, DD_USED_HEAD,
-ddpa->used - delta, -ddpa->comp, -ddpa->uncomp, tx);
dsl_dataset_phys(origin_ds)->ds_unique_bytes = ddpa->unique;
/*
* Since livelists are specific to a clone's origin txg, they
* are no longer accurate. Destroy the livelist from the clone being
* promoted. If the origin dataset is a clone, destroy its livelist
* as well.
*/
dsl_dir_remove_livelist(dd, tx, B_TRUE);
dsl_dir_remove_livelist(odd, tx, B_TRUE);
/* log history record */
spa_history_log_internal_ds(hds, "promote", tx, " ");
dsl_dir_rele(odd, FTAG);
promote_rele(ddpa, FTAG);
/*
* Transfer common error blocks from old head to new head.
*/
if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_HEAD_ERRLOG)) {
uint64_t old_head = origin_head->ds_object;
uint64_t new_head = hds->ds_object;
spa_swap_errlog(dp->dp_spa, new_head, old_head, tx);
}
}
/*
* Make a list of dsl_dataset_t's for the snapshots between first_obj
* (exclusive) and last_obj (inclusive). The list will be in reverse
* order (last_obj will be the list_head()). If first_obj == 0, do all
* snapshots back to this dataset's origin.
*/
static int
snaplist_make(dsl_pool_t *dp,
uint64_t first_obj, uint64_t last_obj, list_t *l, const void *tag)
{
uint64_t obj = last_obj;
list_create(l, sizeof (struct promotenode),
offsetof(struct promotenode, link));
while (obj != first_obj) {
dsl_dataset_t *ds;
struct promotenode *snap;
int err;
err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
ASSERT(err != ENOENT);
if (err != 0)
return (err);
if (first_obj == 0)
first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;
snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
snap->ds = ds;
list_insert_tail(l, snap);
obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
}
return (0);
}
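/*
* Worked example (object numbers assumed): for a chain
* origin (obj 10) <- snapA (obj 11) <- snapB (obj 12),
* snaplist_make(dp, 10, 12, l, tag) walks 12 then 11 and stops at 10, so
* list_head(l) is snapB and the tail is snapA; obj 10 itself is excluded
* because first_obj is exclusive.
*/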
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
struct promotenode *snap;
*spacep = 0;
for (snap = list_head(l); snap; snap = list_next(l, snap)) {
uint64_t used, comp, uncomp;
dsl_deadlist_space_range(&snap->ds->ds_deadlist,
mintxg, UINT64_MAX, &used, &comp, &uncomp);
*spacep += used;
}
return (0);
}
static void
snaplist_destroy(list_t *l, const void *tag)
{
struct promotenode *snap;
if (l == NULL || !list_link_active(&l->list_head))
return;
- while ((snap = list_tail(l)) != NULL) {
- list_remove(l, snap);
+ while ((snap = list_remove_tail(l)) != NULL) {
dsl_dataset_rele(snap->ds, tag);
kmem_free(snap, sizeof (*snap));
}
list_destroy(l);
}
static int
promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, const void *tag)
{
int error;
dsl_dir_t *dd;
struct promotenode *snap;
error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
&ddpa->ddpa_clone);
if (error != 0)
return (error);
dd = ddpa->ddpa_clone->ds_dir;
if (ddpa->ddpa_clone->ds_is_snapshot ||
!dsl_dir_is_clone(dd)) {
dsl_dataset_rele(ddpa->ddpa_clone, tag);
return (SET_ERROR(EINVAL));
}
error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
&ddpa->shared_snaps, tag);
if (error != 0)
goto out;
error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
&ddpa->clone_snaps, tag);
if (error != 0)
goto out;
snap = list_head(&ddpa->shared_snaps);
ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
&ddpa->origin_snaps, tag);
if (error != 0)
goto out;
if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
tag, &ddpa->origin_origin);
if (error != 0)
goto out;
}
out:
if (error != 0)
promote_rele(ddpa, tag);
return (error);
}
static void
promote_rele(dsl_dataset_promote_arg_t *ddpa, const void *tag)
{
snaplist_destroy(&ddpa->shared_snaps, tag);
snaplist_destroy(&ddpa->clone_snaps, tag);
snaplist_destroy(&ddpa->origin_snaps, tag);
if (ddpa->origin_origin != NULL)
dsl_dataset_rele(ddpa->origin_origin, tag);
dsl_dataset_rele(ddpa->ddpa_clone, tag);
}
/*
* Promote a clone.
*
* If it fails due to a conflicting snapshot name, "conflsnap" will be filled
* in with the name. (It must be at least ZFS_MAX_DATASET_NAME_LEN bytes long.)
*/
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
dsl_dataset_promote_arg_t ddpa = { 0 };
uint64_t numsnaps;
int error;
nvpair_t *snap_pair;
objset_t *os;
/*
* We will modify space proportional to the number of
* snapshots. Compute numsnaps.
*/
error = dmu_objset_hold(name, FTAG, &os);
if (error != 0)
return (error);
error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
&numsnaps);
dmu_objset_rele(os, FTAG);
if (error != 0)
return (error);
ddpa.ddpa_clonename = name;
ddpa.err_ds = fnvlist_alloc();
ddpa.cr = CRED();
ddpa.proc = curproc;
error = dsl_sync_task(name, dsl_dataset_promote_check,
dsl_dataset_promote_sync, &ddpa,
2 + numsnaps, ZFS_SPACE_CHECK_RESERVED);
/*
* Return the first conflicting snapshot found.
*/
snap_pair = nvlist_next_nvpair(ddpa.err_ds, NULL);
if (snap_pair != NULL && conflsnap != NULL)
(void) strlcpy(conflsnap, nvpair_name(snap_pair),
ZFS_MAX_DATASET_NAME_LEN);
fnvlist_free(ddpa.err_ds);
return (error);
}
int
dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
{
/*
* "slack" factor for received datasets with refquota set on them.
* See the bottom of this function for details on its use.
*/
uint64_t refquota_slack = (uint64_t)DMU_MAX_ACCESS *
spa_asize_inflation;
int64_t unused_refres_delta;
/* they should both be heads */
if (clone->ds_is_snapshot ||
origin_head->ds_is_snapshot)
return (SET_ERROR(EINVAL));
/* if we are not forcing, the branch point should be just before them */
if (!force && clone->ds_prev != origin_head->ds_prev)
return (SET_ERROR(EINVAL));
/* clone should be the clone (unless they are unrelated) */
if (clone->ds_prev != NULL &&
clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
origin_head->ds_dir != clone->ds_prev->ds_dir)
return (SET_ERROR(EINVAL));
/* the clone should be a child of the origin */
if (clone->ds_dir->dd_parent != origin_head->ds_dir)
return (SET_ERROR(EINVAL));
/* origin_head shouldn't be modified unless 'force' */
if (!force &&
dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
return (SET_ERROR(ETXTBSY));
/* origin_head should have no long holds (e.g. is not mounted) */
if (dsl_dataset_handoff_check(origin_head, owner, tx))
return (SET_ERROR(EBUSY));
/* check amount of any unconsumed refreservation */
unused_refres_delta =
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(origin_head)->ds_unique_bytes) -
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(clone)->ds_unique_bytes);
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
return (SET_ERROR(ENOSPC));
/*
* The clone can't be too much over the head's refquota.
*
* To ensure that the entire refquota can be used, we allow one
* transaction to exceed the refquota. Therefore, this check
* needs to also allow for the space referenced to be more than the
* refquota. The maximum amount of space that one transaction can use
* on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
* overage ensures that we are able to receive a filesystem that
* exceeds the refquota on the source system.
*
* So that overage is the refquota_slack we use below.
*/
if (origin_head->ds_quota != 0 &&
dsl_dataset_phys(clone)->ds_referenced_bytes >
origin_head->ds_quota + refquota_slack)
return (SET_ERROR(EDQUOT));
return (0);
}
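/*
* Worked example (figures assumed): if DMU_MAX_ACCESS * spa_asize_inflation
* works out to roughly 1.5G of refquota_slack, then with a 100G refquota on
* origin_head a received clone referencing up to 101.5G still passes this
* check, while anything above that fails with EDQUOT.
*/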
static void
dsl_dataset_swap_remap_deadlists(dsl_dataset_t *clone,
dsl_dataset_t *origin, dmu_tx_t *tx)
{
uint64_t clone_remap_dl_obj, origin_remap_dl_obj;
dsl_pool_t *dp = dmu_tx_pool(tx);
ASSERT(dsl_pool_sync_context(dp));
clone_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(clone);
origin_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(origin);
if (clone_remap_dl_obj != 0) {
dsl_deadlist_close(&clone->ds_remap_deadlist);
dsl_dataset_unset_remap_deadlist_object(clone, tx);
}
if (origin_remap_dl_obj != 0) {
dsl_deadlist_close(&origin->ds_remap_deadlist);
dsl_dataset_unset_remap_deadlist_object(origin, tx);
}
if (clone_remap_dl_obj != 0) {
dsl_dataset_set_remap_deadlist_object(origin,
clone_remap_dl_obj, tx);
dsl_deadlist_open(&origin->ds_remap_deadlist,
dp->dp_meta_objset, clone_remap_dl_obj);
}
if (origin_remap_dl_obj != 0) {
dsl_dataset_set_remap_deadlist_object(clone,
origin_remap_dl_obj, tx);
dsl_deadlist_open(&clone->ds_remap_deadlist,
dp->dp_meta_objset, origin_remap_dl_obj);
}
}
void
dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
int64_t unused_refres_delta;
ASSERT(clone->ds_reserved == 0);
/*
* NOTE: On DEBUG kernels there could be a race between this and
* the check function if spa_asize_inflation is adjusted...
*/
ASSERT(origin_head->ds_quota == 0 ||
dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota +
DMU_MAX_ACCESS * spa_asize_inflation);
ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);
dsl_dir_cancel_waiters(origin_head->ds_dir);
/*
* Swap per-dataset feature flags.
*/
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
ASSERT(!dsl_dataset_feature_is_active(clone, f));
ASSERT(!dsl_dataset_feature_is_active(origin_head, f));
continue;
}
boolean_t clone_inuse = dsl_dataset_feature_is_active(clone, f);
void *clone_feature = clone->ds_feature[f];
boolean_t origin_head_inuse =
dsl_dataset_feature_is_active(origin_head, f);
void *origin_head_feature = origin_head->ds_feature[f];
if (clone_inuse)
dsl_dataset_deactivate_feature_impl(clone, f, tx);
if (origin_head_inuse)
dsl_dataset_deactivate_feature_impl(origin_head, f, tx);
if (clone_inuse) {
dsl_dataset_activate_feature(origin_head->ds_object, f,
clone_feature, tx);
origin_head->ds_feature[f] = clone_feature;
}
if (origin_head_inuse) {
dsl_dataset_activate_feature(clone->ds_object, f,
origin_head_feature, tx);
clone->ds_feature[f] = origin_head_feature;
}
}
dmu_buf_will_dirty(clone->ds_dbuf, tx);
dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
if (clone->ds_objset != NULL) {
dmu_objset_evict(clone->ds_objset);
clone->ds_objset = NULL;
}
if (origin_head->ds_objset != NULL) {
dmu_objset_evict(origin_head->ds_objset);
origin_head->ds_objset = NULL;
}
unused_refres_delta =
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(origin_head)->ds_unique_bytes) -
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(clone)->ds_unique_bytes);
/*
* Reset origin's unique bytes.
*/
{
dsl_dataset_t *origin = clone->ds_prev;
uint64_t comp, uncomp;
dmu_buf_will_dirty(origin->ds_dbuf, tx);
dsl_deadlist_space_range(&clone->ds_deadlist,
dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
&dsl_dataset_phys(origin)->ds_unique_bytes, &comp, &uncomp);
}
/* swap blkptrs */
{
rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);
blkptr_t tmp;
tmp = dsl_dataset_phys(origin_head)->ds_bp;
dsl_dataset_phys(origin_head)->ds_bp =
dsl_dataset_phys(clone)->ds_bp;
dsl_dataset_phys(clone)->ds_bp = tmp;
rrw_exit(&origin_head->ds_bp_rwlock, FTAG);
rrw_exit(&clone->ds_bp_rwlock, FTAG);
}
/* set dd_*_bytes */
{
int64_t dused, dcomp, duncomp;
uint64_t cdl_used, cdl_comp, cdl_uncomp;
uint64_t odl_used, odl_comp, odl_uncomp;
ASSERT3U(dsl_dir_phys(clone->ds_dir)->
dd_used_breakdown[DD_USED_SNAP], ==, 0);
dsl_deadlist_space(&clone->ds_deadlist,
&cdl_used, &cdl_comp, &cdl_uncomp);
dsl_deadlist_space(&origin_head->ds_deadlist,
&odl_used, &odl_comp, &odl_uncomp);
dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
cdl_used -
(dsl_dataset_phys(origin_head)->ds_referenced_bytes +
odl_used);
dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
cdl_comp -
(dsl_dataset_phys(origin_head)->ds_compressed_bytes +
odl_comp);
duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
cdl_uncomp -
(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
odl_uncomp);
dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
dused, dcomp, duncomp, tx);
dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
-dused, -dcomp, -duncomp, tx);
/*
* The difference in the space used by snapshots is the
* difference in snapshot space due to the head's
* deadlist (since that's the only thing that's
* changing that affects the snapused).
*/
dsl_deadlist_space_range(&clone->ds_deadlist,
origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
&cdl_used, &cdl_comp, &cdl_uncomp);
dsl_deadlist_space_range(&origin_head->ds_deadlist,
origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
&odl_used, &odl_comp, &odl_uncomp);
dsl_dir_transfer_space(origin_head->ds_dir, cdl_used - odl_used,
DD_USED_HEAD, DD_USED_SNAP, tx);
}
/* swap ds_*_bytes */
SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
dsl_dataset_phys(clone)->ds_referenced_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
dsl_dataset_phys(clone)->ds_compressed_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
dsl_dataset_phys(clone)->ds_uncompressed_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
dsl_dataset_phys(clone)->ds_unique_bytes);
/* apply any parent delta for change in unconsumed refreservation */
dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
unused_refres_delta, 0, 0, tx);
/*
* Swap deadlists.
*/
dsl_deadlist_close(&clone->ds_deadlist);
dsl_deadlist_close(&origin_head->ds_deadlist);
SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
dsl_dataset_phys(clone)->ds_deadlist_obj);
dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
dsl_dataset_phys(clone)->ds_deadlist_obj);
dsl_deadlist_open(&origin_head->ds_deadlist, dp->dp_meta_objset,
dsl_dataset_phys(origin_head)->ds_deadlist_obj);
dsl_dataset_swap_remap_deadlists(clone, origin_head, tx);
/*
* If there is a bookmark at the origin, its "next dataset" is
* changing, so we need to reset its FBN.
*/
dsl_bookmark_next_changed(origin_head, origin_head->ds_prev, tx);
dsl_scan_ds_clone_swapped(origin_head, clone, tx);
/*
* Destroy any livelists associated with the clone or the origin,
* since after the swap the corresponding livelists are no longer
* valid.
*/
dsl_dir_remove_livelist(clone->ds_dir, tx, B_TRUE);
dsl_dir_remove_livelist(origin_head->ds_dir, tx, B_TRUE);
spa_history_log_internal_ds(clone, "clone swap", tx,
"parent=%s", origin_head->ds_dir->dd_myname);
}
/*
* Given a pool name and a dataset object number in that pool,
* return the name of that dataset.
*/
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int error;
error = dsl_pool_hold(pname, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
if (error == 0) {
dsl_dataset_name(ds, buf);
dsl_dataset_rele(ds, FTAG);
}
dsl_pool_rele(dp, FTAG);
return (error);
}
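/*
* Illustrative usage sketch (not part of the original change): resolving a
* dataset object number back to its name.  The pool name and dsobj here are
* hypothetical; buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes.
*
*	char name[ZFS_MAX_DATASET_NAME_LEN];
*	if (dsl_dsobj_to_dsname("tank", dsobj, name) == 0)
*		zfs_dbgmsg("dsobj %llu is %s", (u_longlong_t)dsobj, name);
*/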
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
int error = 0;
ASSERT3S(asize, >, 0);
/*
* *ref_rsrv is the portion of asize that will come from any
* unconsumed refreservation space.
*/
*ref_rsrv = 0;
mutex_enter(&ds->ds_lock);
/*
* Make a space adjustment for reserved bytes.
*/
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
ASSERT3U(*used, >=,
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
*used -=
(ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
*ref_rsrv =
asize - MIN(asize, parent_delta(ds, asize + inflight));
}
if (!check_quota || ds->ds_quota == 0) {
mutex_exit(&ds->ds_lock);
return (0);
}
/*
* If they are requesting more space, and our current estimate
* is over quota, they get to try again unless the actual
* on-disk usage is over quota and there are no pending changes (which
* may free up space for us).
*/
if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
ds->ds_quota) {
if (inflight > 0 ||
dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
error = SET_ERROR(ERESTART);
else
error = SET_ERROR(EDQUOT);
}
mutex_exit(&ds->ds_lock);
return (error);
}
typedef struct dsl_dataset_set_qr_arg {
const char *ddsqra_name;
zprop_source_t ddsqra_source;
uint64_t ddsqra_value;
} dsl_dataset_set_qr_arg_t;
static int
dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t newval;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (newval == 0) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
newval < ds->ds_reserved) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));
if (ds->ds_quota != newval) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ds->ds_quota = newval;
}
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
uint64_t refquota)
{
dsl_dataset_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = dsname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = refquota;
return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
dsl_dataset_set_refquota_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
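/*
* Usage sketch (illustrative, not part of the original change): setting a
* 10 GiB refquota as a local property value on a hypothetical dataset.  The
* check callback above runs in open context and again in syncing context,
* and the sync callback applies the property in the same syncing txg.
*
*	error = dsl_dataset_set_refquota("tank/home", ZPROP_SRC_LOCAL,
*	    10ULL << 30);
*/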
static int
dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t newval, unique;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/*
* If we are doing the preliminary check in open context, the
* space estimates may be inaccurate.
*/
if (!dmu_tx_is_syncing(tx)) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
mutex_enter(&ds->ds_lock);
if (!DS_UNIQUE_IS_ACCURATE(ds))
dsl_dataset_recalc_head_uniq(ds);
unique = dsl_dataset_phys(ds)->ds_unique_bytes;
mutex_exit(&ds->ds_lock);
if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
uint64_t delta = MAX(unique, newval) -
MAX(unique, ds->ds_reserved);
if (delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
(ds->ds_quota > 0 && newval > ds->ds_quota)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
zprop_source_t source, uint64_t value, dmu_tx_t *tx)
{
uint64_t newval;
uint64_t unique;
int64_t delta;
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
source, sizeof (value), 1, &value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));
dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_dir->dd_lock);
mutex_enter(&ds->ds_lock);
ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
unique = dsl_dataset_phys(ds)->ds_unique_bytes;
delta = MAX(0, (int64_t)(newval - unique)) -
MAX(0, (int64_t)(ds->ds_reserved - unique));
ds->ds_reserved = newval;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
mutex_exit(&ds->ds_dir->dd_lock);
}
static void
dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
dsl_dataset_set_refreservation_sync_impl(ds,
ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
uint64_t refreservation)
{
dsl_dataset_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = dsname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = refreservation;
return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
dsl_dataset_set_refreservation_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
typedef struct dsl_dataset_set_compression_arg {
const char *ddsca_name;
zprop_source_t ddsca_source;
uint64_t ddsca_value;
} dsl_dataset_set_compression_arg_t;
static int
dsl_dataset_set_compression_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_compression_arg_t *ddsca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
spa_feature_t f = zio_compress_to_feature(compval);
if (f == SPA_FEATURE_NONE)
return (SET_ERROR(EINVAL));
if (!spa_feature_is_enabled(dp->dp_spa, f))
return (SET_ERROR(ENOTSUP));
return (0);
}
static void
dsl_dataset_set_compression_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_compression_arg_t *ddsca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
spa_feature_t f = zio_compress_to_feature(compval);
ASSERT3S(f, !=, SPA_FEATURE_NONE);
ASSERT3S(spa_feature_table[f].fi_type, ==, ZFEATURE_TYPE_BOOLEAN);
VERIFY0(dsl_dataset_hold(dp, ddsca->ddsca_name, FTAG, &ds));
if (zfeature_active(f, ds->ds_feature[f]) != B_TRUE) {
ds->ds_feature_activation[f] = (void *)B_TRUE;
dsl_dataset_activate_feature(ds->ds_object, f,
ds->ds_feature_activation[f], tx);
ds->ds_feature[f] = ds->ds_feature_activation[f];
}
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_compression(const char *dsname, zprop_source_t source,
uint64_t compression)
{
dsl_dataset_set_compression_arg_t ddsca;
/*
* The sync task is only required for zstd in order to activate
* the feature flag when the property is first set.
*/
if (ZIO_COMPRESS_ALGO(compression) != ZIO_COMPRESS_ZSTD)
return (0);
ddsca.ddsca_name = dsname;
ddsca.ddsca_source = source;
ddsca.ddsca_value = compression;
return (dsl_sync_task(dsname, dsl_dataset_set_compression_check,
dsl_dataset_set_compression_sync, &ddsca, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
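/*
* Usage sketch (illustrative, not part of the original change): only zstd
* needs the sync task, so callers setting any other algorithm get 0 back
* immediately, while zstd goes through the check/sync pair above to
* activate its feature flag on first use.  The dataset name is hypothetical.
*
*	error = dsl_dataset_set_compression("tank/home", ZPROP_SRC_LOCAL,
*	    ZIO_COMPRESS_ZSTD);
*/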
/*
* Return (in *usedp) the amount of space referenced by "new" that was not
* referenced at the time the bookmark corresponds to. "New" may be a
* snapshot or a head. The bookmark must be before new, in
* new's filesystem (or its origin) -- caller verifies this.
*
* The written space is calculated by considering two components: First, we
* ignore any freed space, and calculate the written as new's used space
* minus old's used space. Next, we add in the amount of space that was freed
* between the two time points, thus reducing new's used space relative to
* old's. Specifically, this is the space that was born before
* zbm_creation_txg, and freed before new (i.e., on new's deadlist or a
* previous deadlist).
*
* space freed [---------------------]
* snapshots ---O-------O--------O-------O------
* bookmark new
*
* Note, the bookmark's zbm_*_bytes_refd must be valid, but if the HAS_FBN
* flag is not set, we will calculate the freed_before_next based on the
* next snapshot's deadlist, rather than using zbm_*_freed_before_next_snap.
*/
static int
dsl_dataset_space_written_impl(zfs_bookmark_phys_t *bmp,
dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
int err = 0;
dsl_pool_t *dp = new->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
if (dsl_dataset_is_snapshot(new)) {
ASSERT3U(bmp->zbm_creation_txg, <,
dsl_dataset_phys(new)->ds_creation_txg);
}
*usedp = 0;
*usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
*usedp -= bmp->zbm_referenced_bytes_refd;
*compp = 0;
*compp += dsl_dataset_phys(new)->ds_compressed_bytes;
*compp -= bmp->zbm_compressed_bytes_refd;
*uncompp = 0;
*uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
*uncompp -= bmp->zbm_uncompressed_bytes_refd;
dsl_dataset_t *snap = new;
while (dsl_dataset_phys(snap)->ds_prev_snap_txg >
bmp->zbm_creation_txg) {
uint64_t used, comp, uncomp;
dsl_deadlist_space_range(&snap->ds_deadlist,
0, bmp->zbm_creation_txg,
&used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
uint64_t snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
if (snap != new)
dsl_dataset_rele(snap, FTAG);
err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
if (err != 0)
break;
}
/*
* We might not have the FBN if we are calculating written from
* a snapshot (because we didn't know the correct "next" snapshot
* until now).
*/
if (bmp->zbm_flags & ZBM_FLAG_HAS_FBN) {
*usedp += bmp->zbm_referenced_freed_before_next_snap;
*compp += bmp->zbm_compressed_freed_before_next_snap;
*uncompp += bmp->zbm_uncompressed_freed_before_next_snap;
} else {
ASSERT3U(dsl_dataset_phys(snap)->ds_prev_snap_txg, ==,
bmp->zbm_creation_txg);
uint64_t used, comp, uncomp;
dsl_deadlist_space(&snap->ds_deadlist, &used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
}
if (snap != new)
dsl_dataset_rele(snap, FTAG);
return (err);
}
/*
* Return (in *usedp) the amount of space written in new that was not
* present at the time the bookmark corresponds to. New may be a
* snapshot or the head. Old must be a bookmark before new, in
* new's filesystem (or its origin) -- caller verifies this.
*/
int
dsl_dataset_space_written_bookmark(zfs_bookmark_phys_t *bmp,
dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
if (!(bmp->zbm_flags & ZBM_FLAG_HAS_FBN))
return (SET_ERROR(ENOTSUP));
return (dsl_dataset_space_written_impl(bmp, new,
usedp, compp, uncompp));
}
/*
* Return (in *usedp) the amount of space written in new that is not
* present in oldsnap. New may be a snapshot or the head. Old must be
* a snapshot before new, in new's filesystem (or its origin). If it is not,
* fail and return EINVAL.
*/
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
if (!dsl_dataset_is_before(new, oldsnap, 0))
return (SET_ERROR(EINVAL));
zfs_bookmark_phys_t zbm = { 0 };
dsl_dataset_phys_t *dsp = dsl_dataset_phys(oldsnap);
zbm.zbm_guid = dsp->ds_guid;
zbm.zbm_creation_txg = dsp->ds_creation_txg;
zbm.zbm_creation_time = dsp->ds_creation_time;
zbm.zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
zbm.zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
zbm.zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
/*
* If oldsnap is the origin (or origin's origin, ...) of new,
* we can't easily calculate the effective FBN. Therefore,
* we do not set ZBM_FLAG_HAS_FBN, so that the _impl will calculate
* it relative to the correct "next": the next snapshot towards "new",
* rather than the next snapshot in oldsnap's dsl_dir.
*/
return (dsl_dataset_space_written_impl(&zbm, new,
usedp, compp, uncompp));
}
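/*
* Usage sketch (illustrative, not part of the original change): computing
* the "written" space of a head dataset relative to one of its snapshots.
* Both datasets are assumed to be held and the pool config lock held, as
* required by the functions above.
*
*	uint64_t written, comp, uncomp;
*	error = dsl_dataset_space_written(oldsnap, head,
*	    &written, &comp, &uncomp);
*/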
/*
* Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
* lastsnap, and all snapshots in between are deleted.
*
* blocks that would be freed [---------------------------]
* snapshots ---O-------O--------O-------O--------O
* firstsnap lastsnap
*
* This is the set of blocks that were born after the snap before firstsnap
* (birth > firstsnap->prev_snap_txg) and died before the snap after the last
* snap (i.e., are on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
* We calculate this by iterating over the relevant deadlists (from the snap
* after lastsnap, backward to the snap after firstsnap), summing up the
* space on the deadlist that was born after the snap before firstsnap.
*/
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
dsl_dataset_t *lastsnap,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
int err = 0;
uint64_t snapobj;
dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
ASSERT(firstsnap->ds_is_snapshot);
ASSERT(lastsnap->ds_is_snapshot);
/*
* Check that the snapshots are in the same dsl_dir, and firstsnap
* is before lastsnap.
*/
if (firstsnap->ds_dir != lastsnap->ds_dir ||
dsl_dataset_phys(firstsnap)->ds_creation_txg >
dsl_dataset_phys(lastsnap)->ds_creation_txg)
return (SET_ERROR(EINVAL));
*usedp = *compp = *uncompp = 0;
snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
while (snapobj != firstsnap->ds_object) {
dsl_dataset_t *ds;
uint64_t used, comp, uncomp;
err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
if (err != 0)
break;
dsl_deadlist_space_range(&ds->ds_deadlist,
dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
&used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
ASSERT3U(snapobj, !=, 0);
dsl_dataset_rele(ds, FTAG);
}
return (err);
}
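/*
* Usage sketch (illustrative, not part of the original change): estimating
* how much space deleting the inclusive snapshot range [firstsnap, lastsnap]
* would reclaim.  Both snapshots must be held and in the same dsl_dir, as
* checked above.
*
*	uint64_t freed, comp, uncomp;
*	error = dsl_dataset_space_wouldfree(firstsnap, lastsnap,
*	    &freed, &comp, &uncomp);
*/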
/*
* Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
* For example, they could both be snapshots of the same filesystem, and
* 'earlier' is before 'later'. Or 'earlier' could be the origin of
* 'later's filesystem. Or 'earlier' could be an older snapshot in the origin's
* filesystem. Or 'earlier' could be the origin's origin.
*
* If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
*/
boolean_t
dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
uint64_t earlier_txg)
{
dsl_pool_t *dp = later->ds_dir->dd_pool;
int error;
boolean_t ret;
ASSERT(dsl_pool_config_held(dp));
ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);
if (earlier_txg == 0)
earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;
if (later->ds_is_snapshot &&
earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
return (B_FALSE);
if (later->ds_dir == earlier->ds_dir)
return (B_TRUE);
/*
* We check dd_origin_obj explicitly here rather than using
* dsl_dir_is_clone() so that we will return TRUE if "earlier"
* is $ORIGIN@$ORIGIN. dsl_dataset_space_written() depends on
* this behavior.
*/
if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == 0)
return (B_FALSE);
dsl_dataset_t *origin;
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
if (error != 0)
return (B_FALSE);
if (dsl_dataset_phys(origin)->ds_creation_txg == earlier_txg &&
origin->ds_dir == earlier->ds_dir) {
dsl_dataset_rele(origin, FTAG);
return (B_TRUE);
}
ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
dsl_dataset_rele(origin, FTAG);
return (ret);
}
void
dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
}
boolean_t
dsl_dataset_is_zapified(dsl_dataset_t *ds)
{
dmu_object_info_t doi;
dmu_object_info_from_db(ds->ds_dbuf, &doi);
return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
boolean_t
dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds)
{
return (dsl_dataset_is_zapified(ds) &&
zap_contains(ds->ds_dir->dd_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_TOGUID) == 0);
}
uint64_t
dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds)
{
uint64_t remap_deadlist_obj;
int err;
if (!dsl_dataset_is_zapified(ds))
return (0);
err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
DS_FIELD_REMAP_DEADLIST, sizeof (remap_deadlist_obj), 1,
&remap_deadlist_obj);
if (err != 0) {
VERIFY3S(err, ==, ENOENT);
return (0);
}
ASSERT(remap_deadlist_obj != 0);
return (remap_deadlist_obj);
}
boolean_t
dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds)
{
EQUIV(dsl_deadlist_is_open(&ds->ds_remap_deadlist),
dsl_dataset_get_remap_deadlist_object(ds) != 0);
return (dsl_deadlist_is_open(&ds->ds_remap_deadlist));
}
static void
dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx)
{
ASSERT(obj != 0);
dsl_dataset_zapify(ds, tx);
VERIFY0(zap_add(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
DS_FIELD_REMAP_DEADLIST, sizeof (obj), 1, &obj, tx));
}
static void
dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds, dmu_tx_t *tx)
{
VERIFY0(zap_remove(ds->ds_dir->dd_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_REMAP_DEADLIST, tx));
}
void
dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t remap_deadlist_object;
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dsl_dataset_remap_deadlist_exists(ds));
remap_deadlist_object = ds->ds_remap_deadlist.dl_object;
dsl_deadlist_close(&ds->ds_remap_deadlist);
dsl_deadlist_free(spa_meta_objset(spa), remap_deadlist_object, tx);
dsl_dataset_unset_remap_deadlist_object(ds, tx);
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t remap_deadlist_obj;
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(MUTEX_HELD(&ds->ds_remap_deadlist_lock));
/*
* Currently we only create remap deadlists when there are indirect
* vdevs with referenced mappings.
*/
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
remap_deadlist_obj = dsl_deadlist_clone(
&ds->ds_deadlist, UINT64_MAX,
dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
dsl_dataset_set_remap_deadlist_object(ds,
remap_deadlist_obj, tx);
dsl_deadlist_open(&ds->ds_remap_deadlist, spa_meta_objset(spa),
remap_deadlist_obj);
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_dataset_activate_redaction(dsl_dataset_t *ds, uint64_t *redact_snaps,
uint64_t num_redact_snaps, dmu_tx_t *tx)
{
uint64_t dsobj = ds->ds_object;
struct feature_type_uint64_array_arg *ftuaa =
kmem_zalloc(sizeof (*ftuaa), KM_SLEEP);
ftuaa->length = (int64_t)num_redact_snaps;
if (num_redact_snaps > 0) {
ftuaa->array = kmem_alloc(num_redact_snaps * sizeof (uint64_t),
KM_SLEEP);
memcpy(ftuaa->array, redact_snaps, num_redact_snaps *
sizeof (uint64_t));
}
dsl_dataset_activate_feature(dsobj, SPA_FEATURE_REDACTED_DATASETS,
ftuaa, tx);
ds->ds_feature[SPA_FEATURE_REDACTED_DATASETS] = ftuaa;
}
/*
* Find and return (in *oldest_dsobj) the oldest snapshot of the head_ds
* dataset whose birth time is >= min_txg.
*/
int
dsl_dataset_oldest_snapshot(spa_t *spa, uint64_t head_ds, uint64_t min_txg,
uint64_t *oldest_dsobj)
{
dsl_dataset_t *ds;
dsl_pool_t *dp = spa->spa_dsl_pool;
int error = dsl_dataset_hold_obj(dp, head_ds, FTAG, &ds);
if (error != 0)
return (error);
uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
while (prev_obj != 0 && min_txg < prev_obj_txg) {
dsl_dataset_rele(ds, FTAG);
if ((error = dsl_dataset_hold_obj(dp, prev_obj,
FTAG, &ds)) != 0)
return (error);
prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
}
*oldest_dsobj = ds->ds_object;
dsl_dataset_rele(ds, FTAG);
return (0);
}
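/*
* Usage sketch (illustrative, not part of the original change): walking back
* from a head dataset to the oldest snapshot whose birth time is >= min_txg,
* per the comment above.  head_obj and min_txg are hypothetical.
*
*	uint64_t oldest;
*	error = dsl_dataset_oldest_snapshot(spa, head_obj, min_txg, &oldest);
*/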
ZFS_MODULE_PARAM(zfs, zfs_, max_recordsize, UINT, ZMOD_RW,
"Max allowed record size");
ZFS_MODULE_PARAM(zfs, zfs_, allow_redacted_dataset_mount, INT, ZMOD_RW,
"Allow mounting of redacted datasets");
ZFS_MODULE_PARAM(zfs, zfs_, snapshot_history_enabled, INT, ZMOD_RW,
"Include snapshot events in pool history/events");
EXPORT_SYMBOL(dsl_dataset_hold);
EXPORT_SYMBOL(dsl_dataset_hold_flags);
EXPORT_SYMBOL(dsl_dataset_hold_obj);
EXPORT_SYMBOL(dsl_dataset_hold_obj_flags);
EXPORT_SYMBOL(dsl_dataset_own);
EXPORT_SYMBOL(dsl_dataset_own_obj);
EXPORT_SYMBOL(dsl_dataset_name);
EXPORT_SYMBOL(dsl_dataset_rele);
EXPORT_SYMBOL(dsl_dataset_rele_flags);
EXPORT_SYMBOL(dsl_dataset_disown);
EXPORT_SYMBOL(dsl_dataset_tryown);
EXPORT_SYMBOL(dsl_dataset_create_sync);
EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
EXPORT_SYMBOL(dsl_dataset_snapshot_check);
EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
EXPORT_SYMBOL(dsl_dataset_promote);
EXPORT_SYMBOL(dsl_dataset_user_hold);
EXPORT_SYMBOL(dsl_dataset_user_release);
EXPORT_SYMBOL(dsl_dataset_get_holds);
EXPORT_SYMBOL(dsl_dataset_get_blkptr);
EXPORT_SYMBOL(dsl_dataset_get_spa);
EXPORT_SYMBOL(dsl_dataset_modified_since_snap);
EXPORT_SYMBOL(dsl_dataset_space_written);
EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
EXPORT_SYMBOL(dsl_dataset_sync);
EXPORT_SYMBOL(dsl_dataset_block_born);
EXPORT_SYMBOL(dsl_dataset_block_kill);
EXPORT_SYMBOL(dsl_dataset_dirty);
EXPORT_SYMBOL(dsl_dataset_stats);
EXPORT_SYMBOL(dsl_dataset_fast_stat);
EXPORT_SYMBOL(dsl_dataset_space);
EXPORT_SYMBOL(dsl_dataset_fsid_guid);
EXPORT_SYMBOL(dsl_dsobj_to_dsname);
EXPORT_SYMBOL(dsl_dataset_check_quota);
EXPORT_SYMBOL(dsl_dataset_clone_swap_check_impl);
EXPORT_SYMBOL(dsl_dataset_clone_swap_sync_impl);
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dir.c b/sys/contrib/openzfs/module/zfs/dsl_dir.c
index eac9828a204a..bbe6a03d620f 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dir.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dir.c
@@ -1,2500 +1,2499 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright (c) 2014 Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zvol.h>
#include <sys/zthr.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
/*
* This controls whether we enforce the quota for ZVOLs.
* Quotas are of little use for ZVOLs, since the size of the volume
* already implies a size quota; enforcing the quota mechanism on top of
* that can introduce a significant performance drop.
*/
static int zvol_enforce_quotas = B_TRUE;
/*
* Filesystem and Snapshot Limits
* ------------------------------
*
* These limits are used to restrict the number of filesystems and/or snapshots
* that can be created at a given level in the tree or below. A typical
* use-case is with a delegated dataset where the administrator wants to ensure
* that a user within the zone is not creating too many additional filesystems
* or snapshots, even though they're not exceeding their space quota.
*
* The filesystem and snapshot counts are stored as extensible properties. This
* capability is controlled by a feature flag and must be enabled to be used.
* Once enabled, the feature is not active until the first limit is set. At
* that point, future operations to create/destroy filesystems or snapshots
* will validate and update the counts.
*
* Because the count properties will not exist before the feature is active,
* the counts are updated when a limit is first set on an uninitialized
* dsl_dir node in the tree (The filesystem/snapshot count on a node includes
* all of the nested filesystems/snapshots. Thus, a new leaf node has a
* filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
* snapshot count properties on a node indicate uninitialized counts on that
* node.) When first setting a limit on an uninitialized node, the code starts
* at the filesystem with the new limit and descends into all sub-filesystems
* to add the count properties.
*
* In practice this is lightweight since a limit is typically set when the
* filesystem is created and thus has no children. Once valid, changing the
* limit value won't require a re-traversal since the counts are already valid.
* When recursively fixing the counts, if a node with a limit is encountered
* during the descent, the counts are known to be valid and there is no need to
* descend into that filesystem's children. The counts on filesystems above the
* one with the new limit will still be uninitialized, unless a limit is
* eventually set on one of those filesystems. The counts are always recursively
* updated when a limit is set on a dataset, unless there is already a limit.
* When a new limit value is set on a filesystem with an existing limit, it is
* possible for the new limit to be less than the current count at that level
* since a user who can change the limit is also allowed to exceed the limit.
*
* Once the feature is active, then whenever a filesystem or snapshot is
* created, the code recurses up the tree, validating the new count against the
* limit at each initialized level. In practice, most levels will not have a
* limit set. If there is a limit at any initialized level up the tree, the
* check must pass or the creation will fail. Likewise, when a filesystem or
* snapshot is destroyed, the counts are recursively adjusted all the way up
* the initialized nodes in the tree. Renaming a filesystem to a different
* point in the tree will first validate, then update the counts on each
* branch up to the common ancestor. A receive will also validate the counts
* and then update them.
*
* An exception to the above behavior is that the limit is not enforced if the
* user has permission to modify the limit. This is primarily so that
* recursive snapshots in the global zone always work. We want to prevent a
* denial-of-service in which a lower level delegated dataset could max out its
* limit and thus block recursive snapshots from being taken in the global zone.
* Because of this, it is possible for the snapshot count to be over the limit
* and snapshots taken in the global zone could cause a lower level dataset to
* hit or exceed its limit. The administrator taking the global zone recursive
* snapshot should be aware of this side-effect and behave accordingly.
* For consistency, the filesystem limit is also not enforced if the user can
* modify the limit.
*
* The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
* and updated by dsl_fs_ss_count_adjust(). A new limit value is setup in
* dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
* dsl_dir_init_fs_ss_count().
*/
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
typedef struct ddulrt_arg {
dsl_dir_t *ddulrta_dd;
uint64_t ddlrta_txg;
} ddulrt_arg_t;
static void
dsl_dir_evict_async(void *dbu)
{
dsl_dir_t *dd = dbu;
int t;
dsl_pool_t *dp __maybe_unused = dd->dd_pool;
dd->dd_dbuf = NULL;
for (t = 0; t < TXG_SIZE; t++) {
ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
ASSERT(dd->dd_tempreserved[t] == 0);
ASSERT(dd->dd_space_towrite[t] == 0);
}
if (dd->dd_parent)
dsl_dir_async_rele(dd->dd_parent, dd);
spa_async_close(dd->dd_pool->dp_spa, dd);
if (dsl_deadlist_is_open(&dd->dd_livelist))
dsl_dir_livelist_close(dd);
dsl_prop_fini(dd);
cv_destroy(&dd->dd_activity_cv);
mutex_destroy(&dd->dd_activity_lock);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
}
int
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
const char *tail, const void *tag, dsl_dir_t **ddp)
{
dmu_buf_t *dbuf;
dsl_dir_t *dd;
dmu_object_info_t doi;
int err;
ASSERT(dsl_pool_config_held(dp));
err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
if (err != 0)
return (err);
dd = dmu_buf_get_user(dbuf);
dmu_object_info_from_db(dbuf, &doi);
ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
if (dd == NULL) {
dsl_dir_t *winner;
dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
dd->dd_object = ddobj;
dd->dd_dbuf = dbuf;
dd->dd_pool = dp;
mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dd->dd_activity_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dd->dd_activity_cv, NULL, CV_DEFAULT, NULL);
dsl_prop_init(dd);
if (dsl_dir_is_zapified(dd)) {
err = zap_lookup(dp->dp_meta_objset,
ddobj, DD_FIELD_CRYPTO_KEY_OBJ,
sizeof (uint64_t), 1, &dd->dd_crypto_obj);
if (err == 0) {
/* check for on-disk format errata */
if (dsl_dir_incompatible_encryption_version(
dd)) {
dp->dp_spa->spa_errata =
ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
}
} else if (err != ENOENT) {
goto errout;
}
}
if (dsl_dir_phys(dd)->dd_parent_obj) {
err = dsl_dir_hold_obj(dp,
dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
&dd->dd_parent);
if (err != 0)
goto errout;
if (tail) {
#ifdef ZFS_DEBUG
uint64_t foundobj;
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dd->dd_parent)->
dd_child_dir_zapobj, tail,
sizeof (foundobj), 1, &foundobj);
ASSERT(err || foundobj == ddobj);
#endif
(void) strlcpy(dd->dd_myname, tail,
sizeof (dd->dd_myname));
} else {
err = zap_value_search(dp->dp_meta_objset,
dsl_dir_phys(dd->dd_parent)->
dd_child_dir_zapobj,
ddobj, 0, dd->dd_myname);
}
if (err != 0)
goto errout;
} else {
(void) strlcpy(dd->dd_myname, spa_name(dp->dp_spa),
sizeof (dd->dd_myname));
}
if (dsl_dir_is_clone(dd)) {
dmu_buf_t *origin_bonus;
dsl_dataset_phys_t *origin_phys;
/*
* We can't open the origin dataset, because
* that would require opening this dsl_dir.
* Just look at its phys directly instead.
*/
err = dmu_bonus_hold(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_origin_obj, FTAG,
&origin_bonus);
if (err != 0)
goto errout;
origin_phys = origin_bonus->db_data;
dd->dd_origin_txg =
origin_phys->ds_creation_txg;
dmu_buf_rele(origin_bonus, FTAG);
if (dsl_dir_is_zapified(dd)) {
uint64_t obj;
err = zap_lookup(dp->dp_meta_objset,
dd->dd_object, DD_FIELD_LIVELIST,
sizeof (uint64_t), 1, &obj);
if (err == 0)
dsl_dir_livelist_open(dd, obj);
else if (err != ENOENT)
goto errout;
}
}
if (dsl_dir_is_zapified(dd)) {
inode_timespec_t t = {0};
(void) zap_lookup(dp->dp_meta_objset, ddobj,
DD_FIELD_SNAPSHOTS_CHANGED,
sizeof (uint64_t),
sizeof (inode_timespec_t) / sizeof (uint64_t),
&t);
dd->dd_snap_cmtime = t;
}
dmu_buf_init_user(&dd->dd_dbu, NULL, dsl_dir_evict_async,
&dd->dd_dbuf);
winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
if (winner != NULL) {
if (dd->dd_parent)
dsl_dir_rele(dd->dd_parent, dd);
if (dsl_deadlist_is_open(&dd->dd_livelist))
dsl_dir_livelist_close(dd);
dsl_prop_fini(dd);
cv_destroy(&dd->dd_activity_cv);
mutex_destroy(&dd->dd_activity_lock);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
dd = winner;
} else {
spa_open_ref(dp->dp_spa, dd);
}
}
/*
* The dsl_dir_t has both open-to-close and instantiate-to-evict
* holds on the spa. We need the open-to-close holds because
* otherwise the spa_refcnt wouldn't change when we open a
* dir which the spa also has open, so we could incorrectly
* think it was OK to unload/export/destroy the pool. We need
* the instantiate-to-evict hold because the dsl_dir_t has a
* pointer to the dd_pool, which has a pointer to the spa_t.
*/
spa_open_ref(dp->dp_spa, tag);
ASSERT3P(dd->dd_pool, ==, dp);
ASSERT3U(dd->dd_object, ==, ddobj);
ASSERT3P(dd->dd_dbuf, ==, dbuf);
*ddp = dd;
return (0);
errout:
if (dd->dd_parent)
dsl_dir_rele(dd->dd_parent, dd);
if (dsl_deadlist_is_open(&dd->dd_livelist))
dsl_dir_livelist_close(dd);
dsl_prop_fini(dd);
cv_destroy(&dd->dd_activity_cv);
mutex_destroy(&dd->dd_activity_lock);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
dmu_buf_rele(dbuf, tag);
return (err);
}
void
dsl_dir_rele(dsl_dir_t *dd, const void *tag)
{
dprintf_dd(dd, "%s\n", "");
spa_close(dd->dd_pool->dp_spa, tag);
dmu_buf_rele(dd->dd_dbuf, tag);
}
/*
* Remove a reference to the given dsl dir that is being asynchronously
* released. Async releases occur from a taskq performing eviction of
* dsl datasets and dirs. This process is identical to a normal release
* with the exception of using the async API for releasing the reference on
* the spa.
*/
void
dsl_dir_async_rele(dsl_dir_t *dd, const void *tag)
{
dprintf_dd(dd, "%s\n", "");
spa_async_close(dd->dd_pool->dp_spa, tag);
dmu_buf_rele(dd->dd_dbuf, tag);
}
/* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
if (dd->dd_parent) {
dsl_dir_name(dd->dd_parent, buf);
VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
} else {
buf[0] = '\0';
}
if (!MUTEX_HELD(&dd->dd_lock)) {
/*
* recursive mutex so that we can use
* dprintf_dd() with dd_lock held
*/
mutex_enter(&dd->dd_lock);
VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
mutex_exit(&dd->dd_lock);
} else {
VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
}
}
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
int result = 0;
if (dd->dd_parent) {
/* parent's name + 1 for the "/" */
result = dsl_dir_namelen(dd->dd_parent) + 1;
}
if (!MUTEX_HELD(&dd->dd_lock)) {
/* see dsl_dir_name */
mutex_enter(&dd->dd_lock);
result += strlen(dd->dd_myname);
mutex_exit(&dd->dd_lock);
} else {
result += strlen(dd->dd_myname);
}
return (result);
}
static int
getcomponent(const char *path, char *component, const char **nextp)
{
char *p;
if ((path == NULL) || (path[0] == '\0'))
return (SET_ERROR(ENOENT));
/* This would be a good place to reserve some namespace... */
p = strpbrk(path, "/@");
if (p && (p[1] == '/' || p[1] == '@')) {
/* two separators in a row */
return (SET_ERROR(EINVAL));
}
if (p == NULL || p == path) {
/*
* if the first thing is an @ or /, it had better be an
* @ and it had better not have any more ats or slashes,
* and it had better have something after the @.
*/
if (p != NULL &&
(p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
return (SET_ERROR(EINVAL));
if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(component, path, ZFS_MAX_DATASET_NAME_LEN);
p = NULL;
} else if (p[0] == '/') {
if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(component, path, p - path + 1);
p++;
} else if (p[0] == '@') {
/*
* if the next separator is an @, there better not be
* any more slashes.
*/
if (strchr(path, '/'))
return (SET_ERROR(EINVAL));
if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(component, path, p - path + 1);
} else {
panic("invalid p=%p", (void *)p);
}
*nextp = p;
return (0);
}
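/*
* Parsing sketch (illustrative, not part of the original change): for the
* hypothetical name "tank/home@monday", the first call copies "tank" into
* component and leaves "home@monday" in *nextp; the second call copies
* "home" and leaves "@monday"; a component starting with '@' is left for
* the caller (dsl_dir_hold() below) to hand back via *tailp.
*
*	char comp[ZFS_MAX_DATASET_NAME_LEN];
*	const char *next;
*	error = getcomponent("tank/home@monday", comp, &next);
*
* On success comp holds "tank" and next points at "home@monday".
*/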
/*
* Return the dsl_dir_t, and possibly (in *tailp) the last component that
* couldn't be found. The name must be in the specified dsl_pool_t. This
* thread must hold the dp_config_rwlock for the pool. Fails if the
* path is bogus, or if tailp == NULL and we couldn't parse the whole name.
* (*tailp)[0] == '@' means that the last component is a snapshot.
*/
int
dsl_dir_hold(dsl_pool_t *dp, const char *name, const void *tag,
dsl_dir_t **ddp, const char **tailp)
{
char *buf;
const char *spaname, *next, *nextnext = NULL;
int err;
dsl_dir_t *dd;
uint64_t ddobj;
buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
err = getcomponent(name, buf, &next);
if (err != 0)
goto error;
/* Make sure the name is in the specified pool. */
spaname = spa_name(dp->dp_spa);
if (strcmp(buf, spaname) != 0) {
err = SET_ERROR(EXDEV);
goto error;
}
ASSERT(dsl_pool_config_held(dp));
err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
if (err != 0) {
goto error;
}
while (next != NULL) {
dsl_dir_t *child_dd;
err = getcomponent(next, buf, &nextnext);
if (err != 0)
break;
ASSERT(next[0] != '\0');
if (next[0] == '@')
break;
dprintf("looking up %s in obj%lld\n",
buf, (longlong_t)dsl_dir_phys(dd)->dd_child_dir_zapobj);
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj,
buf, sizeof (ddobj), 1, &ddobj);
if (err != 0) {
if (err == ENOENT)
err = 0;
break;
}
err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);
if (err != 0)
break;
dsl_dir_rele(dd, tag);
dd = child_dd;
next = nextnext;
}
if (err != 0) {
dsl_dir_rele(dd, tag);
goto error;
}
/*
* It's an error if there's more than one component left, or
* tailp==NULL and there's any component left.
*/
if (next != NULL &&
(tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
/* bad path name */
dsl_dir_rele(dd, tag);
dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
err = SET_ERROR(ENOENT);
}
if (tailp != NULL)
*tailp = next;
if (err == 0)
*ddp = dd;
error:
kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN);
return (err);
}
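/*
* Usage sketch (illustrative, not part of the original change): holding the
* dsl_dir for a hypothetical name and inspecting the unparsed tail.  The
* pool config lock must already be held.
*
*	dsl_dir_t *dd;
*	const char *tail;
*	error = dsl_dir_hold(dp, "tank/home@monday", FTAG, &dd, &tail);
*	if (error == 0)
*		dsl_dir_rele(dd, FTAG);
*
* On success tail points at "@monday", i.e. the last component is a
* snapshot name.
*/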
/*
* If the counts are already initialized for this filesystem and its
* descendants then do nothing, otherwise initialize the counts.
*
* The counts on this filesystem, and those below, may be uninitialized due to
* either the use of a pre-existing pool which did not support the
* filesystem/snapshot limit feature, or one in which the feature had not yet
* been enabled.
*
* Recursively descend the filesystem tree and update the filesystem/snapshot
* counts on each filesystem below, then update the cumulative count on the
* current filesystem. If the filesystem already has a count set on it,
* then we know that its counts, and the counts on the filesystems below it,
* are already correct, so we don't have to update this filesystem.
*/
static void
dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
{
uint64_t my_fs_cnt = 0;
uint64_t my_ss_cnt = 0;
dsl_pool_t *dp = dd->dd_pool;
objset_t *os = dp->dp_meta_objset;
zap_cursor_t *zc;
zap_attribute_t *za;
dsl_dataset_t *ds;
ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
ASSERT(dsl_pool_config_held(dp));
ASSERT(dmu_tx_is_syncing(tx));
dsl_dir_zapify(dd, tx);
/*
* If the filesystem count has already been initialized then we
* don't need to recurse down any further.
*/
if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
return;
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/* Iterate my child dirs */
for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
dsl_dir_t *chld_dd;
uint64_t count;
VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
&chld_dd));
/*
* Ignore hidden ($FREE, $MOS & $ORIGIN) objsets.
*/
if (chld_dd->dd_myname[0] == '$') {
dsl_dir_rele(chld_dd, FTAG);
continue;
}
my_fs_cnt++; /* count this child */
dsl_dir_init_fs_ss_count(chld_dd, tx);
VERIFY0(zap_lookup(os, chld_dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
my_fs_cnt += count;
VERIFY0(zap_lookup(os, chld_dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
my_ss_cnt += count;
dsl_dir_rele(chld_dd, FTAG);
}
zap_cursor_fini(zc);
/* Count my snapshots (we counted children's snapshots above) */
VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));
for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
/* Don't count temporary snapshots */
if (za->za_name[0] != '%')
my_ss_cnt++;
}
zap_cursor_fini(zc);
dsl_dataset_rele(ds, FTAG);
kmem_free(zc, sizeof (zap_cursor_t));
kmem_free(za, sizeof (zap_attribute_t));
/* we're in a sync task, update counts */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
}
static int
dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
{
char *ddname = (char *)arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
dsl_dir_t *dd;
int error;
error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
if (error != 0)
return (error);
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
dd = ds->ds_dir;
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
dsl_dir_is_zapified(dd) &&
zap_contains(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT) == 0) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EALREADY));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
{
char *ddname = (char *)arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
spa_t *spa;
VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));
spa = dsl_dataset_get_spa(ds);
if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
/*
* Since the feature was not active and we're now setting a
* limit, increment the feature-active counter so that the
* feature becomes active for the first time.
*
* We are already in a sync task so we can update the MOS.
*/
spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
}
/*
* Since we are now setting a non-UINT64_MAX limit on the filesystem,
* we need to ensure the counts are correct. Descend down the tree from
* this point and update all of the counts to be accurate.
*/
dsl_dir_init_fs_ss_count(ds->ds_dir, tx);
dsl_dataset_rele(ds, FTAG);
}
/*
* Make sure the feature is enabled and activate it if necessary.
* Since we're setting a limit, ensure the on-disk counts are valid.
* This is only called by the ioctl path when setting a limit value.
*
* We do not need to validate the new limit, since users who can change the
* limit are also allowed to exceed the limit.
*/
int
dsl_dir_activate_fs_ss_limit(const char *ddname)
{
int error;
error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
ZFS_SPACE_CHECK_RESERVED);
if (error == EALREADY)
error = 0;
return (error);
}
/*
* Used to determine if the filesystem_limit or snapshot_limit should be
* enforced. We allow the limit to be exceeded if the user has permission to
* write the property value. We pass in the creds that we got in the open
* context since we will always be the GZ root in syncing context. We also have
* to handle the case where we are allowed to change the limit on the current
* dataset, but there may be another limit in the tree above.
*
* We can never modify these two properties within a non-global zone. In
* addition, the other checks are modeled on zfs_secpolicy_write_perms. We
* can't use that function since we are already holding the dp_config_rwlock.
* In addition, we already have the dd and dealing with snapshots is simplified
* in this code.
*/
typedef enum {
ENFORCE_ALWAYS,
ENFORCE_NEVER,
ENFORCE_ABOVE
} enforce_res_t;
static enforce_res_t
dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop,
cred_t *cr, proc_t *proc)
{
enforce_res_t enforce = ENFORCE_ALWAYS;
uint64_t obj;
dsl_dataset_t *ds;
uint64_t zoned;
const char *zonedstr;
ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT);
#ifdef _KERNEL
if (crgetzoneid(cr) != GLOBAL_ZONEID)
return (ENFORCE_ALWAYS);
/*
* We are checking the saved credentials of the user process, which is
* not the current process. Note that we can't use secpolicy_zfs(),
* because it only works if the cred is that of the current process (on
* Linux).
*/
if (secpolicy_zfs_proc(cr, proc) == 0)
return (ENFORCE_NEVER);
#else
(void) proc;
#endif
if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
return (ENFORCE_ALWAYS);
ASSERT(dsl_pool_config_held(dd->dd_pool));
if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
return (ENFORCE_ALWAYS);
zonedstr = zfs_prop_to_name(ZFS_PROP_ZONED);
if (dsl_prop_get_ds(ds, zonedstr, 8, 1, &zoned, NULL) || zoned) {
/* Only root can access zoned fs's from the GZ */
enforce = ENFORCE_ALWAYS;
} else {
if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
enforce = ENFORCE_ABOVE;
}
dsl_dataset_rele(ds, FTAG);
return (enforce);
}
/*
* Check if adding additional child filesystem(s) would exceed any filesystem
* limits or adding additional snapshot(s) would exceed any snapshot limits.
* The prop argument indicates which limit to check.
*
* Note that all filesystem limits up to the root (or the highest
* initialized) filesystem or the given ancestor must be satisfied.
*/
int
dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
dsl_dir_t *ancestor, cred_t *cr, proc_t *proc)
{
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t limit, count;
const char *count_prop;
enforce_res_t enforce;
int err = 0;
ASSERT(dsl_pool_config_held(dd->dd_pool));
ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT);
if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
/*
* We don't enforce the limit for temporary snapshots. This is
* indicated by a NULL cred_t argument.
*/
if (cr == NULL)
return (0);
count_prop = DD_FIELD_SNAPSHOT_COUNT;
} else {
count_prop = DD_FIELD_FILESYSTEM_COUNT;
}
/*
* If we're allowed to change the limit, don't enforce the limit
* e.g. this can happen if a snapshot is taken by an administrative
* user in the global zone (i.e. a recursive snapshot by root).
* However, we must handle the case of delegated permissions where we
* are allowed to change the limit on the current dataset, but there
* is another limit in the tree above.
*/
enforce = dsl_enforce_ds_ss_limits(dd, prop, cr, proc);
if (enforce == ENFORCE_NEVER)
return (0);
/*
* e.g. if renaming a dataset with no snapshots, count adjustment
* is 0.
*/
if (delta == 0)
return (0);
/*
* If an ancestor has been provided, stop checking the limit once we
* hit that dir. We need this during rename so that we don't overcount
* the check once we recurse up to the common ancestor.
*/
if (ancestor == dd)
return (0);
/*
* If we hit an uninitialized node while recursing up the tree, we can
* stop since we know there is no limit here (or above). The counts are
* not valid on this node and we know we won't touch this node's counts.
*/
if (!dsl_dir_is_zapified(dd))
return (0);
err = zap_lookup(os, dd->dd_object,
count_prop, sizeof (count), 1, &count);
if (err == ENOENT)
return (0);
if (err != 0)
return (err);
err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
B_FALSE);
if (err != 0)
return (err);
/* Is there a limit which we've hit? */
if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
return (SET_ERROR(EDQUOT));
if (dd->dd_parent != NULL)
err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
ancestor, cr, proc);
return (err);
}
/*
* Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
* parents. When a new filesystem/snapshot is created, increment the count on
* all parents, and when a filesystem/snapshot is destroyed, decrement the
* count.
*/
void
dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
dmu_tx_t *tx)
{
int err;
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t count;
ASSERT(dsl_pool_config_held(dd->dd_pool));
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);
/*
* We don't do accounting for hidden ($FREE, $MOS & $ORIGIN) objsets.
*/
if (dd->dd_myname[0] == '$' && strcmp(prop,
DD_FIELD_FILESYSTEM_COUNT) == 0) {
return;
}
/*
* e.g. if renaming a dataset with no snapshots, count adjustment is 0
*/
if (delta == 0)
return;
/*
* If we hit an uninitialized node while recursing up the tree, we can
* stop since we know the counts are not valid on this node and we
* know we shouldn't touch this node's counts. An uninitialized count
* on the node indicates that either the feature has not yet been
* activated or there are no limits on this part of the tree.
*/
if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
prop, sizeof (count), 1, &count)) == ENOENT)
return;
VERIFY0(err);
count += delta;
/* Use a signed verify to make sure we're not negative. */
VERIFY3S(count, >=, 0);
VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
tx));
/* Roll up this additional count into our ancestors */
if (dd->dd_parent != NULL)
dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
}
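/*
* Usage sketch (illustrative, not part of the original change): this is
* roughly how a create path pairs the two helpers.  The new child is
* validated against every initialized ancestor limit in the check phase,
* and the counts on this dir and all of its ancestors are bumped in syncing
* context (as dsl_dir_create_sync() does below for the filesystem count).
*
*	error = dsl_fs_ss_limit_check(parent_dd, 1,
*	    ZFS_PROP_FILESYSTEM_LIMIT, NULL, cr, proc);
*	...
*	dsl_fs_ss_count_adjust(parent_dd, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
*/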
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;
uint64_t ddobj;
dsl_dir_phys_t *ddphys;
dmu_buf_t *dbuf;
ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
if (pds) {
VERIFY0(zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
name, sizeof (uint64_t), 1, &ddobj, tx));
} else {
/* it's the root dir */
VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
}
VERIFY0(dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
ddphys = dbuf->db_data;
ddphys->dd_creation_time = gethrestime_sec();
if (pds) {
ddphys->dd_parent_obj = pds->dd_object;
/* update the filesystem counts */
dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
}
ddphys->dd_props_zapobj = zap_create(mos,
DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
ddphys->dd_child_dir_zapobj = zap_create(mos,
DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
dmu_buf_rele(dbuf, FTAG);
return (ddobj);
}
boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_origin_obj &&
(dd->dd_pool->dp_origin_snap == NULL ||
dsl_dir_phys(dd)->dd_origin_obj !=
dd->dd_pool->dp_origin_snap->ds_object));
}
uint64_t
dsl_dir_get_used(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_bytes);
}
uint64_t
dsl_dir_get_compressed(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_compressed_bytes);
}
uint64_t
dsl_dir_get_quota(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_quota);
}
uint64_t
dsl_dir_get_reservation(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_reserved);
}
uint64_t
dsl_dir_get_compressratio(dsl_dir_t *dd)
{
/* a fixed point number, 100x the ratio */
return (dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
(dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
dsl_dir_phys(dd)->dd_compressed_bytes));
}
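/*
* Worked example (illustrative, not part of the original change): with
* dd_uncompressed_bytes of 1 GiB and dd_compressed_bytes of 512 MiB this
* returns (1073741824 * 100) / 536870912 = 200, i.e. a 2.00x ratio in the
* 100x fixed-point form used for the compressratio property.
*/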
uint64_t
dsl_dir_get_logicalused(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_uncompressed_bytes);
}
uint64_t
dsl_dir_get_usedsnap(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
}
uint64_t
dsl_dir_get_usedds(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
}
uint64_t
dsl_dir_get_usedrefreserv(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
}
uint64_t
dsl_dir_get_usedchild(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
}
void
dsl_dir_get_origin(dsl_dir_t *dd, char *buf)
{
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));
dsl_dataset_name(ds, buf);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count)
{
if (dsl_dir_is_zapified(dd)) {
objset_t *os = dd->dd_pool->dp_meta_objset;
return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (*count), 1, count));
} else {
return (SET_ERROR(ENOENT));
}
}
int
dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
{
if (dsl_dir_is_zapified(dd)) {
objset_t *os = dd->dd_pool->dp_meta_objset;
return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (*count), 1, count));
} else {
return (SET_ERROR(ENOENT));
}
}
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
mutex_enter(&dd->dd_lock);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
dsl_dir_get_quota(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
dsl_dir_get_reservation(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
dsl_dir_get_logicalused(dd));
if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
dsl_dir_get_usedsnap(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
dsl_dir_get_usedds(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
dsl_dir_get_usedrefreserv(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
dsl_dir_get_usedchild(dd));
}
mutex_exit(&dd->dd_lock);
uint64_t count;
if (dsl_dir_get_filesystem_count(dd, &count) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_FILESYSTEM_COUNT,
count);
}
if (dsl_dir_get_snapshot_count(dd, &count) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT,
count);
}
if (dsl_dir_is_clone(dd)) {
char buf[ZFS_MAX_DATASET_NAME_LEN];
dsl_dir_get_origin(dd, buf);
dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
}
}
void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
ASSERT(dsl_dir_phys(dd));
if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(dd->dd_dbuf, dd);
}
}
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
uint64_t new_accounted =
MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
return (new_accounted - old_accounted);
}
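/*
* Worked example (illustrative, not part of the original change): with
* dd_reserved of 10 GiB, a charge of used = 4 GiB and delta = +2 GiB leaves
* both the old and new accounted values at the 10 GiB reservation, so the
* parent sees no change (0).  With used = 9 GiB, the same +2 GiB delta
* raises the accounted value from 10 GiB to 11 GiB, so 1 GiB is charged to
* the parent.
*/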
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
mutex_enter(&dd->dd_lock);
ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);
dprintf_dd(dd, "txg=%llu towrite=%lluK\n", (u_longlong_t)tx->tx_txg,
(u_longlong_t)dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;
mutex_exit(&dd->dd_lock);
/* release the hold from dsl_dir_dirty */
dmu_buf_rele(dd->dd_dbuf, dd);
}
static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
uint64_t space = 0;
ASSERT(MUTEX_HELD(&dd->dd_lock));
for (int i = 0; i < TXG_SIZE; i++)
space += dd->dd_space_towrite[i & TXG_MASK];
return (space);
}
/*
* How much space would dd have available if ancestor had delta applied
* to it? If ondiskonly is set, we're only interested in what's
* on-disk, not estimated pending changes.
*/
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
uint64_t parentspace, myspace, quota, used;
/*
* If there are no restrictions otherwise, assume we have
* unlimited space available.
*/
quota = UINT64_MAX;
parentspace = UINT64_MAX;
if (dd->dd_parent != NULL) {
parentspace = dsl_dir_space_available(dd->dd_parent,
ancestor, delta, ondiskonly);
}
mutex_enter(&dd->dd_lock);
if (dsl_dir_phys(dd)->dd_quota != 0)
quota = dsl_dir_phys(dd)->dd_quota;
used = dsl_dir_phys(dd)->dd_used_bytes;
if (!ondiskonly)
used += dsl_dir_space_towrite(dd);
if (dd->dd_parent == NULL) {
uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool,
ZFS_SPACE_CHECK_NORMAL);
quota = MIN(quota, poolsize);
}
if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
/*
* We have some space reserved, in addition to what our
* parent gave us.
*/
parentspace += dsl_dir_phys(dd)->dd_reserved - used;
}
if (dd == ancestor) {
ASSERT(delta <= 0);
ASSERT(used >= -delta);
used += delta;
if (parentspace != UINT64_MAX)
parentspace -= delta;
}
if (used > quota) {
/* over quota */
myspace = 0;
} else {
/*
* the lesser of the space provided by our parent and
* the space left in our quota
*/
myspace = MIN(parentspace, quota - used);
}
mutex_exit(&dd->dd_lock);
return (myspace);
}
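/*
 * Worked example (hypothetical sizes): with a 100 GiB quota, 40 GiB
 * used on disk, and a parent able to provide another 50 GiB, the
 * result is MIN(50 GiB, 100 GiB - 40 GiB) = 50 GiB. Had this dir also
 * carried a 60 GiB reservation, the 20 GiB of it not yet consumed
 * would be credited on top of what the parent provides (50 + 20 =
 * 70 GiB), and the quota would then be the limiting factor:
 * MIN(70 GiB, 60 GiB) = 60 GiB.
 */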
struct tempreserve {
list_node_t tr_node;
dsl_dir_t *tr_ds;
uint64_t tr_size;
};
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
boolean_t ignorequota, list_t *tr_list,
dmu_tx_t *tx, boolean_t first)
{
uint64_t txg;
uint64_t quota;
struct tempreserve *tr;
int retval;
uint64_t ext_quota;
uint64_t ref_rsrv;
top_of_function:
txg = tx->tx_txg;
retval = EDQUOT;
ref_rsrv = 0;
ASSERT3U(txg, !=, 0);
ASSERT3S(asize, >, 0);
mutex_enter(&dd->dd_lock);
/*
* Check against the dsl_dir's quota. We don't add in the delta
* when checking for over-quota because they get one free hit.
*/
uint64_t est_inflight = dsl_dir_space_towrite(dd);
for (int i = 0; i < TXG_SIZE; i++)
est_inflight += dd->dd_tempreserved[i];
uint64_t used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;
/*
* On the first iteration, fetch the dataset's used-on-disk and
* refreservation values. Also, unless this is a net-free operation,
* test whether allocating this space would exceed the dataset's
* refquota.
*/
if (first && tx->tx_objset) {
int error;
dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;
error = dsl_dataset_check_quota(ds, !netfree,
asize, est_inflight, &used_on_disk, &ref_rsrv);
if (error != 0) {
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
return (error);
}
}
/*
* If this transaction will result in a net free of space,
* we want to let it through.
*/
if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0 ||
(tx->tx_objset && dmu_objset_type(tx->tx_objset) == DMU_OST_ZVOL &&
zvol_enforce_quotas == B_FALSE))
quota = UINT64_MAX;
else
quota = dsl_dir_phys(dd)->dd_quota;
/*
* Adjust the quota against the actual pool size at the root
* minus any outstanding deferred frees.
* To ensure that it's possible to remove files from a full
* pool without inducing transient overcommits, we throttle
* netfree transactions against a quota that is slightly larger,
* but still within the pool's allocation slop. In cases where
* we're very close to full, this will allow a steady trickle of
* removes to get through.
*/
if (dd->dd_parent == NULL) {
uint64_t avail = dsl_pool_unreserved_space(dd->dd_pool,
(netfree) ?
ZFS_SPACE_CHECK_RESERVED : ZFS_SPACE_CHECK_NORMAL);
if (avail < quota) {
quota = avail;
retval = SET_ERROR(ENOSPC);
}
}
/*
* If they are requesting more space, and our current estimate
* is over quota, they get to try again unless the actual
* on-disk is over quota and there are no pending changes
* or deferred frees (which may free up space for us).
*/
ext_quota = quota >> 5;
if (quota == UINT64_MAX)
ext_quota = 0;
if (used_on_disk >= quota) {
/* Quota exceeded */
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
return (retval);
} else if (used_on_disk + est_inflight >= quota + ext_quota) {
if (est_inflight > 0 || used_on_disk < quota) {
retval = SET_ERROR(ERESTART);
} else {
ASSERT3U(used_on_disk, >=, quota);
if (retval == ENOSPC && (used_on_disk - quota) <
dsl_pool_deferred_space(dd->dd_pool)) {
retval = SET_ERROR(ERESTART);
}
}
dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
"quota=%lluK tr=%lluK err=%d\n",
(u_longlong_t)used_on_disk>>10,
(u_longlong_t)est_inflight>>10,
(u_longlong_t)quota>>10, (u_longlong_t)asize>>10, retval);
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
return (retval);
}
/* We need to up our estimated delta before dropping dd_lock */
dd->dd_tempreserved[txg & TXG_MASK] += asize;
uint64_t parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
asize - ref_rsrv);
mutex_exit(&dd->dd_lock);
tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr->tr_ds = dd;
tr->tr_size = asize;
list_insert_tail(tr_list, tr);
/* see if it's OK with our parent */
if (dd->dd_parent != NULL && parent_rsrv != 0) {
/*
* Recurse on our parent without recursion (i.e. iterate instead).
* Actual recursion here has been observed to consume a large amount
* of stack, even within the test suite; the largest stack seen was
* 7632 bytes on Linux.
*/
dd = dd->dd_parent;
asize = parent_rsrv;
ignorequota = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
first = B_FALSE;
goto top_of_function;
}
return (0);
}
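/*
 * Worked example of the quota slack above (hypothetical sizes): with
 * a 100 GiB quota, ext_quota = quota >> 5 is about 3.1 GiB. A
 * reservation that would put used-on-disk plus in-flight writes at
 * 102 GiB may still proceed (only the estimate is over quota, and it
 * is under 103.1 GiB), while one that would reach 104 GiB is bounced
 * with ERESTART so the caller retries after pending changes and
 * deferred frees have settled.
 */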
/*
* Reserve space in this dsl_dir, to be used in this tx's txg.
* After the space has been dirtied (and dsl_dir_willuse_space()
* has been called), the reservation should be canceled, using
* dsl_dir_tempreserve_clear().
*/
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx)
{
int err;
list_t *tr_list;
if (asize == 0) {
*tr_cookiep = NULL;
return (0);
}
tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
list_create(tr_list, sizeof (struct tempreserve),
offsetof(struct tempreserve, tr_node));
ASSERT3S(asize, >, 0);
err = arc_tempreserve_space(dd->dd_pool->dp_spa, lsize, tx->tx_txg);
if (err == 0) {
struct tempreserve *tr;
tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr->tr_size = lsize;
list_insert_tail(tr_list, tr);
} else {
if (err == EAGAIN) {
/*
* If arc_memory_throttle() detected that pageout
* is running and we are low on memory, we delay new
* non-pageout transactions to give pageout an
* advantage.
*
* It is unfortunate to be delaying while the caller's
* locks are held.
*/
txg_delay(dd->dd_pool, tx->tx_txg,
MSEC2NSEC(10), MSEC2NSEC(10));
err = SET_ERROR(ERESTART);
}
}
if (err == 0) {
err = dsl_dir_tempreserve_impl(dd, asize, netfree,
B_FALSE, tr_list, tx, B_TRUE);
}
if (err != 0)
dsl_dir_tempreserve_clear(tr_list, tx);
else
*tr_cookiep = tr_list;
return (err);
}
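/*
 * Sketch of the intended calling pattern (the surrounding context is
 * hypothetical; retry handling for ERESTART and the rest of the
 * transaction machinery are omitted):
 *
 *	void *tr_cookie;
 *	int err = dsl_dir_tempreserve_space(dd, lsize, asize,
 *	    B_FALSE, &tr_cookie, tx);
 *	if (err == 0) {
 *		dsl_dir_willuse_space(dd, asize, tx);
 *		... dirty the data in this txg ...
 *		dsl_dir_tempreserve_clear(tr_cookie, tx);
 *	}
 */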
/*
* Clear a temporary reservation that we previously made with
* dsl_dir_tempreserve_space().
*/
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
int txgidx = tx->tx_txg & TXG_MASK;
list_t *tr_list = tr_cookie;
struct tempreserve *tr;
ASSERT3U(tx->tx_txg, !=, 0);
if (tr_cookie == NULL)
return;
- while ((tr = list_head(tr_list)) != NULL) {
+ while ((tr = list_remove_head(tr_list)) != NULL) {
if (tr->tr_ds) {
mutex_enter(&tr->tr_ds->dd_lock);
ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
tr->tr_size);
tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
mutex_exit(&tr->tr_ds->dd_lock);
} else {
arc_tempreserve_clear(tr->tr_size);
}
- list_remove(tr_list, tr);
kmem_free(tr, sizeof (struct tempreserve));
}
kmem_free(tr_list, sizeof (list_t));
}
/*
* This should be called from open context when we think we're going to write
* or free space, for example when dirtying data. Be conservative; it's okay
* to write less space or free more, but we don't want to write more or free
* less than the amount specified.
*
* NOTE: The behavior of this function is identical to the Illumos / FreeBSD
* version; however, it has been adjusted to use an iterative rather than
* recursive algorithm to minimize stack usage.
*/
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
int64_t parent_space;
uint64_t est_used;
do {
mutex_enter(&dd->dd_lock);
if (space > 0)
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
est_used = dsl_dir_space_towrite(dd) +
dsl_dir_phys(dd)->dd_used_bytes;
parent_space = parent_delta(dd, est_used, space);
mutex_exit(&dd->dd_lock);
/* Make sure that we clean up dd_space_to* */
dsl_dir_dirty(dd, tx);
dd = dd->dd_parent;
space = parent_space;
} while (space && dd);
}
/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
int64_t accounted_delta;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(type < DD_USED_NUM);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
/*
* dsl_dataset_set_refreservation_sync_impl() calls this with
* dd_lock held, so that it can atomically update
* ds->ds_reserved and the dsl_dir accounting, so that
* dsl_dataset_check_quota() can see dataset and dir accounting
* consistently.
*/
boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
if (needlock)
mutex_enter(&dd->dd_lock);
dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
ASSERT(uncompressed >= 0 ||
ddp->dd_uncompressed_bytes >= -uncompressed);
ddp->dd_used_bytes += used;
ddp->dd_uncompressed_bytes += uncompressed;
ddp->dd_compressed_bytes += compressed;
if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
ASSERT(used >= 0 || ddp->dd_used_breakdown[type] >= -used);
ddp->dd_used_breakdown[type] += used;
#ifdef ZFS_DEBUG
{
dd_used_t t;
uint64_t u = 0;
for (t = 0; t < DD_USED_NUM; t++)
u += ddp->dd_used_breakdown[t];
ASSERT3U(u, ==, ddp->dd_used_bytes);
}
#endif
}
if (needlock)
mutex_exit(&dd->dd_lock);
if (dd->dd_parent != NULL) {
dsl_dir_diduse_transfer_space(dd->dd_parent,
accounted_delta, compressed, uncompressed,
used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
}
}
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(oldtype < DD_USED_NUM);
ASSERT(newtype < DD_USED_NUM);
dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
if (delta == 0 ||
!(ddp->dd_flags & DD_FLAG_USED_BREAKDOWN))
return;
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
ASSERT(delta > 0 ?
ddp->dd_used_breakdown[oldtype] >= delta :
ddp->dd_used_breakdown[newtype] >= -delta);
ASSERT(ddp->dd_used_bytes >= ABS(delta));
ddp->dd_used_breakdown[oldtype] -= delta;
ddp->dd_used_breakdown[newtype] += delta;
mutex_exit(&dd->dd_lock);
}
void
dsl_dir_diduse_transfer_space(dsl_dir_t *dd, int64_t used,
int64_t compressed, int64_t uncompressed, int64_t tonew,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
int64_t accounted_delta;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(oldtype < DD_USED_NUM);
ASSERT(newtype < DD_USED_NUM);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
ASSERT(uncompressed >= 0 ||
ddp->dd_uncompressed_bytes >= -uncompressed);
ddp->dd_used_bytes += used;
ddp->dd_uncompressed_bytes += uncompressed;
ddp->dd_compressed_bytes += compressed;
if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
ASSERT(tonew - used <= 0 ||
ddp->dd_used_breakdown[oldtype] >= tonew - used);
ASSERT(tonew >= 0 ||
ddp->dd_used_breakdown[newtype] >= -tonew);
ddp->dd_used_breakdown[oldtype] -= tonew - used;
ddp->dd_used_breakdown[newtype] += tonew;
#ifdef ZFS_DEBUG
{
dd_used_t t;
uint64_t u = 0;
for (t = 0; t < DD_USED_NUM; t++)
u += ddp->dd_used_breakdown[t];
ASSERT3U(u, ==, ddp->dd_used_bytes);
}
#endif
}
mutex_exit(&dd->dd_lock);
if (dd->dd_parent != NULL) {
dsl_dir_diduse_transfer_space(dd->dd_parent,
accounted_delta, compressed, uncompressed,
used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
}
}
typedef struct dsl_dir_set_qr_arg {
const char *ddsqra_name;
zprop_source_t ddsqra_source;
uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;
static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t towrite, newval;
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
error = dsl_prop_predict(ds->ds_dir, "quota",
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (newval == 0) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
mutex_enter(&ds->ds_dir->dd_lock);
/*
* If we are doing the preliminary check in open context, and
* there are pending changes, then don't fail it, since the
* pending changes could under-estimate the amount of space to be
* freed up.
*/
towrite = dsl_dir_space_towrite(ds->ds_dir);
if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
(newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
error = SET_ERROR(ENOSPC);
}
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
return (error);
}
static void
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
} else {
newval = ddsqra->ddsqra_value;
spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
}
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
mutex_enter(&ds->ds_dir->dd_lock);
dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
dsl_dir_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = ddname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = quota;
return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
dsl_dir_set_quota_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
dsl_dir_t *dd;
uint64_t newval, used, avail;
int error;
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
dd = ds->ds_dir;
/*
* If we are doing the preliminary check in open context, the
* space estimates may be inaccurate.
*/
if (!dmu_tx_is_syncing(tx)) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_RESERVATION),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
mutex_enter(&dd->dd_lock);
used = dsl_dir_phys(dd)->dd_used_bytes;
mutex_exit(&dd->dd_lock);
if (dd->dd_parent) {
avail = dsl_dir_space_available(dd->dd_parent,
NULL, 0, FALSE);
} else {
avail = dsl_pool_adjustedsize(dd->dd_pool,
ZFS_SPACE_CHECK_NORMAL) - used;
}
if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
uint64_t delta = MAX(used, newval) -
MAX(used, dsl_dir_phys(dd)->dd_reserved);
if (delta > avail ||
(dsl_dir_phys(dd)->dd_quota > 0 &&
newval > dsl_dir_phys(dd)->dd_quota))
error = SET_ERROR(ENOSPC);
}
dsl_dataset_rele(ds, FTAG);
return (error);
}
void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
uint64_t used;
int64_t delta;
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
used = dsl_dir_phys(dd)->dd_used_bytes;
delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
dsl_dir_phys(dd)->dd_reserved = value;
if (dd->dd_parent != NULL) {
/* Roll up this additional usage into our ancestors */
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
delta, 0, 0, tx);
}
mutex_exit(&dd->dd_lock);
}
static void
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_RESERVATION),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
} else {
newval = ddsqra->ddsqra_value;
spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
zfs_prop_to_name(ZFS_PROP_RESERVATION),
(longlong_t)newval);
}
dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
uint64_t reservation)
{
dsl_dir_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = ddname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = reservation;
return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
dsl_dir_set_reservation_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
for (; ds1; ds1 = ds1->dd_parent) {
dsl_dir_t *dd;
for (dd = ds2; dd; dd = dd->dd_parent) {
if (ds1 == dd)
return (dd);
}
}
return (NULL);
}
/*
* If delta is applied to dd, how much of that delta would be applied to
* ancestor? Syncing context only.
*/
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
if (dd == ancestor)
return (delta);
mutex_enter(&dd->dd_lock);
delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
mutex_exit(&dd->dd_lock);
return (would_change(dd->dd_parent, delta, ancestor));
}
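/*
 * Worked example (hypothetical sizes): freeing 5 MiB from a child
 * whose reservation still exceeds its usage yields a parent_delta()
 * of 0 at that level, so would_change() reports that the ancestor
 * sees no change at all; only when each intermediate dir's usage is
 * above its reservation does the full delta propagate up to ancestor.
 */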
typedef struct dsl_dir_rename_arg {
const char *ddra_oldname;
const char *ddra_newname;
cred_t *ddra_cred;
proc_t *ddra_proc;
} dsl_dir_rename_arg_t;
typedef struct dsl_valid_rename_arg {
int char_delta;
int nest_delta;
} dsl_valid_rename_arg_t;
static int
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) dp;
dsl_valid_rename_arg_t *dvra = arg;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, namebuf);
ASSERT3U(strnlen(namebuf, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
int namelen = strlen(namebuf) + dvra->char_delta;
int depth = get_dataset_depth(namebuf) + dvra->nest_delta;
if (namelen >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
if (dvra->nest_delta > 0 && depth >= zfs_max_dataset_nesting)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_rename_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd, *newparent;
dsl_valid_rename_arg_t dvra;
dsl_dataset_t *parentds;
objset_t *parentos;
const char *mynewname;
int error;
/* target dir should exist */
error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
if (error != 0)
return (error);
/* new parent should exist */
error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
&newparent, &mynewname);
if (error != 0) {
dsl_dir_rele(dd, FTAG);
return (error);
}
/* can't rename to different pool */
if (dd->dd_pool != newparent->dd_pool) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EXDEV));
}
/* new name should not already exist */
if (mynewname == NULL) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EEXIST));
}
/* can't rename below anything but filesystems (e.g. no ZVOLs) */
error = dsl_dataset_hold_obj(newparent->dd_pool,
dsl_dir_phys(newparent)->dd_head_dataset_obj, FTAG, &parentds);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
error = dmu_objset_from_ds(parentds, &parentos);
if (error != 0) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
dsl_dataset_rele(parentds, FTAG);
ASSERT3U(strnlen(ddra->ddra_newname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
ASSERT3U(strnlen(ddra->ddra_oldname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
dvra.char_delta = strlen(ddra->ddra_newname)
- strlen(ddra->ddra_oldname);
dvra.nest_delta = get_dataset_depth(ddra->ddra_newname)
- get_dataset_depth(ddra->ddra_oldname);
/* if the name length is growing, validate child name lengths */
if (dvra.char_delta > 0 || dvra.nest_delta > 0) {
error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
&dvra, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
}
if (dmu_tx_is_syncing(tx)) {
if (spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_FS_SS_LIMIT)) {
/*
* Although this is the check function and we don't
* normally make on-disk changes in check functions,
* we need to do that here.
*
* Ensure this portion of the tree's counts have been
* initialized in case the new parent has limits set.
*/
dsl_dir_init_fs_ss_count(dd, tx);
}
}
if (newparent != dd->dd_parent) {
/* is there enough space? */
uint64_t myspace =
MAX(dsl_dir_phys(dd)->dd_used_bytes,
dsl_dir_phys(dd)->dd_reserved);
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t fs_cnt = 0;
uint64_t ss_cnt = 0;
if (dsl_dir_is_zapified(dd)) {
int err;
err = zap_lookup(os, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
&fs_cnt);
if (err != ENOENT && err != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (err);
}
/*
* have to add 1 for the filesystem itself that we're
* moving
*/
fs_cnt++;
err = zap_lookup(os, dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
&ss_cnt);
if (err != ENOENT && err != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (err);
}
}
/* check for encryption errors */
error = dsl_dir_rename_crypt_check(dd, newparent);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EACCES));
}
/* no rename into our descendant */
if (closest_common_ancestor(dd, newparent) == dd) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_dir_transfer_possible(dd->dd_parent,
newparent, fs_cnt, ss_cnt, myspace,
ddra->ddra_cred, ddra->ddra_proc);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
}
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (0);
}
static void
dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_rename_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd, *newparent;
const char *mynewname;
objset_t *mos = dp->dp_meta_objset;
VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
&mynewname));
ASSERT3P(mynewname, !=, NULL);
/* Log this before we change the name. */
spa_history_log_internal_dd(dd, "rename", tx,
"-> %s", ddra->ddra_newname);
if (newparent != dd->dd_parent) {
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t fs_cnt = 0;
uint64_t ss_cnt = 0;
/*
* We already made sure the dd counts were initialized in the
* check function.
*/
if (spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_FS_SS_LIMIT)) {
VERIFY0(zap_lookup(os, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
&fs_cnt));
/* add 1 for the filesystem itself that we're moving */
fs_cnt++;
VERIFY0(zap_lookup(os, dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
&ss_cnt));
}
dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
DD_FIELD_FILESYSTEM_COUNT, tx);
dsl_fs_ss_count_adjust(newparent, fs_cnt,
DD_FIELD_FILESYSTEM_COUNT, tx);
dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
DD_FIELD_SNAPSHOT_COUNT, tx);
dsl_fs_ss_count_adjust(newparent, ss_cnt,
DD_FIELD_SNAPSHOT_COUNT, tx);
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
-dsl_dir_phys(dd)->dd_used_bytes,
-dsl_dir_phys(dd)->dd_compressed_bytes,
-dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(newparent, DD_USED_CHILD,
dsl_dir_phys(dd)->dd_used_bytes,
dsl_dir_phys(dd)->dd_compressed_bytes,
dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
if (dsl_dir_phys(dd)->dd_reserved >
dsl_dir_phys(dd)->dd_used_bytes) {
uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
dsl_dir_phys(dd)->dd_used_bytes;
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
-unused_rsrv, 0, 0, tx);
dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
unused_rsrv, 0, 0, tx);
}
}
dmu_buf_will_dirty(dd->dd_dbuf, tx);
/* remove from old parent zapobj */
VERIFY0(zap_remove(mos,
dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
dd->dd_myname, tx));
(void) strlcpy(dd->dd_myname, mynewname,
sizeof (dd->dd_myname));
dsl_dir_rele(dd->dd_parent, dd);
dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
VERIFY0(dsl_dir_hold_obj(dp,
newparent->dd_object, NULL, dd, &dd->dd_parent));
/* add to new parent zapobj */
VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
dd->dd_myname, 8, 1, &dd->dd_object, tx));
/* TODO: A rename callback to avoid these layering violations. */
zfsvfs_update_fromname(ddra->ddra_oldname, ddra->ddra_newname);
zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname,
ddra->ddra_newname, B_TRUE);
dsl_prop_notify_all(dd);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
}
int
dsl_dir_rename(const char *oldname, const char *newname)
{
dsl_dir_rename_arg_t ddra;
ddra.ddra_oldname = oldname;
ddra.ddra_newname = newname;
ddra.ddra_cred = CRED();
ddra.ddra_proc = curproc;
return (dsl_sync_task(oldname,
dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
3, ZFS_SPACE_CHECK_RESERVED));
}
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space,
cred_t *cr, proc_t *proc)
{
dsl_dir_t *ancestor;
int64_t adelta;
uint64_t avail;
int err;
ancestor = closest_common_ancestor(sdd, tdd);
adelta = would_change(sdd, -space, ancestor);
avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
if (avail < space)
return (SET_ERROR(ENOSPC));
err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,
ancestor, cr, proc);
if (err != 0)
return (err);
err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
ancestor, cr, proc);
if (err != 0)
return (err);
return (0);
}
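/*
 * Worked example (hypothetical layout): when moving pool/a/src under
 * pool/b, the closest common ancestor is pool. would_change() reports
 * how much of the space leaving pool/a would actually be seen at pool
 * (reservations along the way may absorb part of it), and that delta
 * is applied when asking whether pool/b can provide `space` bytes.
 * The filesystem and snapshot counts being moved are then checked
 * against any limits set on pool/b or its ancestors.
 */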
inode_timespec_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
inode_timespec_t t;
mutex_enter(&dd->dd_lock);
t = dd->dd_snap_cmtime;
mutex_exit(&dd->dd_lock);
return (t);
}
void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
inode_timespec_t t;
gethrestime(&t);
mutex_enter(&dd->dd_lock);
dd->dd_snap_cmtime = t;
if (spa_feature_is_enabled(dp->dp_spa,
SPA_FEATURE_EXTENSIBLE_DATASET)) {
objset_t *mos = dd->dd_pool->dp_meta_objset;
uint64_t ddobj = dd->dd_object;
dsl_dir_zapify(dd, tx);
VERIFY0(zap_update(mos, ddobj,
DD_FIELD_SNAPSHOTS_CHANGED,
sizeof (uint64_t),
sizeof (inode_timespec_t) / sizeof (uint64_t),
&t, tx));
}
mutex_exit(&dd->dd_lock);
}
void
dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
{
objset_t *mos = dd->dd_pool->dp_meta_objset;
dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
}
boolean_t
dsl_dir_is_zapified(dsl_dir_t *dd)
{
dmu_object_info_t doi;
dmu_object_info_from_db(dd->dd_dbuf, &doi);
return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
void
dsl_dir_livelist_open(dsl_dir_t *dd, uint64_t obj)
{
objset_t *mos = dd->dd_pool->dp_meta_objset;
ASSERT(spa_feature_is_active(dd->dd_pool->dp_spa,
SPA_FEATURE_LIVELIST));
dsl_deadlist_open(&dd->dd_livelist, mos, obj);
bplist_create(&dd->dd_pending_allocs);
bplist_create(&dd->dd_pending_frees);
}
void
dsl_dir_livelist_close(dsl_dir_t *dd)
{
dsl_deadlist_close(&dd->dd_livelist);
bplist_destroy(&dd->dd_pending_allocs);
bplist_destroy(&dd->dd_pending_frees);
}
void
dsl_dir_remove_livelist(dsl_dir_t *dd, dmu_tx_t *tx, boolean_t total)
{
uint64_t obj;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
livelist_condense_entry_t to_condense = spa->spa_to_condense;
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return;
/*
* If the livelist being removed is set to be condensed, stop the
* condense zthr and indicate the cancellation in the spa_to_condense
* struct in case the condense no-wait synctask has already started.
*/
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL &&
(to_condense.ds != NULL) && (to_condense.ds->ds_dir == dd)) {
/*
* We use zthr_wait_cycle_done instead of zthr_cancel
* because we don't want to destroy the zthr, just have
* it skip its current task.
*/
spa->spa_to_condense.cancelled = B_TRUE;
zthr_wait_cycle_done(ll_condense_thread);
/*
* If we've returned from zthr_wait_cycle_done without
* clearing the to_condense data structure it's either
* because the no-wait synctask has started (which is
* indicated by 'syncing' field of to_condense) and we
* can expect it to clear to_condense on its own.
* Otherwise, we returned before the zthr ran. The
* checkfunc will now fail as cancelled == B_TRUE so we
* can safely NULL out ds, allowing a different dir's
* livelist to be condensed.
*
* We can be sure that the to_condense struct will not
* be repopulated at this stage because both this
* function and dsl_livelist_try_condense execute in
* syncing context.
*/
if ((spa->spa_to_condense.ds != NULL) &&
!spa->spa_to_condense.syncing) {
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf,
spa);
spa->spa_to_condense.ds = NULL;
}
}
dsl_dir_livelist_close(dd);
VERIFY0(zap_lookup(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &obj));
VERIFY0(zap_remove(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_LIVELIST, tx));
if (total) {
dsl_deadlist_free(dp->dp_meta_objset, obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
}
}
static int
dsl_dir_activity_in_progress(dsl_dir_t *dd, dsl_dataset_t *ds,
zfs_wait_activity_t activity, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&dd->dd_activity_lock));
switch (activity) {
case ZFS_WAIT_DELETEQ: {
#ifdef _KERNEL
objset_t *os;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
break;
mutex_enter(&os->os_user_ptr_lock);
void *user = dmu_objset_get_user(os);
mutex_exit(&os->os_user_ptr_lock);
if (dmu_objset_type(os) != DMU_OST_ZFS ||
user == NULL || zfs_get_vfs_flag_unmounted(os)) {
*in_progress = B_FALSE;
return (0);
}
uint64_t readonly = B_FALSE;
error = zfs_get_temporary_prop(ds, ZFS_PROP_READONLY, &readonly,
NULL);
if (error != 0)
break;
if (readonly || !spa_writeable(dd->dd_pool->dp_spa)) {
*in_progress = B_FALSE;
return (0);
}
uint64_t count, unlinked_obj;
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&unlinked_obj);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
break;
}
error = zap_count(os, unlinked_obj, &count);
if (error == 0)
*in_progress = (count != 0);
break;
#else
/*
* The delete queue is ZPL specific, and libzpool doesn't have
* it. It doesn't make sense to wait for it.
*/
(void) ds;
*in_progress = B_FALSE;
break;
#endif
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
int
dsl_dir_wait(dsl_dir_t *dd, dsl_dataset_t *ds, zfs_wait_activity_t activity,
boolean_t *waited)
{
int error = 0;
boolean_t in_progress;
dsl_pool_t *dp = dd->dd_pool;
for (;;) {
dsl_pool_config_enter(dp, FTAG);
error = dsl_dir_activity_in_progress(dd, ds, activity,
&in_progress);
dsl_pool_config_exit(dp, FTAG);
if (error != 0 || !in_progress)
break;
*waited = B_TRUE;
if (cv_wait_sig(&dd->dd_activity_cv, &dd->dd_activity_lock) ==
0 || dd->dd_activity_cancelled) {
error = SET_ERROR(EINTR);
break;
}
}
return (error);
}
void
dsl_dir_cancel_waiters(dsl_dir_t *dd)
{
mutex_enter(&dd->dd_activity_lock);
dd->dd_activity_cancelled = B_TRUE;
cv_broadcast(&dd->dd_activity_cv);
while (dd->dd_activity_waiters > 0)
cv_wait(&dd->dd_activity_cv, &dd->dd_activity_lock);
mutex_exit(&dd->dd_activity_lock);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_dir_set_quota);
EXPORT_SYMBOL(dsl_dir_set_reservation);
#endif
/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , zvol_enforce_quotas, INT, ZMOD_RW,
"Enable strict ZVOL quota enforcment");
diff --git a/sys/contrib/openzfs/module/zfs/dsl_scan.c b/sys/contrib/openzfs/module/zfs/dsl_scan.c
index 07a527e332bc..1dd44171c10e 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_scan.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_scan.c
@@ -1,5247 +1,5250 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2016 Gary Mills
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright 2019 Joyent, Inc.
*/
#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#include <sys/dbuf.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/*
* Grand theory statement on scan queue sorting
*
* Scanning is implemented by recursively traversing all indirection levels
* in an object and reading all blocks referenced from said objects. This
* results in us approximately traversing the object from lowest logical
* offset to the highest. For best performance, we would want the logical
* blocks to be physically contiguous. However, this is frequently not the
* case with pools given the allocation patterns of copy-on-write filesystems.
* So instead, we put the I/Os into a reordering queue and issue them in a
* way that will most benefit physical disks (LBA-order).
*
* Queue management:
*
* Ideally, we would want to scan all metadata and queue up all block I/O
* prior to starting to issue it, because that allows us to do an optimal
* sorting job. This can however consume large amounts of memory. Therefore
* we continuously monitor the size of the queues and constrain them to 5%
* (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
* limit, we clear out a few of the largest extents at the head of the queues
* to make room for more scanning. Hopefully, these extents will be fairly
* large and contiguous, allowing us to approach sequential I/O throughput
* even without a fully sorted tree.
*
* Metadata scanning takes place in dsl_scan_visit(), which is called from
* dsl_scan_sync() every spa_sync(). If we have either fully scanned all
* metadata on the pool, or we need to make room in memory because our
* queues are too large, dsl_scan_visit() is postponed and
* scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
* that metadata scanning and queued I/O issuing are mutually exclusive. This
* allows us to provide maximum sequential I/O throughput for the majority of
* I/O's issued since sequential I/O performance is significantly negatively
* impacted if it is interleaved with random I/O.
*
* Implementation Notes
*
* One side effect of the queued scanning algorithm is that the scanning code
* needs to be notified whenever a block is freed. This is needed to allow
* the scanning code to remove these I/Os from the issuing queue. Additionally,
* we do not attempt to queue gang blocks to be issued sequentially since this
* is very hard to do and would have an extremely limited performance benefit.
* Instead, we simply issue gang I/Os as soon as we find them using the legacy
* algorithm.
*
* Backwards compatibility
*
* This new algorithm is backwards compatible with the legacy on-disk data
* structures (and therefore does not require a new feature flag).
* Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
* will stop scanning metadata (in logical order) and wait for all outstanding
* sorted I/O to complete. Once this is done, we write out a checkpoint
* bookmark, indicating that we have scanned everything logically before it.
* If the pool is imported on a machine without the new sorting algorithm,
* the scan simply resumes from the last checkpoint using the legacy algorithm.
*/
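/*
 * Worked example of the memory cap described above (hypothetical
 * machine): with 64 GiB of physical memory and the default
 * zfs_scan_mem_lim_fact of 20, the sorting queues are limited to
 * roughly 64 GiB / 20 ~= 3.2 GiB of queued extents; once that limit
 * is reached, the largest queued extents are issued to disk to make
 * room before metadata scanning resumes.
 */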
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
const zbookmark_phys_t *);
static scan_cb_t dsl_scan_scrub_cb;
static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(spa_t *spa);
static void read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb);
extern uint_t zfs_vdev_async_write_active_min_dirty_percent;
static int zfs_scan_blkstats = 0;
/*
* 'zpool status' uses bytes processed per pass to report throughput and
* estimate time remaining. We define a pass to start when the scanning
* phase completes for a sequential resilver. Optionally, this value
* may be used to reset the pass statistics every N txgs to provide an
* estimated completion time based on currently observed performance.
*/
static uint_t zfs_scan_report_txgs = 0;
/*
* By default zfs will check to ensure it is not over the hard memory
* limit before each txg. If finer-grained control of this is needed
* this value can be set to 1 to enable checking before scanning each
* block.
*/
static int zfs_scan_strict_mem_lim = B_FALSE;
/*
* Maximum number of in-flight bytes per leaf vdev. We attempt
* to strike a balance here between keeping the vdev queues full of I/Os
* at all times and not overflowing the queues to cause long latency,
* which would cause long txg sync times. No matter what, we will not
* overload the drives with I/O, since that is protected by
* zfs_vdev_scrub_max_active.
*/
static uint64_t zfs_scan_vdev_limit = 16 << 20;
static uint_t zfs_scan_issue_strategy = 0;
/* don't queue & sort zios, go direct */
static int zfs_scan_legacy = B_FALSE;
static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
/*
* fill_weight is non-tunable at runtime, so we copy it at module init from
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
* break queue sorting.
*/
static uint_t zfs_scan_fill_weight = 3;
static uint64_t fill_weight;
/* See dsl_scan_should_clear() for details on the memory limit tunables */
static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
/* fraction of physmem */
static uint_t zfs_scan_mem_lim_fact = 20;
/* fraction of mem lim above */
static uint_t zfs_scan_mem_lim_soft_fact = 20;
/* minimum milliseconds to scrub per txg */
static uint_t zfs_scrub_min_time_ms = 1000;
/* minimum milliseconds to obsolete per txg */
static uint_t zfs_obsolete_min_time_ms = 500;
/* minimum milliseconds to free per txg */
static uint_t zfs_free_min_time_ms = 1000;
/* minimum milliseconds to resilver per txg */
static uint_t zfs_resilver_min_time_ms = 3000;
static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
static uint64_t zfs_async_block_max_blocks = UINT64_MAX;
/* max number of dedup blocks to free in a single TXG */
static uint64_t zfs_max_async_dedup_frees = 100000;
/* set to disable resilver deferring */
static int zfs_resilver_disable_defer = B_FALSE;
/*
* We wait a few txgs after importing a pool to begin scanning so that
* the import / mounting code isn't held up by scrub / resilver IO.
* Unfortunately, it is a bit difficult to determine exactly how long
* this will take since userspace will trigger fs mounts asynchronously
* and the kernel will create zvol minors asynchronously. As a result,
* the value provided here is a bit arbitrary, but represents a
* reasonable estimate of how many txgs it will take to finish fully
* importing a pool
*/
#define SCAN_IMPORT_WAIT_TXGS 5
#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
/*
* Enable/disable the processing of the free_bpobj object.
*/
static int zfs_free_bpobj_enabled = 1;
/* Error blocks to be scrubbed in one txg. */
-uint_t zfs_scrub_error_blocks_per_txg = 1 << 12;
+static uint_t zfs_scrub_error_blocks_per_txg = 1 << 12;
/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
NULL,
dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */
dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */
};
/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
uint64_t sds_dsobj;
uint64_t sds_txg;
avl_node_t sds_node;
} scan_ds_t;
/*
* This controls what conditions are placed on dsl_scan_sync_state():
* SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0
* SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0.
* SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise
* write out the scn_phys_cached version.
* See dsl_scan_sync_state for details.
*/
typedef enum {
SYNC_OPTIONAL,
SYNC_MANDATORY,
SYNC_CACHED
} state_sync_type_t;
/*
* This struct represents the minimum information needed to reconstruct a
* zio for sequential scanning. This is useful because many of these will
* accumulate in the sequential IO queues before being issued, so saving
* memory matters here.
*/
typedef struct scan_io {
/* fields from blkptr_t */
uint64_t sio_blk_prop;
uint64_t sio_phys_birth;
uint64_t sio_birth;
zio_cksum_t sio_cksum;
uint32_t sio_nr_dvas;
/* fields from zio_t */
uint32_t sio_flags;
zbookmark_phys_t sio_zb;
/* members for queue sorting */
union {
avl_node_t sio_addr_node; /* link into issuing queue */
list_node_t sio_list_node; /* link for issuing to disk */
} sio_nodes;
/*
* There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
* depending on how many were in the original bp. Only the
* first DVA is really used for sorting and issuing purposes.
* The other DVAs (if provided) simply exist so that the zio
* layer can find additional copies to repair from in the
* event of an error. This array must go at the end of the
* struct to allow this for the variable number of elements.
*/
dva_t sio_dva[];
} scan_io_t;
#define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
#define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
#define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0])
#define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0])
#define SIO_GET_END_OFFSET(sio) \
(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
#define SIO_GET_MUSED(sio) \
(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
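/*
 * Worked example of the accounting above: a scan_io_t for a
 * single-copy block carries one 16-byte dva_t, so SIO_GET_MUSED()
 * charges sizeof (scan_io_t) + 16 bytes against q_sio_memused, while
 * a three-copy block is charged sizeof (scan_io_t) + 48 bytes. Either
 * way this is considerably smaller than keeping a full 128-byte
 * blkptr_t plus a zio_t for every block waiting in the sorted queues.
 */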
struct dsl_scan_io_queue {
dsl_scan_t *q_scn; /* associated dsl_scan_t */
vdev_t *q_vd; /* top-level vdev that this queue represents */
zio_t *q_zio; /* scn_zio_root child for waiting on IO */
/* trees used for sorting I/Os and extents of I/Os */
range_tree_t *q_exts_by_addr;
zfs_btree_t q_exts_by_size;
avl_tree_t q_sios_by_addr;
uint64_t q_sio_memused;
uint64_t q_last_ext_addr;
/* members for zio rate limiting */
uint64_t q_maxinflight_bytes;
uint64_t q_inflight_bytes;
kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
/* per txg statistics */
uint64_t q_total_seg_size_this_txg;
uint64_t q_segs_this_txg;
uint64_t q_total_zio_size_this_txg;
uint64_t q_zios_this_txg;
};
/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
zfs_refcount_t spc_refcnt; /* refcount for memory management */
dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */
boolean_t spc_root; /* is this prefetch for an objset? */
uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */
uint16_t spc_datablkszsec; /* dn_idatablkszsec of current dnode */
} scan_prefetch_ctx_t;
/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */
scan_prefetch_ctx_t *spic_spc; /* spc for the callback */
blkptr_t spic_bp; /* bp to prefetch */
zbookmark_phys_t spic_zb; /* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;
static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
scan_io_t *sio);
static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);
static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
/* sio->sio_nr_dvas must be set so we know which cache to free from */
static void
sio_free(scan_io_t *sio)
{
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
}
/* It is up to the caller to set sio->sio_nr_dvas for freeing */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
ASSERT3U(nr_dvas, >, 0);
ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);
return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
}
void
scan_init(void)
{
/*
* This is used in ext_size_compare() to weight segments
* based on how sparse they are. This cannot be changed
* mid-scan and the tree comparison functions don't currently
* have a mechanism for passing additional context to the
* compare functions. Thus we store this value globally and
* we only allow it to be set at module initialization time.
*/
fill_weight = zfs_scan_fill_weight;
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
char name[36];
(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
sio_cache[i] = kmem_cache_create(name,
(sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
0, NULL, NULL, NULL, NULL, NULL, 0);
}
}
void
scan_fini(void)
{
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
kmem_cache_destroy(sio_cache[i]);
}
}
static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
return (scn->scn_phys.scn_state == DSS_SCANNING);
}
boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
return (dsl_scan_is_running(dp->dp_scan) &&
dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
memset(bp, 0, sizeof (*bp));
bp->blk_prop = sio->sio_blk_prop;
bp->blk_phys_birth = sio->sio_phys_birth;
bp->blk_birth = sio->sio_birth;
bp->blk_fill = 1; /* we always only work with data pointers */
bp->blk_cksum = sio->sio_cksum;
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t));
}
static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
sio->sio_blk_prop = bp->blk_prop;
sio->sio_phys_birth = bp->blk_phys_birth;
sio->sio_birth = bp->blk_birth;
sio->sio_cksum = bp->blk_cksum;
sio->sio_nr_dvas = BP_GET_NDVAS(bp);
/*
* Copy the DVAs to the sio. We need all copies of the block so
* that the self healing code can use the alternate copies if the
* first is corrupted. We want the DVA at index dva_i to be first
* in the sio since this is the primary one that we want to issue.
*/
for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
}
}
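/*
 * Worked example of the DVA rotation above: for a block with three
 * copies and dva_i == 1, the sio receives the DVAs in the order
 * [1, 2, 0]. DVA 1 sorts and issues first because it is the primary
 * copy this entry will be issued against, while copies 2 and 0 remain
 * available for self healing if that read fails checksum.
 */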
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
int err;
dsl_scan_t *scn;
spa_t *spa = dp->dp_spa;
uint64_t f;
scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
scn->scn_dp = dp;
/*
* It's possible that we're resuming a scan after a reboot so
* make sure that the scan_async_destroying flag is initialized
* appropriately.
*/
ASSERT(!scn->scn_async_destroying);
scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY);
/*
* Calculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
* Limits for the issuing phase are done per top-level vdev and
* are handled separately.
*/
scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
offsetof(scan_ds_t, sds_node));
avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
sizeof (scan_prefetch_issue_ctx_t),
offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_func", sizeof (uint64_t), 1, &f);
if (err == 0) {
/*
* There was an old-style scrub in progress. Restart a
* new-style scrub from the beginning.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("old-style scrub was in progress for %s; "
"restarting new-style scrub in txg %llu",
spa->spa_name,
(longlong_t)scn->scn_restart_txg);
/*
* Load the queue obj from the old location so that it
* can be freed by dsl_scan_done().
*/
(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_queue", sizeof (uint64_t), 1,
&scn->scn_phys.scn_queue_obj);
} else {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ERRORSCRUB, sizeof (uint64_t),
ERRORSCRUB_PHYS_NUMINTS, &scn->errorscrub_phys);
if (err != 0 && err != ENOENT)
return (err);
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys);
/*
* Detect if the pool contains the signature of #2094. If it
* does, properly update the scn->scn_phys structure and notify
* the administrator by setting an errata for the pool.
*/
if (err == EOVERFLOW) {
uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
(23 * sizeof (uint64_t)));
err = zap_lookup(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
if (err == 0) {
uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
if (overflow & ~DSL_SCAN_FLAGS_MASK ||
scn->scn_async_destroying) {
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
return (EOVERFLOW);
}
memcpy(&scn->scn_phys, zaptmp,
SCAN_PHYS_NUMINTS * sizeof (uint64_t));
scn->scn_phys.scn_flags = overflow;
/* Required scrub already in progress. */
if (scn->scn_phys.scn_state == DSS_FINISHED ||
scn->scn_phys.scn_state == DSS_CANCELED)
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_SCRUB;
}
}
if (err == ENOENT)
return (0);
else if (err)
return (err);
/*
* We might be restarting after a reboot, so jump the issued
* counter to how far we've scanned. We know we're consistent
* up to here.
*/
scn->scn_issued_before_pass = scn->scn_phys.scn_examined;
if (dsl_scan_is_running(scn) &&
spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
/*
* A new-type scrub was in progress on an old
* pool, and the pool was accessed by old
* software. Restart from the beginning, since
* the old software may have changed the pool in
* the meantime.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("new-style scrub for %s was modified "
"by old software; restarting in txg %llu",
spa->spa_name,
(longlong_t)scn->scn_restart_txg);
} else if (dsl_scan_resilvering(dp)) {
/*
* If a resilver is in progress and there are already
* errors, restart it instead of finishing this scan and
* then restarting it. If there haven't been any errors
* then remember that the incore DTL is valid.
*/
if (scn->scn_phys.scn_errors > 0) {
scn->scn_restart_txg = txg;
zfs_dbgmsg("resilver can't excise DTL_MISSING "
"when finished; restarting on %s in txg "
"%llu",
spa->spa_name,
(u_longlong_t)scn->scn_restart_txg);
} else {
/* it's safe to excise DTL when finished */
spa->spa_scrub_started = B_TRUE;
}
}
}
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
za.za_first_integer);
}
zap_cursor_fini(&zc);
}
spa_scan_stat_init(spa);
vdev_scan_stat_init(spa->spa_root_vdev);
return (0);
}
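/*
 * Worked example of the in-flight limit computed above (hypothetical
 * pool): with 8 data disks and the default zfs_scan_vdev_limit of
 * 16 MiB, scn_maxinflight_bytes starts at 128 MiB, but it is never
 * allowed to exceed one quarter of arc_c_max and never drops below
 * 1 MiB no matter how few data disks the pool has.
 */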
void
dsl_scan_fini(dsl_pool_t *dp)
{
if (dp->dp_scan != NULL) {
dsl_scan_t *scn = dp->dp_scan;
if (scn->scn_taskq != NULL)
taskq_destroy(scn->scn_taskq);
scan_ds_queue_clear(scn);
avl_destroy(&scn->scn_queue);
scan_ds_prefetch_queue_clear(scn);
avl_destroy(&scn->scn_prefetch_queue);
kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
dp->dp_scan = NULL;
}
}
static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
return (scn->scn_restart_txg != 0 &&
scn->scn_restart_txg <= tx->tx_txg);
}
boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
{
return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
(spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
}
boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
return (scn_phys->scn_state == DSS_SCANNING &&
scn_phys->scn_func == POOL_SCAN_SCRUB);
}
boolean_t
dsl_errorscrubbing(const dsl_pool_t *dp)
{
dsl_errorscrub_phys_t *errorscrub_phys = &dp->dp_scan->errorscrub_phys;
return (errorscrub_phys->dep_state == DSS_ERRORSCRUBBING &&
errorscrub_phys->dep_func == POOL_SCAN_ERRORSCRUB);
}
boolean_t
dsl_errorscrub_is_paused(const dsl_scan_t *scn)
{
return (dsl_errorscrubbing(scn->scn_dp) &&
scn->errorscrub_phys.dep_paused_flags);
}
boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
return (dsl_scan_scrubbing(scn->scn_dp) &&
scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}
static void
dsl_errorscrub_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
scn->errorscrub_phys.dep_cursor =
zap_cursor_serialize(&scn->errorscrub_cursor);
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ERRORSCRUB, sizeof (uint64_t), ERRORSCRUB_PHYS_NUMINTS,
&scn->errorscrub_phys, tx));
}
static void
dsl_errorscrub_setup_sync(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(!dsl_errorscrubbing(scn->scn_dp));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
scn->errorscrub_phys.dep_func = *funcp;
scn->errorscrub_phys.dep_state = DSS_ERRORSCRUBBING;
scn->errorscrub_phys.dep_start_time = gethrestime_sec();
scn->errorscrub_phys.dep_to_examine = spa_get_last_errlog_size(spa);
scn->errorscrub_phys.dep_examined = 0;
scn->errorscrub_phys.dep_errors = 0;
scn->errorscrub_phys.dep_cursor = 0;
zap_cursor_init_serialized(&scn->errorscrub_cursor,
spa->spa_meta_objset, spa->spa_errlog_last,
scn->errorscrub_phys.dep_cursor);
vdev_config_dirty(spa->spa_root_vdev);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_START);
dsl_errorscrub_sync_state(scn, tx);
spa_history_log_internal(spa, "error scrub setup", tx,
"func=%u mintxg=%u maxtxg=%llu",
*funcp, 0, (u_longlong_t)tx->tx_txg);
}
static int
dsl_errorscrub_setup_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (dsl_scan_is_running(scn) || (dsl_errorscrubbing(scn->scn_dp))) {
return (SET_ERROR(EBUSY));
}
if (spa_get_last_errlog_size(scn->scn_dp->dp_spa) == 0) {
return (ECANCELED);
}
return (0);
}
/*
* Writes out a persistent dsl_scan_phys_t record to the pool directory.
* Because we can be running in the block sorting algorithm, we do not always
* want to write out the record, only when it is "safe" to do so. This safety
* condition is achieved by making sure that the sorting queues are empty
* (scn_queues_pending == 0). When this condition is not true, the sync'd state
* is inconsistent with how much actual scanning progress has been made. The
* kind of sync to be performed is specified by the sync_type argument. If the
* sync is optional, we only sync if the queues are empty. If the sync is
* mandatory, we do a hard ASSERT to make sure that the queues are empty. The
* third possible sync type is a "cached" sync. This is done in response to:
* 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
* destroyed, so we wouldn't be able to restart scanning from it.
* 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
* superseded by a newer snapshot.
* 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
* swapped with its clone.
* In all cases, a cached sync simply rewrites the last record we've written,
* just slightly modified. For the modifications that are performed to the
* last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
* dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
*/
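/*
 * A condensed sketch of the logic below (informal, for orientation only):
 *
 *     if (scn_queues_pending == 0)
 *             write scn_phys and refresh scn_phys_cached  (any sync_type)
 *     else if (sync_type == SYNC_CACHED)
 *             rewrite the previously synced scn_phys_cached
 *     else
 *             skip the write (SYNC_OPTIONAL); SYNC_MANDATORY would already
 *             have tripped the ASSERT, since it requires empty queues
 */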
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
int i;
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0);
if (scn->scn_queues_pending == 0) {
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
if (q == NULL)
continue;
mutex_enter(&vd->vdev_scan_io_queue_lock);
ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
NULL);
ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
if (scn->scn_phys.scn_queue_obj != 0)
scan_ds_queue_sync(scn, tx);
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys, tx));
memcpy(&scn->scn_phys_cached, &scn->scn_phys,
sizeof (scn->scn_phys));
if (scn->scn_checkpointing)
zfs_dbgmsg("finish scan checkpoint for %s",
spa->spa_name);
scn->scn_checkpointing = B_FALSE;
scn->scn_last_checkpoint = ddi_get_lbolt();
} else if (sync_type == SYNC_CACHED) {
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys_cached, tx));
}
}
int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd) ||
dsl_errorscrubbing(scn->scn_dp))
return (SET_ERROR(EBUSY));
return (0);
}
void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dmu_object_type_t ot = 0;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
/*
* If we are starting a fresh scrub, we erase the error scrub
* information from disk.
*/
memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
dsl_errorscrub_sync_state(scn, tx);
scn->scn_phys.scn_func = *funcp;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = 0;
scn->scn_phys.scn_max_txg = tx->tx_txg;
scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
scn->scn_phys.scn_start_time = gethrestime_sec();
scn->scn_phys.scn_errors = 0;
scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
scn->scn_issued_before_pass = 0;
scn->scn_restart_txg = 0;
scn->scn_done_txg = 0;
scn->scn_last_checkpoint = 0;
scn->scn_checkpointing = B_FALSE;
spa_scan_stat_init(spa);
vdev_scan_stat_init(spa->spa_root_vdev);
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
/* rewrite all disk labels */
vdev_config_dirty(spa->spa_root_vdev);
if (vdev_resilver_needed(spa->spa_root_vdev,
&scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_START);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
}
spa->spa_scrub_started = B_TRUE;
/*
* If this is an incremental scrub, limit the DDT scrub phase
* to just the auto-ditto class (for correctness); the rest
* of the scrub should go faster using top-down pruning.
*/
if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
/*
* When starting a resilver, clear any existing rebuild state.
* This is required to prevent stale rebuild status from being
* reported when a rebuild is run, then a resilver, and finally
* a scrub, in which case only the scrub status should be
* reported by 'zpool status'.
*/
if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
vdev_rebuild_clear_sync(
(void *)(uintptr_t)vd->vdev_id, tx);
}
}
}
/* back to the generic stuff */
if (zfs_scan_blkstats) {
if (dp->dp_blkstats == NULL) {
dp->dp_blkstats =
vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
}
memset(&dp->dp_blkstats->zab_type, 0,
sizeof (dp->dp_blkstats->zab_type));
} else {
if (dp->dp_blkstats) {
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
dp->dp_blkstats = NULL;
}
}
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER;
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys));
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_history_log_internal(spa, "scan setup", tx,
"func=%u mintxg=%llu maxtxg=%llu",
*funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
(u_longlong_t)scn->scn_phys.scn_max_txg);
}
/*
* Called by ZFS_IOC_POOL_SCRUB and ZFS_IOC_POOL_SCAN ioctl to start a scrub,
* error scrub or resilver. Can also be called to resume a paused scrub or
* error scrub.
*/
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
/*
* Purge all vdev caches and probe all devices. We do this here
* rather than in sync context because this requires a writer lock
* on the spa_config lock, which we can't do from sync context. The
* spa_scrub_reopen flag indicates that vdev_open() should not
* attempt to start another scrub.
*/
spa_vdev_state_enter(spa, SCL_NONE);
spa->spa_scrub_reopen = B_TRUE;
vdev_reopen(spa->spa_root_vdev);
spa->spa_scrub_reopen = B_FALSE;
(void) spa_vdev_state_exit(spa, NULL, 0);
if (func == POOL_SCAN_RESILVER) {
dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
return (0);
}
if (func == POOL_SCAN_ERRORSCRUB) {
if (dsl_errorscrub_is_paused(dp->dp_scan)) {
/*
* got error scrub start cmd, resume paused error scrub.
*/
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
POOL_SCRUB_NORMAL);
if (err == 0) {
spa_event_notify(spa, NULL, NULL,
ESC_ZFS_ERRORSCRUB_RESUME);
return (ECANCELED);
}
return (SET_ERROR(err));
}
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_errorscrub_setup_check, dsl_errorscrub_setup_sync,
&func, 0, ZFS_SPACE_CHECK_RESERVED));
}
if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
/* got scrub start cmd, resume paused scrub */
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
POOL_SCRUB_NORMAL);
if (err == 0) {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
return (SET_ERROR(ECANCELED));
}
return (SET_ERROR(err));
}
return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static void
dsl_errorscrub_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
if (complete) {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_FINISH);
spa_history_log_internal(spa, "error scrub done", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
} else {
spa_history_log_internal(spa, "error scrub canceled", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
}
scn->errorscrub_phys.dep_state = complete ? DSS_FINISHED : DSS_CANCELED;
spa->spa_scrub_active = B_FALSE;
spa_errlog_rotate(spa);
scn->errorscrub_phys.dep_end_time = gethrestime_sec();
zap_cursor_fini(&scn->errorscrub_cursor);
if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
spa->spa_errata = 0;
ASSERT(!dsl_errorscrubbing(scn->scn_dp));
}
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
static const char *old_names[] = {
"scrub_bookmark",
"scrub_ddt_bookmark",
"scrub_ddt_class_max",
"scrub_queue",
"scrub_min_txg",
"scrub_max_txg",
"scrub_func",
"scrub_errors",
NULL
};
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int i;
/* Remove any remnants of an old-style scrub. */
for (i = 0; old_names[i]; i++) {
(void) zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
}
if (scn->scn_phys.scn_queue_obj != 0) {
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = 0;
}
scan_ds_queue_clear(scn);
scan_ds_prefetch_queue_clear(scn);
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
/*
* If we were "restarted" from a stopped state, don't bother
* with anything else.
*/
if (!dsl_scan_is_running(scn)) {
ASSERT(!scn->scn_is_sorted);
return;
}
if (scn->scn_is_sorted) {
scan_io_queues_destroy(scn);
scn->scn_is_sorted = B_FALSE;
if (scn->scn_taskq != NULL) {
taskq_destroy(scn->scn_taskq);
scn->scn_taskq = NULL;
}
}
scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
spa_notify_waiters(spa);
if (dsl_scan_restarting(scn, tx))
spa_history_log_internal(spa, "scan aborted, restarting", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else if (!complete)
spa_history_log_internal(spa, "scan cancelled", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
else
spa_history_log_internal(spa, "scan done", tx,
"errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_active = B_FALSE;
/*
* If the scrub/resilver completed, update all DTLs to
* reflect this. Whether it succeeded or not, vacate
* all temporary scrub DTLs.
*
* As the scrub does not currently support traversing
* data that have been freed but are part of a checkpoint,
* we don't mark the scrub as done in the DTLs as faults
* may still exist in those vdevs.
*/
if (complete &&
!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);
if (scn->scn_phys.scn_min_txg) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_FINISH);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL,
ESC_ZFS_SCRUB_FINISH);
}
} else {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
0, B_TRUE, B_FALSE);
}
spa_errlog_rotate(spa);
/*
* Don't clear flag until after vdev_dtl_reassess to ensure that
* DTL_MISSING will get updated when possible.
*/
spa->spa_scrub_started = B_FALSE;
/*
* We may have finished replacing a device.
* Let the async thread assess this and handle the detach.
*/
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
/*
* Clear any resilver_deferred flags in the config.
* If there are drives that need resilvering, kick
* off an asynchronous request to start resilver.
* vdev_clear_resilver_deferred() may update the config
* before the resilver can restart. In the event of
* a crash during this period, the spa loading code
* will find the drives that need to be resilvered
* and start the resilver then.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
spa_history_log_internal(spa,
"starting deferred resilver", tx, "errors=%llu",
(u_longlong_t)spa_approx_errlog_size(spa));
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/* Clear recent error events (i.e. duplicate events tracking) */
if (complete)
zfs_ereport_clear(spa, NULL);
}
scn->scn_phys.scn_end_time = gethrestime_sec();
if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
spa->spa_errata = 0;
ASSERT(!dsl_scan_is_running(scn));
}
static int
dsl_errorscrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/*
* can't pause an error scrub when there is no in-progress
* error scrub.
*/
if (!dsl_errorscrubbing(dp))
return (SET_ERROR(ENOENT));
/* can't pause a paused error scrub */
if (dsl_errorscrub_is_paused(scn))
return (SET_ERROR(EBUSY));
} else if (*cmd != POOL_SCRUB_NORMAL) {
return (SET_ERROR(ENOTSUP));
}
return (0);
}
static void
dsl_errorscrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
spa->spa_scan_pass_errorscrub_pause = gethrestime_sec();
scn->errorscrub_phys.dep_paused_flags = B_TRUE;
dsl_errorscrub_sync_state(scn, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_PAUSED);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_errorscrub_is_paused(scn)) {
/*
* We need to keep track of how much time we spend
* paused per pass so that we can adjust the error scrub
* rate shown in the output of 'zpool status'.
*/
spa->spa_scan_pass_errorscrub_spent_paused +=
gethrestime_sec() -
spa->spa_scan_pass_errorscrub_pause;
spa->spa_scan_pass_errorscrub_pause = 0;
scn->errorscrub_phys.dep_paused_flags = B_FALSE;
zap_cursor_init_serialized(
&scn->errorscrub_cursor,
spa->spa_meta_objset, spa->spa_errlog_last,
scn->errorscrub_phys.dep_cursor);
dsl_errorscrub_sync_state(scn, tx);
}
}
}
static int
dsl_errorscrub_cancel_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
/* can't cancel an error scrub when there is none in progress */
if (!dsl_errorscrubbing(scn->scn_dp))
return (SET_ERROR(ENOENT));
return (0);
}
static void
dsl_errorscrub_cancel_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_errorscrub_done(scn, B_FALSE, tx);
dsl_errorscrub_sync_state(scn, tx);
spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL,
ESC_ZFS_ERRORSCRUB_ABORT);
}
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (!dsl_scan_is_running(scn))
return (SET_ERROR(ENOENT));
return (0);
}
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
(void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_scan_done(scn, B_FALSE, tx);
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}
int
dsl_scan_cancel(dsl_pool_t *dp)
{
if (dsl_errorscrubbing(dp)) {
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_errorscrub_cancel_check, dsl_errorscrub_cancel_sync,
NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* can't pause a scrub when there is no in-progress scrub */
if (!dsl_scan_scrubbing(dp))
return (SET_ERROR(ENOENT));
/* can't pause a paused scrub */
if (dsl_scan_is_paused_scrub(scn))
return (SET_ERROR(EBUSY));
} else if (*cmd != POOL_SCRUB_NORMAL) {
return (SET_ERROR(ENOTSUP));
}
return (0);
}
static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* record the time at which the in-progress scrub was paused */
spa->spa_scan_pass_scrub_pause = gethrestime_sec();
scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
spa_notify_waiters(spa);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_scan_is_paused_scrub(scn)) {
/*
* We need to keep track of how much time we spend
* paused per pass so that we can adjust the scrub rate
* shown in the output of 'zpool status'
*/
spa->spa_scan_pass_scrub_spent_paused +=
gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
spa->spa_scan_pass_scrub_pause = 0;
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
}
}
/*
* Set scrub pause/resume state if it makes sense to do so
*/
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
if (dsl_errorscrubbing(dp)) {
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_errorscrub_pause_resume_check,
dsl_errorscrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
}
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
}
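/*
 * Typical call path (a sketch; the userland side is not part of this file):
 * a pause request such as "zpool scrub -p <pool>" reaches this function with
 * POOL_SCRUB_PAUSE, while re-issuing "zpool scrub" on a paused scrub goes
 * through dsl_scan(), which resumes it by calling
 * dsl_scrub_set_pause_resume(dp, POOL_SCRUB_NORMAL) as seen above.
 */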
/* Schedule a resilver to start, or restart, at the given txg (0 means the current txg). */
void
dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
{
if (txg == 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_dd(dp->dp_mos_dir);
VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
txg = dmu_tx_get_txg(tx);
dp->dp_scan->scn_restart_txg = txg;
dmu_tx_commit(tx);
} else {
dp->dp_scan->scn_restart_txg = txg;
}
zfs_dbgmsg("restarting resilver for %s at txg=%llu",
dp->dp_spa->spa_name, (longlong_t)txg);
}
void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
zio_free(dp->dp_spa, txg, bp);
}
void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
ASSERT(dsl_pool_sync_context(dp));
zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}
static int
scan_ds_queue_compare(const void *a, const void *b)
{
const scan_ds_t *sds_a = a, *sds_b = b;
if (sds_a->sds_dsobj < sds_b->sds_dsobj)
return (-1);
if (sds_a->sds_dsobj == sds_b->sds_dsobj)
return (0);
return (1);
}
static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
void *cookie = NULL;
scan_ds_t *sds;
while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
kmem_free(sds, sizeof (*sds));
}
}
static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
if (sds != NULL && txg != NULL)
*txg = sds->sds_txg;
return (sds != NULL);
}
static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
scan_ds_t *sds;
avl_index_t where;
sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
sds->sds_dsobj = dsobj;
sds->sds_txg = txg;
VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
avl_insert(&scn->scn_queue, sds, where);
}
static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
VERIFY(sds != NULL);
avl_remove(&scn->scn_queue, sds);
kmem_free(sds, sizeof (*sds));
}
static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
ASSERT0(scn->scn_queues_pending);
ASSERT(scn->scn_phys.scn_queue_obj != 0);
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
DMU_OT_NONE, 0, tx);
for (scan_ds_t *sds = avl_first(&scn->scn_queue);
sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
VERIFY0(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
sds->sds_txg, tx));
}
}
/*
* Computes the memory limit state that we're currently in. A sorted scan
* needs quite a bit of memory to hold the sorting queue, so we need to
* reasonably constrain the size so it doesn't impact overall system
* performance. We compute two limits:
* 1) Hard memory limit: if the amount of memory used by the sorting
* queues on a pool gets above this value, we stop the metadata
* scanning portion and start issuing the queued up and sorted
* I/Os to reduce memory usage.
* This limit is calculated as a fraction of physmem (by default 5%).
* We constrain the lower bound of the hard limit to an absolute
* minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
* the upper bound to 5% of the total pool size - no chance we'll
* ever need that much memory, but just to keep the value in check.
* 2) Soft memory limit: once we hit the hard memory limit, we start
* issuing I/O to reduce queue memory usage, but we don't want to
* completely empty out the queues, since we might be able to find I/Os
* that will fill in the gaps of our non-sequential IOs at some point
* in the future. So we stop the issuing of I/Os once the amount of
* memory used drops below the soft limit (at which point we stop issuing
* I/O and start scanning metadata again).
*
* This limit is calculated by subtracting a fraction of the hard
* limit from the hard limit. By default this fraction is 5%, so
* the soft limit is 95% of the hard limit. We cap the size of the
* difference between the hard and soft limits at an absolute
* maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
* sufficient to not cause too frequent switching between the
* metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
* worth of queues is about 1.2 GiB of on-pool data, so scanning
* that should take at least a decent fraction of a second).
*/
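/*
 * Illustrative arithmetic (the physical memory and pool sizes here are
 * assumed, not taken from this code): with 32 GiB of physical memory,
 * 10 TiB allocated in the pool, and the default 5% factors described above:
 *
 *     mlim_hard = MAX(32 GiB / 20, 16 MiB)            = ~1.6 GiB
 *     mlim_hard = MIN(mlim_hard, 10 TiB / 20)         = ~1.6 GiB
 *     mlim_soft = mlim_hard -
 *                 MIN(mlim_hard / 20, 128 MiB)        = ~1.52 GiB
 *
 * so metadata scanning stops once queue memory reaches ~1.6 GiB, and I/O
 * issuing continues until usage drops back below ~1.52 GiB.
 */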
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
uint64_t alloc, mlim_hard, mlim_soft, mused;
alloc = metaslab_class_get_alloc(spa_normal_class(spa));
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
zfs_scan_mem_lim_min);
mlim_hard = MIN(mlim_hard, alloc / 20);
mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
zfs_scan_mem_lim_soft_max);
mused = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
dsl_scan_io_queue_t *queue;
mutex_enter(&tvd->vdev_scan_io_queue_lock);
queue = tvd->vdev_scan_io_queue;
if (queue != NULL) {
/*
* # of extents in exts_by_addr = # in exts_by_size.
* B-tree efficiency is ~75%, but can be as low as 50%.
*/
mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
((sizeof (range_seg_gap_t) + sizeof (uint64_t)) *
3 / 2) + queue->q_sio_memused;
}
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
if (mused == 0)
ASSERT0(scn->scn_queues_pending);
/*
* If we are above our hard limit, we need to clear out memory.
* If we are below our soft limit, we need to accumulate sequential IOs.
* Otherwise, we should keep doing whatever we are currently doing.
*/
if (mused >= mlim_hard)
return (B_TRUE);
else if (mused < mlim_soft)
return (B_FALSE);
else
return (scn->scn_clearing);
}
static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
/* we never skip user/group accounting objects */
if (zb && (int64_t)zb->zb_object < 0)
return (B_FALSE);
if (scn->scn_suspending)
return (B_TRUE); /* we're already suspending */
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
return (B_FALSE); /* we're resuming */
/* We only know how to resume from level-0 and objset blocks. */
if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
return (B_FALSE);
/*
* We suspend if:
* - we have scanned for at least the minimum time (default 1 sec
* for scrub, 3 sec for resilver), and either we have sufficient
* dirty data that we are starting to write more quickly
* (default 30%), someone is explicitly waiting for this txg
* to complete, or we have used up all of the time in the txg
* timeout (default 5 sec).
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
* or
* - the scan queue has reached its memory use limit
*/
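/*
 * Illustrative arithmetic (zfs_dirty_data_max = 4 GiB is an assumed value):
 * with the default 30% threshold mentioned above, the dirty-data trigger
 * computed below works out to
 *
 *     dirty_min_bytes = 4 GiB * 30 / 100 = ~1.2 GiB
 *
 * i.e. once ~1.2 GiB of dirty data accumulates (and the minimum scan time
 * has elapsed), the scan yields so the txg can sync.
 */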
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
if ((NSEC2MSEC(scan_time_ns) > mintime &&
(scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa) ||
(zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
dprintf("suspending at first available bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
zb->zb_objset, 0, 0, 0);
} else if (zb != NULL) {
dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
scn->scn_phys.scn_bookmark = *zb;
} else {
#ifdef ZFS_DEBUG
dsl_scan_phys_t *scnp = &scn->scn_phys;
dprintf("suspending at at DDT bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
#endif
}
scn->scn_suspending = B_TRUE;
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
dsl_error_scrub_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
/*
* We suspend if:
* - we have scrubbed for at least the minimum time (default 1 sec
* for error scrub), someone is explicitly waiting for this txg
* to complete, or we have used up all of the time in the txg
* timeout (default 5 sec).
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
*/
uint64_t curr_time_ns = gethrtime();
uint64_t error_scrub_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
int mintime = zfs_scrub_min_time_ms;
if ((NSEC2MSEC(error_scrub_time_ns) > mintime &&
(txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa)) {
if (zb) {
dprintf("error scrub suspending at bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
}
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct zil_scan_arg {
dsl_pool_t *zsa_dp;
zil_header_t *zsa_zh;
} zil_scan_arg_t;
static int
dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t claim_txg)
{
(void) zilog;
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* One block ("stubby") can be allocated a long time ago; we
* want to visit that one because it has been allocated
* (on-disk) even if it hasn't been claimed (even though for
* scrub there's nothing to do to it).
*/
if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
return (0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
return (0);
}
static int
dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
uint64_t claim_txg)
{
(void) zilog;
if (lrc->lrc_txtype == TX_WRITE) {
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
const lr_write_t *lr = (const lr_write_t *)lrc;
const blkptr_t *bp = &lr->lr_blkptr;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) ||
bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* birth can be < claim_txg if this record's txg is
* already txg sync'ed (but this log block contains
* other records that are not synced)
*/
if (claim_txg == 0 || bp->blk_birth < claim_txg)
return (0);
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
}
return (0);
}
static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
uint64_t claim_txg = zh->zh_claim_txg;
zil_scan_arg_t zsa = { dp, zh };
zilog_t *zilog;
ASSERT(spa_writeable(dp->dp_spa));
/*
* We only want to visit blocks that have been claimed but not yet
* replayed (or, in read-only mode, blocks that *would* be claimed).
*/
if (claim_txg == 0)
return;
zilog = zil_alloc(dp->dp_meta_objset, zh);
(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
claim_txg, B_FALSE);
zil_free(zilog);
}
/*
* We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
* here is to sort the AVL tree by the order each block will be needed.
*/
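/*
 * Consequence of this ordering (informal note): the prefetch thread can
 * simply take avl_first() from scn_prefetch_queue and always issue the
 * block that the main traversal will need soonest.
 */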
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
return (zbookmark_compare(spc_a->spc_datablkszsec,
spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}
static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag)
{
if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
zfs_refcount_destroy(&spc->spc_refcnt);
kmem_free(spc, sizeof (scan_prefetch_ctx_t));
}
}
static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag)
{
scan_prefetch_ctx_t *spc;
spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
zfs_refcount_create(&spc->spc_refcnt);
zfs_refcount_add(&spc->spc_refcnt, tag);
spc->spc_scn = scn;
if (dnp != NULL) {
spc->spc_datablkszsec = dnp->dn_datablkszsec;
spc->spc_indblkshift = dnp->dn_indblkshift;
spc->spc_root = B_FALSE;
} else {
spc->spc_datablkszsec = 0;
spc->spc_indblkshift = 0;
spc->spc_root = B_TRUE;
}
return (spc);
}
static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag)
{
zfs_refcount_add(&spc->spc_refcnt, tag);
}
static void
scan_ds_prefetch_queue_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
void *cookie = NULL;
scan_prefetch_issue_ctx_t *spic = NULL;
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue,
&cookie)) != NULL) {
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
const zbookmark_phys_t *zb)
{
zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
dnode_phys_t tmp_dnp;
dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
if (zb->zb_objset != last_zb->zb_objset)
return (B_TRUE);
if ((int64_t)zb->zb_object < 0)
return (B_FALSE);
tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
if (zbookmark_subtree_completed(dnp, zb, last_zb))
return (B_TRUE);
return (B_FALSE);
}
static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
avl_index_t idx;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp))
return;
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET))
return;
if (dsl_scan_check_prefetch_resume(spc, zb))
return;
scan_prefetch_ctx_add_ref(spc, scn);
spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
spic->spic_spc = spc;
spic->spic_bp = *bp;
spic->spic_zb = *zb;
/*
* Add the IO to the queue of blocks to prefetch. This allows us to
* prioritize blocks that we will need first for the main traversal
* thread.
*/
mutex_enter(&spa->spa_scrub_lock);
if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
/* this block is already queued for prefetch */
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
scan_prefetch_ctx_rele(spc, scn);
mutex_exit(&spa->spa_scrub_lock);
return;
}
avl_insert(&scn->scn_prefetch_queue, spic, idx);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
uint64_t objset, uint64_t object)
{
int i;
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
return;
SET_BOOKMARK(&zb, objset, object, 0, 0);
spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
for (i = 0; i < dnp->dn_nblkptr; i++) {
zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
zb.zb_blkid = i;
dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zb.zb_level = 0;
zb.zb_blkid = DMU_SPILL_BLKID;
dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
}
scan_prefetch_ctx_rele(spc, FTAG);
}
static void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *private)
{
(void) zio;
scan_prefetch_ctx_t *spc = private;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
/* broadcast that the IO has completed for rate limiting purposes */
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
/* if there was an error or we are done prefetching, just cleanup */
if (buf == NULL || scn->scn_prefetch_stop)
goto out;
if (BP_GET_LEVEL(bp) > 0) {
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
zbookmark_phys_t czb;
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1, zb->zb_blkid * epb + i);
dsl_scan_prefetch(spc, cbp, &czb);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_prefetch_dnode(scn, cdnp,
zb->zb_objset, zb->zb_blkid * epb + i);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
objset_phys_t *osp = buf->b_data;
dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
zb->zb_objset, DMU_META_DNODE_OBJECT);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
dsl_scan_prefetch_dnode(scn,
&osp->os_groupused_dnode, zb->zb_objset,
DMU_GROUPUSED_OBJECT);
dsl_scan_prefetch_dnode(scn,
&osp->os_userused_dnode, zb->zb_objset,
DMU_USERUSED_OBJECT);
}
}
out:
if (buf != NULL)
arc_buf_destroy(buf, private);
scan_prefetch_ctx_rele(spc, scn);
}
static void
dsl_scan_prefetch_thread(void *arg)
{
dsl_scan_t *scn = arg;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
/* loop until we are told to stop */
while (!scn->scn_prefetch_stop) {
arc_flags_t flags = ARC_FLAG_NOWAIT |
ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
mutex_enter(&spa->spa_scrub_lock);
/*
* Wait until we have an IO to issue and are not above our
* maximum in flight limit.
*/
while (!scn->scn_prefetch_stop &&
(avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
}
/* recheck if we should stop since we waited for the cv */
if (scn->scn_prefetch_stop) {
mutex_exit(&spa->spa_scrub_lock);
break;
}
/* remove the prefetch IO from the tree */
spic = avl_first(&scn->scn_prefetch_queue);
spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
avl_remove(&scn->scn_prefetch_queue, spic);
mutex_exit(&spa->spa_scrub_lock);
if (BP_IS_PROTECTED(&spic->spic_bp)) {
ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
zio_flags |= ZIO_FLAG_RAW;
}
/* issue the prefetch asynchronously */
(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
&spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT(scn->scn_prefetch_stop);
/* free any prefetches we didn't get to complete */
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
avl_remove(&scn->scn_prefetch_queue, spic);
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
const zbookmark_phys_t *zb)
{
/*
* We never skip over user/group accounting objects (obj<0)
*/
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
(int64_t)zb->zb_object >= 0) {
/*
* If we already visited this bp & everything below (in
* a prior txg sync), don't bother doing it again.
*/
if (zbookmark_subtree_completed(dnp, zb,
&scn->scn_phys.scn_bookmark))
return (B_TRUE);
/*
* If we found the block we're trying to resume from, or
* we went past it, zero it out to indicate that it's OK
* to start checking for suspending again.
*/
if (zbookmark_subtree_tbd(dnp, zb,
&scn->scn_phys.scn_bookmark)) {
dprintf("resuming at %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb));
}
}
return (B_FALSE);
}
static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
/*
* Return nonzero on i/o error.
*/
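/*
 * Informal summary of the recursion below:
 *
 *     level > 0      -> read the indirect block, visit each child bp
 *     DMU_OT_DNODE   -> read the block, visit each dnode's block pointers
 *     DMU_OT_OBJSET  -> visit the meta-dnode and, when present, the
 *                       user/group/project accounting dnodes
 *     otherwise      -> leaf data; only sanity-check the bp here (the
 *                       actual scrub I/O is issued by the caller)
 */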
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
int err;
ASSERT(!BP_IS_REDACTED(bp));
/*
* There is an unlikely case of encountering dnodes with contradicting
* dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag before in files created
* or modified before commit 4254acb was merged. As it is not possible
* to know which of the two is correct, report an error.
*/
if (dnp != NULL &&
dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
scn->scn_phys.scn_errors++;
spa_log_error(spa, zb, &bp->blk_birth);
return (SET_ERROR(EINVAL));
}
if (BP_GET_LEVEL(bp) > 0) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
dsl_scan_visitbp(cbp, &czb, dnp,
ds, scn, ostype, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
arc_flags_t flags = ARC_FLAG_WAIT;
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
arc_buf_t *buf;
if (BP_IS_PROTECTED(bp)) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
zio_flags |= ZIO_FLAG_RAW;
}
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_visitdnode(scn, ds, ostype,
cdnp, zb->zb_blkid * epb + i, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
osp = buf->b_data;
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
/*
* We also always visit user/group/project accounting
* objects, and never skip them, even if we are
* suspending. This is necessary so that the
* space deltas from this txg get integrated.
*/
if (OBJSET_BUF_HAS_PROJECTUSED(buf))
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_userused_dnode,
DMU_USERUSED_OBJECT, tx);
}
arc_buf_destroy(buf, &buf);
} else if (!zfs_blkptr_verify(spa, bp,
BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
/*
* Sanity check the block pointer contents, this is handled
* by arc_read() for the cases above.
*/
scn->scn_phys.scn_errors++;
spa_log_error(spa, zb, &bp->blk_birth);
return (SET_ERROR(EINVAL));
}
return (0);
}
inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
dmu_objset_type_t ostype, dnode_phys_t *dnp,
uint64_t object, dmu_tx_t *tx)
{
int j;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
dnp->dn_nlevels - 1, j);
dsl_scan_visitbp(&dnp->dn_blkptr[j],
&czb, dnp, ds, scn, ostype, tx);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
0, DMU_SPILL_BLKID);
dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
&czb, dnp, ds, scn, ostype, tx);
}
}
/*
* The arguments are in this order because mdb can only print the
* first 5; we want them to be useful.
*/
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
blkptr_t *bp_toread = NULL;
if (dsl_scan_check_suspend(scn, zb))
return;
if (dsl_scan_check_resume(scn, dnp, zb))
return;
scn->scn_visited_this_txg++;
if (BP_IS_HOLE(bp)) {
scn->scn_holes_this_txg++;
return;
}
if (BP_IS_REDACTED(bp)) {
ASSERT(dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS));
return;
}
/*
* Check if this block contradicts any filesystem flags.
*/
spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS;
if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE)
ASSERT(dsl_dataset_feature_is_active(ds, f));
f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
if (f != SPA_FEATURE_NONE)
ASSERT(dsl_dataset_feature_is_active(ds, f));
f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
if (f != SPA_FEATURE_NONE)
ASSERT(dsl_dataset_feature_is_active(ds, f));
if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
scn->scn_lt_min_this_txg++;
return;
}
bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
*bp_toread = *bp;
if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
goto out;
/*
* If dsl_scan_ddt() has already visited this block, it will have
* already done any translations or scrubbing, so don't call the
* callback again.
*/
if (ddt_class_contains(dp->dp_spa,
scn->scn_phys.scn_ddt_class_max, bp)) {
scn->scn_ddt_contained_this_txg++;
goto out;
}
/*
* If this block is from the future (after cur_max_txg), then we
* are doing this on behalf of a deleted snapshot, and we will
* revisit the future block on the next pass of this dataset.
* Don't scan it now unless we need to because something
* under it was modified.
*/
if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
scn->scn_gt_max_this_txg++;
goto out;
}
scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
out:
kmem_free(bp_toread, sizeof (blkptr_t));
}
static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
dmu_tx_t *tx)
{
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
SET_BOOKMARK(&scn->scn_prefetch_bookmark,
zb.zb_objset, 0, 0, 0);
} else {
scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
}
scn->scn_objsets_visited_this_txg++;
spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
dsl_scan_prefetch(spc, bp, &zb);
scan_prefetch_ctx_rele(spc, FTAG);
dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
dprintf_ds(ds, "finished scan%s", "");
}
static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
if (ds->ds_is_snapshot) {
/*
* Note:
* - scn_cur_{min,max}_txg stays the same.
* - Setting the flag is not really necessary if
* scn_cur_max_txg == scn_max_txg, because there
* is nothing after this snapshot that we care
* about. However, we set it anyway and then
* ignore it when we retraverse it in
* dsl_scan_visitds().
*/
scn_phys->scn_bookmark.zb_objset =
dsl_dataset_phys(ds)->ds_next_snap_obj;
zfs_dbgmsg("destroying ds %llu on %s; currently "
"traversing; reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
} else {
SET_BOOKMARK(&scn_phys->scn_bookmark,
ZB_DESTROYED_OBJSET, 0, 0, 0);
zfs_dbgmsg("destroying ds %llu on %s; currently "
"traversing; reset bookmark to -1,0,0,0",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name);
}
}
}
/*
* Invoked when a dataset is destroyed. We need to make sure that:
*
* 1) If it is the dataset that was currently being scanned, we write
* a new dsl_scan_phys_t and mark the objset reference in it
* as destroyed.
* 2) Remove it from the work queue, if it was present.
*
* If the dataset was actually a snapshot, we do not mark it as destroyed;
* instead we substitute the next snapshot in line.
*/
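/*
 * Informal sketch of the cases handled below, when the current scan
 * bookmark points at the destroyed dataset:
 *
 *     snapshot     -> zb_objset := ds_next_snap_obj (and the dataset is
 *                     flagged to be visited again)
 *     non-snapshot -> zb_objset := ZB_DESTROYED_OBJSET
 *
 * Any work-queue entry for the dataset is likewise replaced with
 * ds_next_snap_obj (snapshot) or removed (non-snapshot), both in the
 * in-memory queue and in the on-disk ZAP.
 */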
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ds_destroyed_scn_phys(ds, &scn->scn_phys);
ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
if (ds->ds_is_snapshot)
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
if (ds->ds_is_snapshot) {
/*
* We keep the same mintxg; it could be >
* ds_creation_txg if the previous snapshot was
* deleted too.
*/
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_next_snap_obj,
mintxg, tx) == 0);
zfs_dbgmsg("destroying ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
} else {
zfs_dbgmsg("destroying ds %llu on %s; in queue; "
"removing",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name);
}
}
/*
* dsl_scan_sync() should be called after this, and should sync
* out our changed state, but just to be safe, do it here.
*/
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds->ds_object) {
scn_bookmark->zb_objset =
dsl_dataset_phys(ds)->ds_prev_snap_obj;
zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
ds->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
}
/*
* Called when a dataset is snapshotted. If we were currently traversing
* this snapshot, we reset our bookmark to point at the newly created
* snapshot. We also modify our work queue to remove the old snapshot and
* replace with the new one.
*/
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
zfs_dbgmsg("snapshotting ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds1->ds_object) {
scn_bookmark->zb_objset = ds2->ds_object;
zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds1->ds_object,
ds1->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)ds2->ds_object);
} else if (scn_bookmark->zb_objset == ds2->ds_object) {
scn_bookmark->zb_objset = ds1->ds_object;
zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds2->ds_object,
ds2->ds_dir->dd_pool->dp_spa->spa_name,
(u_longlong_t)ds1->ds_object);
}
}
/*
* Called when an origin dataset and its clone are swapped. If we were
* currently traversing the dataset, we need to switch to traversing the
* newly promoted clone.
*/
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds1->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg1, mintxg2;
boolean_t ds1_queued, ds2_queued;
if (!dsl_scan_is_running(scn))
return;
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
/*
* Handle the in-memory scan queue.
*/
ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* The swapping code below would not handle this case correctly,
* since we can't insert ds2 if it is already there. That's
* because scan_ds_queue_insert() prohibits a duplicate insert
* and panics.
*/
} else if (ds1_queued) {
scan_ds_queue_remove(scn, ds1->ds_object);
scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
} else if (ds2_queued) {
scan_ds_queue_remove(scn, ds2->ds_object);
scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
}
/*
* Handle the on-disk scan queue.
* The on-disk state is an out-of-date version of the in-memory state,
* so the in-memory and on-disk values for ds1_queued and ds2_queued may
* be different. Therefore we need to apply the swap logic to the
* on-disk state independently of the in-memory state.
*/
ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* Alternatively, we could check for EEXIST from
* zap_add_int_key() and back out to the original state, but
* that would be more work than checking for this case upfront.
*/
} else if (ds1_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds1->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)ds2->ds_object);
} else if (ds2_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
zfs_dbgmsg("clone_swap ds %llu on %s; in queue; "
"replacing with %llu",
(u_longlong_t)ds2->ds_object,
dp->dp_spa->spa_name,
(u_longlong_t)ds1->ds_object);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
uint64_t originobj = *(uint64_t *)arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
return (0);
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
dsl_dataset_rele(ds, FTAG);
if (err)
return (err);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (scn->scn_phys.scn_cur_min_txg >=
scn->scn_phys.scn_max_txg) {
/*
* This can happen if this snapshot was created after the
* scan started, and we already completed a previous snapshot
* that was created after the scan started. This snapshot
* only references blocks with:
*
*     birth < our ds_creation_txg
*         cur_min_txg is no less than ds_creation_txg.
*         We have already visited these blocks.
* or
*     birth > scn_max_txg
*         The scan requested not to visit these blocks.
*
* Subsequent snapshots (and clones) can reference our
* blocks, or blocks with even higher birth times.
* Therefore we do not need to visit them either,
* so we do not add them to the work queue.
*
* Note that checking for cur_min_txg >= cur_max_txg
* is not sufficient, because in that case we may need to
* visit subsequent snapshots. This happens when min_txg > 0,
* which raises cur_min_txg. In this case we will visit
* this dataset but skip all of its blocks, because the
* rootbp's birth time is < cur_min_txg. Then we will
* add the next snapshots/clones to the work queue.
*/
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
"cur_min_txg (%llu) >= max_txg (%llu)",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_max_txg);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
goto out;
}
/*
* Only the ZIL in the head (non-snapshot) is valid. Even though
* snapshots can have ZIL block pointers (which may be the same
* BP as in the head), they must be ignored. In addition, $ORIGIN
* doesn't have an objset (i.e. its ds_bp is a hole), so we don't
* need to look for a ZIL in it either. So we traverse the ZIL here,
* rather than in scan_recurse(), because the regular snapshot
* block-sharing rules don't apply to it.
*/
if (!dsl_dataset_is_snapshot(ds) &&
(dp->dp_origin_snap == NULL ||
ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
objset_t *os;
if (dmu_objset_from_ds(ds, &os) != 0) {
goto out;
}
dsl_scan_zil(dp, &os->os_zil_header);
}
/*
* Iterate over the bps in this ds.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
"suspending=%u",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_cur_max_txg,
(int)scn->scn_suspending);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
if (scn->scn_suspending)
goto out;
/*
* We've finished this pass over this dataset.
*/
/*
* If we did not completely visit this dataset, do another pass.
*/
if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
zfs_dbgmsg("incomplete pass on %s; visiting again",
dp->dp_spa->spa_name);
scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
scan_ds_queue_insert(scn, ds->ds_object,
scn->scn_phys.scn_cur_max_txg);
goto out;
}
/*
* Add descendant datasets to work queue.
*/
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj,
dsl_dataset_phys(ds)->ds_creation_txg);
}
if (dsl_dataset_phys(ds)->ds_num_children > 1) {
boolean_t usenext = B_FALSE;
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
uint64_t count;
/*
* A bug in a previous version of the code could
* cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a
* missing entry. Therefore we can only use the
* next_clones_obj when its count is correct.
*/
int err = zap_count(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
if (err == 0 &&
count == dsl_dataset_phys(ds)->ds_num_children - 1)
usenext = B_TRUE;
}
if (usenext) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
dsl_dataset_phys(ds)->ds_creation_txg);
}
zap_cursor_fini(&zc);
} else {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_clones_cb, &ds->ds_object,
DS_FIND_CHILDREN));
}
}
out:
dsl_dataset_rele(ds, FTAG);
}
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
(void) arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err) {
dsl_dataset_rele(ds, FTAG);
return (err);
}
/*
* If this is a clone, we don't need to worry about it for now.
*/
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(prev, FTAG);
return (0);
}
dsl_dataset_rele(ds, FTAG);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
ddt_entry_t *dde, dmu_tx_t *tx)
{
(void) tx;
const ddt_key_t *ddk = &dde->dde_key;
ddt_phys_t *ddp = dde->dde_phys;
blkptr_t bp;
zbookmark_phys_t zb = { 0 };
if (!dsl_scan_is_running(scn))
return;
/*
* This function is special because it is the only thing
* that can add scan_io_t's to the vdev scan queues from
* outside dsl_scan_sync(). For the most part this is ok
* as long as it is called from within syncing context.
* However, dsl_scan_sync() expects that no new sio's will
* be added between when all the work for a scan is done
* and the next txg when the scan is actually marked as
* completed. This check ensures we do not issue new sio's
* during this period.
*/
if (scn->scn_done_txg != 0)
return;
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
continue;
ddt_bp_create(checksum, ddk, ddp, &bp);
scn->scn_visited_this_txg++;
scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
}
}
/*
* Scrub/dedup interaction.
*
* If there are N references to a deduped block, we don't want to scrub it
* N times -- ideally, we should scrub it exactly once.
*
* We leverage the fact that the dde's replication class (enum ddt_class)
* is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
* (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
*
* To prevent excess scrubbing, the scrub begins by walking the DDT
* to find all blocks with refcnt > 1, and scrubs each of these once.
* Since there are two replication classes which contain blocks with
* refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
* Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
*
* There would be nothing more to say if a block's refcnt couldn't change
* during a scrub, but of course it can so we must account for changes
* in a block's replication class.
*
* Here's an example of what can occur:
*
* If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
* when visited during the top-down scrub phase, it will be scrubbed twice.
* This negates our scrub optimization, but is otherwise harmless.
*
* If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
* on each visit during the top-down scrub phase, it will never be scrubbed.
* To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
* reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
* DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
* while a scrub is in progress, it scrubs the block right then.
*/
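/*
 * Editorial sketch (not part of this change or of dsl_scan.c): the
 * visitation policy described above, reduced to plain C. The parameters
 * "refcnt" and "ddt_phase" are illustrative stand-ins, not OpenZFS
 * symbols; the real decision is made by walking the DDT classes in
 * dsl_scan_ddt() below and by the regular top-down traversal.
 */
static int
scrub_should_visit_example(uint64_t refcnt, int ddt_phase)
{
/* DDT phase: scrub only deduped blocks (refcnt > 1), once each. */
if (ddt_phase)
return (refcnt > 1);
/* Top-down phase: scrub only unique blocks (refcnt == 1). */
return (refcnt == 1);
}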
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
ddt_entry_t dde = {{{{0}}}};
int error;
uint64_t n = 0;
while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
ddt_t *ddt;
if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
break;
dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
(longlong_t)ddb->ddb_class,
(longlong_t)ddb->ddb_type,
(longlong_t)ddb->ddb_checksum,
(longlong_t)ddb->ddb_cursor);
/* There should be no pending changes to the dedup table */
ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
ASSERT(avl_first(&ddt->ddt_tree) == NULL);
dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
n++;
if (dsl_scan_check_suspend(scn, NULL))
break;
}
zfs_dbgmsg("scanned %llu ddt entries on %s with class_max = %u; "
"suspending=%u", (longlong_t)n, scn->scn_dp->dp_spa->spa_name,
(int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);
ASSERT(error == 0 || error == ENOENT);
ASSERT(error != ENOENT ||
ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
if (ds->ds_is_snapshot)
return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
return (smt);
}
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
scan_ds_t *sds;
dsl_pool_t *dp = scn->scn_dp;
if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
scn->scn_phys.scn_ddt_class_max) {
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_ddt(scn, tx);
if (scn->scn_suspending)
return;
}
if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
/* First do the MOS & ORIGIN */
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_visit_rootbp(scn, NULL,
&dp->dp_meta_rootbp, tx);
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
if (scn->scn_suspending)
return;
if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_cb, NULL, DS_FIND_CHILDREN));
} else {
dsl_scan_visitds(scn,
dp->dp_origin_snap->ds_object, tx);
}
ASSERT(!scn->scn_suspending);
} else if (scn->scn_phys.scn_bookmark.zb_objset !=
ZB_DESTROYED_OBJSET) {
uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
/*
* If we were suspended, continue from here. Note if the
* ds we were suspended on was deleted, the zb_objset may
* be -1, so we will skip this and find a new objset
* below.
*/
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/*
* In case we suspended right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t));
/*
* Keep pulling things out of the dataset avl queue. Updates to the
* persistent zap-object-as-queue happen only at checkpoints.
*/
while ((sds = avl_first(&scn->scn_queue)) != NULL) {
dsl_dataset_t *ds;
uint64_t dsobj = sds->sds_dsobj;
uint64_t txg = sds->sds_txg;
/* dequeue and free the ds from the queue */
scan_ds_queue_remove(scn, dsobj);
sds = NULL;
/* set up min / max txg */
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (txg != 0) {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg, txg);
} else {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
}
scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
dsl_dataset_rele(ds, FTAG);
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/* No more objsets to fetch, we're done */
scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
ASSERT0(scn->scn_suspending);
}
static uint64_t
dsl_scan_count_data_disks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t i, leaves = 0;
for (i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
continue;
leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
}
return (leaves);
}
static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
int i;
uint64_t cur_size = 0;
for (i = 0; i < BP_GET_NDVAS(bp); i++) {
cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
}
q->q_total_zio_size_this_txg += cur_size;
q->q_zios_this_txg++;
}
static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
uint64_t end)
{
q->q_total_seg_size_this_txg += end - start;
q->q_segs_this_txg++;
}
static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
/* See comment in dsl_scan_check_suspend() */
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
uint64_t dirty_min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
return ((NSEC2MSEC(scan_time_ns) > mintime &&
(scn->scn_dp->dp_dirty_total >= dirty_min_bytes ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
/*
* Given a list of scan_io_t's in io_list, this issues the I/Os out to
* disk. This consumes the io_list and frees the scan_io_t's. This is
* called when emptying queues, either when we're up against the memory
* limit or when we have finished scanning. Returns B_TRUE if we stopped
* processing the list before we finished. Any sios that were not issued
* will remain in the io_list.
*/
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
boolean_t suspended = B_FALSE;
while ((sio = list_head(io_list)) != NULL) {
blkptr_t bp;
if (scan_io_queue_check_suspend(scn)) {
suspended = B_TRUE;
break;
}
sio2bp(sio, &bp);
scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
&sio->sio_zb, queue);
(void) list_remove_head(io_list);
scan_io_queues_update_zio_stats(queue, &bp);
sio_free(sio);
}
return (suspended);
}
/*
* This function removes sios from an IO queue which reside within a given
* range_seg_t and inserts them (in offset order) into a list. Note that
* we only ever return a maximum of 32 sios at once. If there are more sios
* to process within this segment that did not make it onto the list, we
* return B_TRUE; otherwise B_FALSE.
*/
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
scan_io_t *srch_sio, *sio, *next_sio;
avl_index_t idx;
uint_t num_sios = 0;
int64_t bytes_issued = 0;
ASSERT(rs != NULL);
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
srch_sio = sio_alloc(1);
srch_sio->sio_nr_dvas = 1;
SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));
/*
* The exact start of the extent might not contain any matching zios,
* so if that's the case, examine the next one in the tree.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio == NULL)
sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
queue->q_exts_by_addr) && num_sios <= 32) {
ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
queue->q_exts_by_addr));
ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
queue->q_exts_by_addr));
next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
avl_remove(&queue->q_sios_by_addr, sio);
if (avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&queue->q_scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
bytes_issued += SIO_GET_ASIZE(sio);
num_sios++;
list_insert_tail(list, sio);
sio = next_sio;
}
/*
* We limit the number of sios we process at once to 32 to avoid
* biting off more than we can chew. If we didn't take everything
* in the segment we update it to reflect the work we were able to
* complete. Otherwise, we remove it from the range tree entirely.
*/
if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
queue->q_exts_by_addr)) {
range_tree_adjust_fill(queue->q_exts_by_addr, rs,
-bytes_issued);
range_tree_resize_segment(queue->q_exts_by_addr, rs,
SIO_GET_OFFSET(sio), rs_get_end(rs,
queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
queue->q_last_ext_addr = SIO_GET_OFFSET(sio);
return (B_TRUE);
} else {
uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
queue->q_last_ext_addr = -1;
return (B_FALSE);
}
}
/*
* This is called from the queue emptying thread and selects the next
* extent from which we are to issue I/Os. The behavior of this function
* depends on the state of the scan, the current memory consumption and
* whether or not we are performing a scan shutdown.
* 1) We select extents in an elevator algorithm (LBA-order) if the scan
* needs to perform a checkpoint
* 2) We select the largest available extent if we are up against the
* memory limit.
* 3) Otherwise we don't select any extents.
*/
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
range_tree_t *rt = queue->q_exts_by_addr;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
ASSERT(scn->scn_is_sorted);
if (!scn->scn_checkpointing && !scn->scn_clearing)
return (NULL);
/*
* During normal clearing, we want to issue our largest segments
* first, keeping IO as sequential as possible, and leaving the
* smaller extents for later with the hope that they might eventually
* grow to larger sequential segments. However, when the scan is
* checkpointing, no new extents will be added to the sorting queue,
* so the way we are sorted now is as good as it will ever get.
* In this case, we instead switch to issuing extents in LBA order.
*/
if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) ||
zfs_scan_issue_strategy == 1)
return (range_tree_first(rt));
/*
* Try to continue the previous extent if it is not completed yet. After
* shrinking in scan_io_queue_gather() it may no longer be the best, but
* otherwise we would leave a shorter remnant every txg.
*/
uint64_t start;
uint64_t size = 1ULL << rt->rt_shift;
range_seg_t *addr_rs;
if (queue->q_last_ext_addr != -1) {
start = queue->q_last_ext_addr;
addr_rs = range_tree_find(rt, start, size);
if (addr_rs != NULL)
return (addr_rs);
}
/*
* Nothing to continue, so find new best extent.
*/
uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL);
if (v == NULL)
return (NULL);
queue->q_last_ext_addr = start = *v << rt->rt_shift;
/*
* We need to get the original entry in the by_addr tree so we can
* modify it.
*/
addr_rs = range_tree_find(rt, start, size);
ASSERT3P(addr_rs, !=, NULL);
ASSERT3U(rs_get_start(addr_rs, rt), ==, start);
ASSERT3U(rs_get_end(addr_rs, rt), >, start);
return (addr_rs);
}
static void
scan_io_queues_run_one(void *arg)
{
dsl_scan_io_queue_t *queue = arg;
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
boolean_t suspended = B_FALSE;
range_seg_t *rs;
scan_io_t *sio;
zio_t *zio;
list_t sio_list;
ASSERT(queue->q_scn->scn_is_sorted);
list_create(&sio_list, sizeof (scan_io_t),
offsetof(scan_io_t, sio_nodes.sio_list_node));
zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa,
NULL, NULL, NULL, ZIO_FLAG_CANFAIL);
mutex_enter(q_lock);
queue->q_zio = zio;
/* Calculate maximum in-flight bytes for this vdev. */
queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
(vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));
/* reset per-queue scan statistics for this txg */
queue->q_total_seg_size_this_txg = 0;
queue->q_segs_this_txg = 0;
queue->q_total_zio_size_this_txg = 0;
queue->q_zios_this_txg = 0;
/* loop until we run out of time or sios */
while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
uint64_t seg_start = 0, seg_end = 0;
boolean_t more_left;
ASSERT(list_is_empty(&sio_list));
/* loop while we still have sios left to process in this rs */
do {
scan_io_t *first_sio, *last_sio;
/*
* We have selected which extent needs to be
* processed next. Gather up the corresponding sios.
*/
more_left = scan_io_queue_gather(queue, rs, &sio_list);
ASSERT(!list_is_empty(&sio_list));
first_sio = list_head(&sio_list);
last_sio = list_tail(&sio_list);
seg_end = SIO_GET_END_OFFSET(last_sio);
if (seg_start == 0)
seg_start = SIO_GET_OFFSET(first_sio);
/*
* Issuing sios can take a long time so drop the
* queue lock. The sio queue won't be updated by
* other threads since we're in syncing context so
* we can be sure that our trees will remain exactly
* as we left them.
*/
mutex_exit(q_lock);
suspended = scan_io_queue_issue(queue, &sio_list);
mutex_enter(q_lock);
if (suspended)
break;
} while (more_left);
/* update statistics for debugging purposes */
scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
if (suspended)
break;
}
/*
* If we were suspended in the middle of processing,
* requeue any unfinished sios and exit.
*/
- while ((sio = list_head(&sio_list)) != NULL) {
- list_remove(&sio_list, sio);
+ while ((sio = list_remove_head(&sio_list)) != NULL)
scan_io_queue_insert_impl(queue, sio);
- }
queue->q_zio = NULL;
mutex_exit(q_lock);
zio_nowait(zio);
list_destroy(&sio_list);
}
/*
* Performs an emptying run on all scan queues in the pool. This just
* punches out one thread per top-level vdev, each of which processes
* only that vdev's scan queue. We can parallelize the I/O here because
* we know that each queue's I/Os only affect its own top-level vdev.
*
* This function waits for the queue runs to complete, and must be
* called from dsl_scan_sync (or in general, syncing context).
*/
static void
scan_io_queues_run(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(scn->scn_is_sorted);
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (scn->scn_queues_pending == 0)
return;
if (scn->scn_taskq == NULL) {
int nthreads = spa->spa_root_vdev->vdev_children;
/*
* We need to make this taskq *always* execute as many
* threads in parallel as we have top-level vdevs and no
* less, otherwise strange serialization of the calls to
* scan_io_queues_run_one can occur during spa_sync runs
* and that significantly impacts performance.
*/
scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
mutex_enter(&vd->vdev_scan_io_queue_lock);
if (vd->vdev_scan_io_queue != NULL) {
VERIFY(taskq_dispatch(scn->scn_taskq,
scan_io_queues_run_one, vd->vdev_scan_io_queue,
TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* Wait for the queues to finish issuing their IOs for this run
* before we return. There may still be IOs in flight at this
* point.
*/
taskq_wait(scn->scn_taskq);
}
static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
uint64_t elapsed_nanosecs;
if (zfs_recover)
return (B_FALSE);
if (zfs_async_block_max_blocks != 0 &&
scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
return (B_TRUE);
}
if (zfs_max_async_dedup_frees != 0 &&
scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
return (B_TRUE);
}
elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
(NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
txg_sync_waiting(scn->scn_dp)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_scan_t *scn = arg;
if (!scn->scn_is_bptree ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
}
zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
dmu_tx_get_txg(tx), bp, 0));
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
scn->scn_visited_this_txg++;
if (BP_GET_DEDUP(bp))
scn->scn_dedup_frees_this_txg++;
return (0);
}
static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t i;
uint64_t seg_size_total = 0, zio_size_total = 0;
uint64_t seg_count_total = 0, zio_count_total = 0;
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
if (queue == NULL)
continue;
seg_size_total += queue->q_total_seg_size_this_txg;
zio_size_total += queue->q_total_zio_size_this_txg;
seg_count_total += queue->q_segs_this_txg;
zio_count_total += queue->q_zios_this_txg;
}
if (seg_count_total == 0 || zio_count_total == 0) {
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_zios_this_txg = 0;
return;
}
scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
scn->scn_segs_this_txg = seg_count_total;
scn->scn_zios_this_txg = zio_count_total;
}
static int
bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (dsl_scan_free_block_cb(arg, bp, tx));
}
static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
dsl_scan_t *scn = arg;
const dva_t *dva = &bp->blk_dva[0];
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
DVA_GET_ASIZE(dva), tx);
scn->scn_visited_this_txg++;
return (0);
}
boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t used = 0, comp, uncomp;
boolean_t clones_left;
if (spa->spa_load_state != SPA_LOAD_NONE)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
(scn->scn_async_destroying && !scn->scn_async_stalled))
return (B_TRUE);
if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
&used, &comp, &uncomp);
}
clones_left = spa_livelist_delete_check(spa);
return ((used != 0) || (clones_left));
}
boolean_t
dsl_errorscrub_active(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
if (spa->spa_load_state != SPA_LOAD_NONE)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
if (dsl_errorscrubbing(scn->scn_dp))
return (B_TRUE);
return (B_FALSE);
}
static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
boolean_t need_resilver = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++) {
need_resilver |=
dsl_scan_check_deferred(vd->vdev_child[c]);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (need_resilver);
if (!vd->vdev_resilver_deferred)
need_resilver = B_TRUE;
return (need_resilver);
}
static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_t *vd;
vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops == &vdev_indirect_ops) {
/*
* The indirect vdev can point to multiple
* vdevs. For simplicity, always create
* the resilver zio_t. zio_vdev_io_start()
* will bypass the child resilver i/o's if
* they are on vdevs that don't have DTL's.
*/
return (B_TRUE);
}
if (DVA_GET_GANG(dva)) {
/*
* Gang members may be spread across multiple
* vdevs, so the best estimate we have is the
* scrub range, which has already been checked.
* XXX -- it would be better to change our
* allocation policy to ensure that all
* gang members reside on the same vdev.
*/
return (B_TRUE);
}
/*
* Check if the top-level vdev must resilver this offset.
* When the offset does not intersect with a dirty leaf DTL
* then it may be possible to skip the resilver IO. The psize
* is provided instead of asize to simplify the check for RAIDZ.
*/
if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
return (B_FALSE);
/*
* Check that this top-level vdev has a device under it which
* is resilvering and is not deferred.
*/
if (!dsl_scan_check_deferred(vd))
return (B_FALSE);
return (B_TRUE);
}
static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
int err = 0;
if (spa_suspend_async_destroy(spa))
return (0);
if (zfs_free_bpobj_enabled &&
spa_version(spa) >= SPA_VERSION_DEADLISTS) {
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bpobj_iterate(&dp->dp_free_bpobj,
bpobj_dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
}
if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
ASSERT(scn->scn_async_destroying);
scn->scn_is_bptree = B_TRUE;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bptree_iterate(dp->dp_meta_objset,
dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err == EIO || err == ECKSUM) {
err = 0;
} else if (err != 0 && err != ERESTART) {
zfs_panic_recover("error %u from "
"traverse_dataset_destroyed()", err);
}
if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
/* finished; deactivate async destroy feature */
spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
ASSERT(!spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY));
VERIFY0(zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, tx));
VERIFY0(bptree_free(dp->dp_meta_objset,
dp->dp_bptree_obj, tx));
dp->dp_bptree_obj = 0;
scn->scn_async_destroying = B_FALSE;
scn->scn_async_stalled = B_FALSE;
} else {
/*
* If we didn't make progress, mark the async
* destroy as stalled, so that we will not initiate
* a spa_sync() on its behalf. Note that we only
* check this if we are not finished, because if the
* bptree had no blocks for us to visit, we can
* finish without "making progress".
*/
scn->scn_async_stalled =
(scn->scn_visited_this_txg == 0);
}
}
if (scn->scn_visited_this_txg) {
zfs_dbgmsg("freed %llu blocks in %llums from "
"free_bpobj/bptree on %s in txg %llu; err=%u",
(longlong_t)scn->scn_visited_this_txg,
(longlong_t)
NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
spa->spa_name, (longlong_t)tx->tx_txg, err);
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
/*
* Write out changes to the DDT and the BRT that may be required
* as a result of the blocks freed. This ensures that the DDT
* and the BRT are clean when a scrub/resilver runs.
*/
ddt_sync(spa, tx->tx_txg);
brt_sync(spa, tx->tx_txg);
}
if (err != 0)
return (err);
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
zfs_free_leak_on_eio &&
(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
/*
* We have finished background destroying, but there is still
* some space left in the dp_free_dir. Transfer this leaked
* space to the dp_leak_dir.
*/
if (dp->dp_leak_dir == NULL) {
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
LEAK_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
LEAK_DIR_NAME, &dp->dp_leak_dir));
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
-dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
}
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
!spa_livelist_delete_check(spa)) {
/* finished; verify that space accounting went to zero */
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
}
spa_notify_waiters(spa);
EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ));
if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_OBSOLETE_COUNTS));
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
err = bpobj_iterate(&dp->dp_obsolete_bpobj,
dsl_scan_obsolete_block_cb, scn, tx);
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
dsl_pool_destroy_obsolete_bpobj(dp, tx);
}
return (0);
}
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
zb->zb_objset = zfs_strtonum(buf, &buf);
ASSERT(*buf == ':');
zb->zb_object = zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == ':');
zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == ':');
zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == '\0');
}
static void
name_to_object(char *buf, uint64_t *obj)
{
*obj = zfs_strtonum(buf, &buf);
ASSERT(*buf == '\0');
}
static void
read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb)
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
objset_t *os;
if (dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds) != 0)
return;
if (dmu_objset_from_ds(ds, &os) != 0) {
dsl_dataset_rele(ds, FTAG);
return;
}
/*
* If the key is not loaded, dbuf_dnode_findbp() will error out with
* EACCES. However, in that case dnode_hold() will eventually call
* dbuf_read()->zio_wait(), which may call spa_log_error(). This would
* lead to a deadlock because we are holding the mutex spa_errlist_lock.
* Avoid this by checking here whether the keys are loaded; if not, return.
* If the keys are not loaded, the head_errlog feature is meaningless,
* as we cannot figure out the birth txg of the block pointer.
*/
if (dsl_dataset_get_keystatus(ds->ds_dir) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
dsl_dataset_rele(ds, FTAG);
return;
}
dnode_t *dn;
blkptr_t bp;
if (dnode_hold(os, zb.zb_object, FTAG, &dn) != 0) {
dsl_dataset_rele(ds, FTAG);
return;
}
rw_enter(&dn->dn_struct_rwlock, RW_READER);
int error = dbuf_dnode_findbp(dn, zb.zb_level, zb.zb_blkid, &bp, NULL,
NULL);
if (error) {
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
dsl_dataset_rele(ds, FTAG);
return;
}
if (!error && BP_IS_HOLE(&bp)) {
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
dsl_dataset_rele(ds, FTAG);
return;
}
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW |
ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB;
/* If it's an intent log block, failure is expected. */
if (zb.zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE;
ASSERT(!BP_IS_EMBEDDED(&bp));
scan_exec_io(dp, &bp, zio_flags, &zb, NULL);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
dsl_dataset_rele(ds, FTAG);
}
/*
* We keep track of the scrubbed error blocks in "count". This will be used
* when deciding whether we exceeded zfs_scrub_error_blocks_per_txg. This
* function is modelled after check_filesystem().
*/
static int
scrub_filesystem(spa_t *spa, uint64_t fs, zbookmark_err_phys_t *zep,
int *count)
{
dsl_dataset_t *ds;
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
int error = dsl_dataset_hold_obj(dp, fs, FTAG, &ds);
if (error != 0)
return (error);
uint64_t latest_txg;
uint64_t txg_to_consider = spa->spa_syncing_txg;
boolean_t check_snapshot = B_TRUE;
error = find_birth_txg(ds, zep, &latest_txg);
/*
* If find_birth_txg() errors out, then err on the side of caution and
* proceed; in the worst-case scenario we scrub all objects. If zep->zb_birth
* is 0 (e.g. in the case of encryption with unloaded keys), we also proceed
* to scrub all objects.
*/
if (error == 0 && zep->zb_birth == latest_txg) {
/* Block neither freed nor rewritten. */
zbookmark_phys_t zb;
zep_to_zb(fs, zep, &zb);
scn->scn_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/* We have already acquired the config lock for spa */
read_by_block_level(scn, zb);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
scn->errorscrub_phys.dep_examined++;
scn->errorscrub_phys.dep_to_examine--;
(*count)++;
if ((*count) == zfs_scrub_error_blocks_per_txg ||
dsl_error_scrub_check_suspend(scn, &zb)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EFAULT));
}
check_snapshot = B_FALSE;
} else if (error == 0) {
txg_to_consider = latest_txg;
}
/*
* Retrieve the number of snapshots if the dataset is not a snapshot.
*/
uint64_t snap_count = 0;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
error = zap_count(spa->spa_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
}
if (snap_count == 0) {
/* Filesystem without snapshots. */
dsl_dataset_rele(ds, FTAG);
return (0);
}
uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsl_dataset_rele(ds, FTAG);
/* Check only snapshots created from this file system. */
while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
snap_obj_txg <= txg_to_consider) {
error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds);
if (error != 0)
return (error);
if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != fs) {
snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsl_dataset_rele(ds, FTAG);
continue;
}
boolean_t affected = B_TRUE;
if (check_snapshot) {
uint64_t blk_txg;
error = find_birth_txg(ds, zep, &blk_txg);
/*
* Scrub the snapshot also when zb_birth == 0 or when
* find_birth_txg() returns an error.
*/
affected = (error == 0 && zep->zb_birth == blk_txg) ||
(error != 0) || (zep->zb_birth == 0);
}
/* Scrub snapshots. */
if (affected) {
zbookmark_phys_t zb;
zep_to_zb(snap_obj, zep, &zb);
scn->scn_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/* We have already acquired the config lock for spa */
read_by_block_level(scn, zb);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
scn->errorscrub_phys.dep_examined++;
scn->errorscrub_phys.dep_to_examine--;
(*count)++;
if ((*count) == zfs_scrub_error_blocks_per_txg ||
dsl_error_scrub_check_suspend(scn, &zb)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EFAULT));
}
}
snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsl_dataset_rele(ds, FTAG);
}
return (0);
}
void
dsl_errorscrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
/*
* Only process scans in sync pass 1.
*/
if (spa_sync_pass(spa) > 1)
return;
/*
* If the spa is shutting down, then stop scanning. This will
* ensure that the scan does not dirty any new data during the
* shutdown phase.
*/
if (spa_shutting_down(spa))
return;
if (!dsl_errorscrub_active(scn) || dsl_errorscrub_is_paused(scn)) {
return;
}
if (dsl_scan_resilvering(scn->scn_dp)) {
/* cancel the error scrub if resilver started */
dsl_scan_cancel(scn->scn_dp);
return;
}
spa->spa_scrub_active = B_TRUE;
scn->scn_sync_start_time = gethrtime();
/*
* zfs_scan_suspend_progress can be set to disable scrub progress.
* See more detailed comment in dsl_scan_sync().
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
int mintime = zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
!spa_shutting_down(scn->scn_dp->dp_spa) &&
NSEC2MSEC(scan_time_ns) < mintime) {
delay(hz);
scan_time_ns = gethrtime() - scn->scn_sync_start_time;
}
return;
}
int i = 0;
zap_attribute_t *za;
zbookmark_phys_t *zb;
boolean_t limit_exceeded = B_FALSE;
za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
zb = kmem_zalloc(sizeof (zbookmark_phys_t), KM_SLEEP);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
zap_cursor_advance(&scn->errorscrub_cursor)) {
name_to_bookmark(za->za_name, zb);
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
dsl_pool_config_enter(dp, FTAG);
read_by_block_level(scn, *zb);
dsl_pool_config_exit(dp, FTAG);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
scn->errorscrub_phys.dep_examined += 1;
scn->errorscrub_phys.dep_to_examine -= 1;
i++;
if (i == zfs_scrub_error_blocks_per_txg ||
dsl_error_scrub_check_suspend(scn, zb)) {
limit_exceeded = B_TRUE;
break;
}
}
if (!limit_exceeded)
dsl_errorscrub_done(scn, B_TRUE, tx);
dsl_errorscrub_sync_state(scn, tx);
kmem_free(za, sizeof (*za));
kmem_free(zb, sizeof (*zb));
return;
}
int error = 0;
for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0;
zap_cursor_advance(&scn->errorscrub_cursor)) {
zap_cursor_t *head_ds_cursor;
zap_attribute_t *head_ds_attr;
zbookmark_err_phys_t head_ds_block;
head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);
uint64_t head_ds_err_obj = za->za_first_integer;
uint64_t head_ds;
name_to_object(za->za_name, &head_ds);
boolean_t config_held = B_FALSE;
uint64_t top_affected_fs;
for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {
name_to_errphys(head_ds_attr->za_name, &head_ds_block);
/*
* In case we are called from spa_sync the pool
* config is already held.
*/
if (!dsl_pool_config_held(dp)) {
dsl_pool_config_enter(dp, FTAG);
config_held = B_TRUE;
}
error = find_top_affected_fs(spa,
head_ds, &head_ds_block, &top_affected_fs);
if (error)
break;
error = scrub_filesystem(spa, top_affected_fs,
&head_ds_block, &i);
if (error == SET_ERROR(EFAULT)) {
limit_exceeded = B_TRUE;
break;
}
}
zap_cursor_fini(head_ds_cursor);
kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
kmem_free(head_ds_attr, sizeof (*head_ds_attr));
if (config_held)
dsl_pool_config_exit(dp, FTAG);
}
kmem_free(za, sizeof (*za));
kmem_free(zb, sizeof (*zb));
if (!limit_exceeded)
dsl_errorscrub_done(scn, B_TRUE, tx);
dsl_errorscrub_sync_state(scn, tx);
}
/*
* This is the primary entry point for scans that is called from syncing
* context. Scans must happen entirely during syncing context so that we
* can guarantee that blocks we are currently scanning will not change out
* from under us. While a scan is active, this function controls how quickly
* transaction groups proceed, instead of the normal handling provided by
* txg_sync_thread().
*/
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
int err = 0;
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
state_sync_type_t sync_type = SYNC_OPTIONAL;
if (spa->spa_resilver_deferred &&
!spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
/*
* Check for scn_restart_txg before checking spa_load_state, so
* that we can restart an old-style scan while the pool is being
* imported (see dsl_scan_init). We also restart scans if there
* is a deferred resilver and the user has manually disabled
* deferred resilvers via the tunable.
*/
if (dsl_scan_restarting(scn, tx) ||
(spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
pool_scan_func_t func = POOL_SCAN_SCRUB;
dsl_scan_done(scn, B_FALSE, tx);
if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
func = POOL_SCAN_RESILVER;
zfs_dbgmsg("restarting scan func=%u on %s txg=%llu",
func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg);
dsl_scan_setup_sync(&func, tx);
}
/*
* Only process scans in sync pass 1.
*/
if (spa_sync_pass(spa) > 1)
return;
/*
* If the spa is shutting down, then stop scanning. This will
* ensure that the scan does not dirty any new data during the
* shutdown phase.
*/
if (spa_shutting_down(spa))
return;
/*
* If the scan is inactive due to a stalled async destroy, try again.
*/
if (!scn->scn_async_stalled && !dsl_scan_active(scn))
return;
/* reset scan statistics */
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
scn->scn_holes_this_txg = 0;
scn->scn_lt_min_this_txg = 0;
scn->scn_gt_max_this_txg = 0;
scn->scn_ddt_contained_this_txg = 0;
scn->scn_objsets_visited_this_txg = 0;
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_zios_this_txg = 0;
scn->scn_suspending = B_FALSE;
scn->scn_sync_start_time = gethrtime();
spa->spa_scrub_active = B_TRUE;
/*
* First process the async destroys. If we suspend, don't do
* any scrubbing or resilvering. This ensures that there are no
* async destroys while we are scanning, so the scan code doesn't
* have to worry about traversing it. It is also faster to free the
* blocks than to scrub them.
*/
err = dsl_process_async_destroys(dp, tx);
if (err != 0)
return;
if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
return;
/*
* Wait a few txgs after importing to begin scanning so that
* we can get the pool imported quickly.
*/
if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
return;
/*
* zfs_scan_suspend_progress can be set to disable scan progress.
* We don't want to spin the txg_sync thread, so we add a delay
* here to simulate the time spent doing a scan. This is mostly
* useful for testing and debugging.
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
uint_t mintime = (scn->scn_phys.scn_func ==
POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms :
zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
!spa_shutting_down(scn->scn_dp->dp_spa) &&
NSEC2MSEC(scan_time_ns) < mintime) {
delay(hz);
scan_time_ns = gethrtime() - scn->scn_sync_start_time;
}
return;
}
/*
* Disabled by default, set zfs_scan_report_txgs to report
* average performance over the last zfs_scan_report_txgs TXGs.
*/
if (!dsl_scan_is_paused_scrub(scn) && zfs_scan_report_txgs != 0 &&
tx->tx_txg % zfs_scan_report_txgs == 0) {
scn->scn_issued_before_pass += spa->spa_scan_pass_issued;
spa_scan_stat_init(spa);
}
/*
* It is possible to switch from unsorted to sorted at any time,
* but afterwards the scan will remain sorted unless reloaded from
* a checkpoint after a reboot.
*/
if (!zfs_scan_legacy) {
scn->scn_is_sorted = B_TRUE;
if (scn->scn_last_checkpoint == 0)
scn->scn_last_checkpoint = ddi_get_lbolt();
}
/*
* For sorted scans, determine what kind of work we will be doing
* this txg based on our memory limitations and whether or not we
* need to perform a checkpoint.
*/
if (scn->scn_is_sorted) {
/*
* If we are over our checkpoint interval, set scn_clearing
* so that we can begin checkpointing immediately. The
* checkpoint allows us to save a consistent bookmark
* representing how much data we have scrubbed so far.
* Otherwise, use the memory limit to determine if we should
* scan for metadata or start issuing scrub IOs. We accumulate
* metadata until we hit our hard memory limit at which point
* we issue scrub IOs until we are at our soft memory limit.
*/
if (scn->scn_checkpointing ||
ddi_get_lbolt() - scn->scn_last_checkpoint >
SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
if (!scn->scn_checkpointing)
zfs_dbgmsg("begin scan checkpoint for %s",
spa->spa_name);
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
} else {
boolean_t should_clear = dsl_scan_should_clear(scn);
if (should_clear && !scn->scn_clearing) {
zfs_dbgmsg("begin scan clearing for %s",
spa->spa_name);
scn->scn_clearing = B_TRUE;
} else if (!should_clear && scn->scn_clearing) {
zfs_dbgmsg("finish scan clearing for %s",
spa->spa_name);
scn->scn_clearing = B_FALSE;
}
}
} else {
ASSERT0(scn->scn_checkpointing);
ASSERT0(scn->scn_clearing);
}
if (!scn->scn_clearing && scn->scn_done_txg == 0) {
/* Need to scan metadata for more blocks to scrub */
dsl_scan_phys_t *scnp = &scn->scn_phys;
taskqid_t prefetch_tqid;
/*
* Calculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB, maximum 1/4 of arc_c_max).
* Limits for the issuing phase are done per top-level vdev and
* are handled separately.
*/
scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20,
zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa)));
if (scnp->scn_ddt_bookmark.ddb_class <=
scnp->scn_ddt_class_max) {
ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
zfs_dbgmsg("doing scan sync for %s txg %llu; "
"ddt bm=%llu/%llu/%llu/%llx",
spa->spa_name,
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
} else {
zfs_dbgmsg("doing scan sync for %s txg %llu; "
"bm=%llu/%llu/%llu/%llu",
spa->spa_name,
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_bookmark.zb_objset,
(longlong_t)scnp->scn_bookmark.zb_object,
(longlong_t)scnp->scn_bookmark.zb_level,
(longlong_t)scnp->scn_bookmark.zb_blkid);
}
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scn->scn_prefetch_stop = B_FALSE;
prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
dsl_scan_prefetch_thread, scn, TQ_SLEEP);
ASSERT(prefetch_tqid != TASKQID_INVALID);
dsl_pool_config_enter(dp, FTAG);
dsl_scan_visit(scn, tx);
dsl_pool_config_exit(dp, FTAG);
mutex_enter(&dp->dp_spa->spa_scrub_lock);
scn->scn_prefetch_stop = B_TRUE;
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&dp->dp_spa->spa_scrub_lock);
taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
zfs_dbgmsg("scan visited %llu blocks of %s in %llums "
"(%llu os's, %llu holes, %llu < mintxg, "
"%llu in ddt, %llu > maxtxg)",
(longlong_t)scn->scn_visited_this_txg,
spa->spa_name,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_objsets_visited_this_txg,
(longlong_t)scn->scn_holes_this_txg,
(longlong_t)scn->scn_lt_min_this_txg,
(longlong_t)scn->scn_ddt_contained_this_txg,
(longlong_t)scn->scn_gt_max_this_txg);
if (!scn->scn_suspending) {
ASSERT0(avl_numnodes(&scn->scn_queue));
scn->scn_done_txg = tx->tx_txg + 1;
if (scn->scn_is_sorted) {
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
scn->scn_issued_before_pass +=
spa->spa_scan_pass_issued;
spa_scan_stat_init(spa);
}
zfs_dbgmsg("scan complete for %s txg %llu",
spa->spa_name,
(longlong_t)tx->tx_txg);
}
} else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) {
ASSERT(scn->scn_clearing);
/* need to issue scrubbing IOs from per-vdev queues */
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scan_io_queues_run(scn);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
/* calculate and dprintf the current memory usage */
(void) dsl_scan_should_clear(scn);
dsl_scan_update_stats(scn);
zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) "
"in %llums (avg_block_size = %llu, avg_seg_size = %llu)",
(longlong_t)scn->scn_zios_this_txg,
spa->spa_name,
(longlong_t)scn->scn_segs_this_txg,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_avg_zio_size_this_txg,
(longlong_t)scn->scn_avg_seg_size_this_txg);
} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
/* Finished with everything. Mark the scrub as complete */
zfs_dbgmsg("scan issuing complete txg %llu for %s",
(longlong_t)tx->tx_txg,
spa->spa_name);
ASSERT3U(scn->scn_done_txg, !=, 0);
ASSERT0(spa->spa_scrub_inflight);
ASSERT0(scn->scn_queues_pending);
dsl_scan_done(scn, B_TRUE, tx);
sync_type = SYNC_MANDATORY;
}
dsl_scan_sync_state(scn, tx, sync_type);
}
static void
count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
{
/*
* Don't count embedded bp's, since we already did the work of
* scanning these when we scanned the containing block.
*/
if (BP_IS_EMBEDDED(bp))
return;
/*
* Update the spa's stats on how many bytes we have issued.
* Sequential scrubs create a zio for each DVA of the bp. Each
* of these will include all DVAs for repair purposes, but the
* zio code will only try the first one unless there is an issue.
* Therefore, we should only count the first DVA for these IOs.
*/
atomic_add_64(&spa->spa_scan_pass_issued,
all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
/*
* If we resume after a reboot, zab will be NULL; don't record
* incomplete stats in that case.
*/
if (zab == NULL)
return;
for (int i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
if (t & DMU_OT_NEWTYPE)
t = DMU_OT_OTHER;
zfs_blkstat_t *zb = &zab->zab_type[l][t];
int equal;
zb->zb_count++;
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]))
zb->zb_ditto_2_of_2_samevdev++;
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal == 1)
zb->zb_ditto_2_of_3_samevdev++;
else if (equal == 3)
zb->zb_ditto_3_of_3_samevdev++;
break;
}
}
}
static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
avl_index_t idx;
dsl_scan_t *scn = queue->q_scn;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (unlikely(avl_is_empty(&queue->q_sios_by_addr)))
atomic_add_64(&scn->scn_queues_pending, 1);
if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
/* block is already scheduled for reading */
sio_free(sio);
return;
}
avl_insert(&queue->q_sios_by_addr, sio, idx);
queue->q_sio_memused += SIO_GET_MUSED(sio);
range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio),
SIO_GET_ASIZE(sio));
}
/*
* Given all the info we got from our metadata scanning process, we
* construct a scan_io_t and insert it into the scan sorting queue. The
* I/O must already be suitable for us to process. This is controlled
* by dsl_scan_enqueue().
*/
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
int zio_flags, const zbookmark_phys_t *zb)
{
scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
ASSERT0(BP_IS_GANG(bp));
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
bp2sio(bp, sio, dva_i);
sio->sio_flags = zio_flags;
sio->sio_zb = *zb;
queue->q_last_ext_addr = -1;
scan_io_queue_insert_impl(queue, sio);
}
/*
* Given a set of I/O parameters as discovered by the metadata traversal
* process, attempts to place the I/O into the sorted queues (if allowed),
* or immediately executes the I/O.
*/
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb)
{
spa_t *spa = dp->dp_spa;
ASSERT(!BP_IS_EMBEDDED(bp));
/*
* Gang blocks are hard to issue sequentially, so we just issue them
* here immediately instead of queuing them.
*/
if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
scan_exec_io(dp, bp, zio_flags, zb, NULL);
return;
}
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
dva_t dva;
vdev_t *vdev;
dva = bp->blk_dva[i];
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
ASSERT(vdev != NULL);
mutex_enter(&vdev->vdev_scan_io_queue_lock);
if (vdev->vdev_scan_io_queue == NULL)
vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
ASSERT(dp->dp_scan != NULL);
scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
i, zio_flags, zb);
mutex_exit(&vdev->vdev_scan_io_queue_lock);
}
}
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
size_t psize = BP_GET_PSIZE(bp);
boolean_t needs_io = B_FALSE;
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
count_block(dp->dp_blkstats, bp);
if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg) {
count_block_issued(spa, bp, B_TRUE);
return (0);
}
/* Embedded BP's have phys_birth==0, so we reject them above. */
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
zio_flags |= ZIO_FLAG_SCRUB;
needs_io = B_TRUE;
} else {
ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
zio_flags |= ZIO_FLAG_RESILVER;
needs_io = B_FALSE;
}
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE;
for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
const dva_t *dva = &bp->blk_dva[d];
/*
* Keep track of how much data we've examined so that
* zpool(8) status can make useful progress reports.
*/
uint64_t asize = DVA_GET_ASIZE(dva);
scn->scn_phys.scn_examined += asize;
spa->spa_scan_pass_exam += asize;
/* if it's a resilver, this may not be in the target range */
if (!needs_io)
needs_io = dsl_scan_need_resilver(spa, dva, psize,
phys_birth);
}
if (needs_io && !zfs_no_scrub_io) {
dsl_scan_enqueue(dp, bp, zio_flags, zb);
} else {
count_block_issued(spa, bp, B_TRUE);
}
/* do not relocate this block */
return (0);
}
static void
dsl_scan_scrub_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
dsl_scan_io_queue_t *queue = zio->io_private;
abd_free(zio->io_abd);
if (queue == NULL) {
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
} else {
mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&queue->q_zio_cv);
mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
}
if (zio->io_error && (zio->io_error != ECKSUM ||
!(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
if (dsl_errorscrubbing(spa->spa_dsl_pool) &&
!dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) {
atomic_inc_64(&spa->spa_dsl_pool->dp_scan
->errorscrub_phys.dep_errors);
} else {
atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys
.scn_errors);
}
}
}
/*
* Given a scanning zio's information, executes the zio. The zio need
* not necessarily be only sortable; this function simply executes the
* zio, no matter what it is. The optional queue argument allows the
* caller to specify that they want per top-level vdev IO rate limiting
* instead of the legacy global limiting.
*/
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
size_t size = BP_GET_PSIZE(bp);
abd_t *data = abd_alloc_for_io(size, B_FALSE);
zio_t *pio;
if (queue == NULL) {
ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
mutex_exit(&spa->spa_scrub_lock);
pio = scn->scn_zio_root;
} else {
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
ASSERT3U(queue->q_maxinflight_bytes, >, 0);
mutex_enter(q_lock);
while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
cv_wait(&queue->q_zio_cv, q_lock);
queue->q_inflight_bytes += BP_GET_PSIZE(bp);
pio = queue->q_zio;
mutex_exit(q_lock);
}
ASSERT(pio != NULL);
count_block_issued(spa, bp, queue == NULL);
zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
/*
* This is the primary extent sorting algorithm. We balance two parameters:
* 1) how many bytes of I/O are in an extent
* 2) how well the extent is filled with I/O (as a fraction of its total size)
* Since we allow extents to have gaps between their constituent I/Os, it's
* possible to have a fairly large extent that contains the same amount of
* I/O bytes as a much smaller extent, which just packs the I/O more tightly.
* The algorithm sorts based on a score calculated from the extent's size,
* the relative fill volume (in %) and a "fill weight" parameter that controls
* the split between whether we prefer larger extents or more well populated
* extents:
*
* SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
*
* Example:
* 1) assume extsz = 64 MiB
* 2) assume fill = 32 MiB (extent is half full)
* 3) assume fill_weight = 3
* 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
* SCORE = 32M + (50 * 3 * 32M) / 100
* SCORE = 32M + (4800M / 100)
* SCORE = 32M + 48M
* ^ ^
* | +--- final total relative fill-based score
* +--------- final total fill-based score
* SCORE = 80M
*
* As can be seen, at fill_weight=3, the algorithm is slightly biased towards
* extents that are more completely filled (in a 3:2 ratio) vs just larger.
* Note that as an optimization, we replace multiplication and division by
* 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
*
* Since we do not care whether one extent is only a few percent better than
* another, we compress the score into 6 bits via binary logarithm (AKA
* highbit64()) and store it in the high bits of the offset, which are otherwise
* unused due to ashift. This reduces the q_exts_by_size B-tree elements to only
* 64 bits, so they can be compared with a single operation. It also makes
* scrubs more sequential and reduces the chance that a minor extent change
* moves it within the B-tree.
*/
+__attribute__((always_inline)) inline
static int
ext_size_compare(const void *x, const void *y)
{
const uint64_t *a = x, *b = y;
return (TREE_CMP(*a, *b));
}
+ZFS_BTREE_FIND_IN_BUF_FUNC(ext_size_find_in_buf, uint64_t,
+ ext_size_compare)
+
static void
ext_size_create(range_tree_t *rt, void *arg)
{
(void) rt;
zfs_btree_t *size_tree = arg;
- zfs_btree_create(size_tree, ext_size_compare, sizeof (uint64_t));
+ zfs_btree_create(size_tree, ext_size_compare, ext_size_find_in_buf,
+ sizeof (uint64_t));
}
static void
ext_size_destroy(range_tree_t *rt, void *arg)
{
(void) rt;
zfs_btree_t *size_tree = arg;
ASSERT0(zfs_btree_numnodes(size_tree));
zfs_btree_destroy(size_tree);
}
static uint64_t
ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg)
{
(void) rt;
uint64_t size = rsg->rs_end - rsg->rs_start;
uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) *
fill_weight * rsg->rs_fill) >> 7);
ASSERT3U(rt->rt_shift, >=, 8);
return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start);
}
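For readers who want to experiment with the scoring scheme documented above ext_size_compare(), the following stand-alone user-space sketch reproduces the SCORE computation and the key packing performed by ext_size_value(). It is illustrative only and not part of this change: highbit64() is emulated with __builtin_clzll(), and fill_weight is hard-coded to 3 as in the worked example.

#include <stdint.h>
#include <stdio.h>

/* Emulate highbit64(): 1-based index of the highest set bit, 0 for 0. */
static unsigned
highbit64_emul(uint64_t x)
{
	return (x == 0 ? 0 : 64 - __builtin_clzll(x));
}

/* SCORE = fill + (fill/size, in 1/128ths) * fill_weight * fill */
static uint64_t
ext_score(uint64_t size, uint64_t fill, uint64_t fill_weight)
{
	return (fill + ((((fill << 7) / size) * fill_weight * fill) >> 7));
}

/* Pack the log2-compressed score above the extent's start offset. */
static uint64_t
ext_key(uint64_t score, uint64_t start)
{
	return (((uint64_t)(64 - highbit64_emul(score)) << 56) | start);
}

int
main(void)
{
	uint64_t size = 64ULL << 20;	/* 64 MiB extent */
	uint64_t fill = 32ULL << 20;	/* half full */
	uint64_t score = ext_score(size, fill, 3);

	/* Prints 80 MiB, matching the worked example in the comment above. */
	printf("score = %llu MiB, key = 0x%016llx\n",
	    (unsigned long long)(score >> 20),
	    (unsigned long long)ext_key(score, 1ULL << 20));
	return (0);
}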
static void
ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
zfs_btree_add(size_tree, &v);
}
static void
ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
zfs_btree_t *size_tree = arg;
ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP);
uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs);
zfs_btree_remove(size_tree, &v);
}
static void
ext_size_vacate(range_tree_t *rt, void *arg)
{
zfs_btree_t *size_tree = arg;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
ext_size_create(rt, arg);
}
static const range_tree_ops_t ext_size_ops = {
.rtop_create = ext_size_create,
.rtop_destroy = ext_size_destroy,
.rtop_add = ext_size_add,
.rtop_remove = ext_size_remove,
.rtop_vacate = ext_size_vacate
};
/*
* Comparator for the q_sios_by_addr tree. Sorting is simply performed
* based on LBA-order (from lowest to highest).
*/
static int
sio_addr_compare(const void *x, const void *y)
{
const scan_io_t *a = x, *b = y;
return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
}
/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
q->q_scn = scn;
q->q_vd = vd;
q->q_sio_memused = 0;
q->q_last_ext_addr = -1;
cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP,
&q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap);
avl_create(&q->q_sios_by_addr, sio_addr_compare,
sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
return (q);
}
/*
* Destroys a scan queue and all segments and scan_io_t's contained in it.
* No further execution of I/O occurs, anything pending in the queue is
* simply freed without being executed.
*/
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
void *cookie = NULL;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (!avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&scn->scn_queues_pending, -1);
while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
NULL) {
ASSERT(range_tree_contains(queue->q_exts_by_addr,
SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
queue->q_sio_memused -= SIO_GET_MUSED(sio);
sio_free(sio);
}
ASSERT0(queue->q_sio_memused);
range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
range_tree_destroy(queue->q_exts_by_addr);
avl_destroy(&queue->q_sios_by_addr);
cv_destroy(&queue->q_zio_cv);
kmem_free(queue, sizeof (*queue));
}
/*
* Properly transfers a dsl_scan_queue_t from `svd' to `tvd'. This is
* called on behalf of vdev_top_transfer when creating or destroying
* a mirror vdev due to zpool attach/detach.
*/
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
mutex_enter(&svd->vdev_scan_io_queue_lock);
mutex_enter(&tvd->vdev_scan_io_queue_lock);
VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
svd->vdev_scan_io_queue = NULL;
if (tvd->vdev_scan_io_queue != NULL)
tvd->vdev_scan_io_queue->q_vd = tvd;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
mutex_exit(&svd->vdev_scan_io_queue_lock);
}
static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
mutex_enter(&tvd->vdev_scan_io_queue_lock);
if (tvd->vdev_scan_io_queue != NULL)
dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
tvd->vdev_scan_io_queue = NULL;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
}
static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
vdev_t *vdev;
kmutex_t *q_lock;
dsl_scan_io_queue_t *queue;
scan_io_t *srch_sio, *sio;
avl_index_t idx;
uint64_t start, size;
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
ASSERT(vdev != NULL);
q_lock = &vdev->vdev_scan_io_queue_lock;
queue = vdev->vdev_scan_io_queue;
mutex_enter(q_lock);
if (queue == NULL) {
mutex_exit(q_lock);
return;
}
srch_sio = sio_alloc(BP_GET_NDVAS(bp));
bp2sio(bp, srch_sio, dva_i);
start = SIO_GET_OFFSET(srch_sio);
size = SIO_GET_ASIZE(srch_sio);
/*
* We can find the zio in two states:
* 1) Cold, just sitting in the queue of zio's to be issued at
* some point in the future. In this case, all we do is
* remove the zio from the q_sios_by_addr tree, decrement
* its data volume from the containing range_seg_t and
* resort the q_exts_by_size tree to reflect that the
* range_seg_t has lost some of its 'fill'. We don't shorten
* the range_seg_t - this is usually rare enough not to be
* worth the extra hassle of trying to keep track of precise
* extent boundaries.
* 2) Hot, where the zio is currently in-flight in
* dsl_scan_issue_ios. In this case, we can't simply
* reach in and stop the in-flight zio's, so we instead
* block the caller. Eventually, dsl_scan_issue_ios will
* be done with issuing the zio's it gathered and will
* signal us.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio != NULL) {
blkptr_t tmpbp;
/* Got it while it was cold in the queue */
ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
ASSERT3U(size, ==, SIO_GET_ASIZE(sio));
avl_remove(&queue->q_sios_by_addr, sio);
if (avl_is_empty(&queue->q_sios_by_addr))
atomic_add_64(&scn->scn_queues_pending, -1);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
range_tree_remove_fill(queue->q_exts_by_addr, start, size);
/* count the block as though we issued it */
sio2bp(sio, &tmpbp);
count_block_issued(spa, &tmpbp, B_FALSE);
sio_free(sio);
}
mutex_exit(q_lock);
}
/*
* Callback invoked when a zio_free() zio is executing. This needs to be
* intercepted to prevent the zio from deallocating a particular portion
* of disk space and it then getting reallocated and written to, while we
* still have it queued up for processing.
*/
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(scn != NULL);
if (!dsl_scan_is_running(scn))
return;
for (int i = 0; i < BP_GET_NDVAS(bp); i++)
dsl_scan_freed_dva(spa, bp, i);
}
/*
* Check if a vdev needs resilvering (non-empty DTL); if so, and a resilver has
* not started, start one. Otherwise, only restart if the max txg in the DTL range is
* greater than the max txg in the current scan. If the DTL max is less than
* the scan max, then the vdev has not missed any new data since the resilver
* started, so a restart is not needed.
*/
void
dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
{
uint64_t min, max;
if (!vdev_resilver_needed(vd, &min, &max))
return;
if (!dsl_scan_resilvering(dp)) {
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
return;
}
if (max <= dp->dp_scan->scn_phys.scn_max_txg)
return;
/* restart is needed, check if it can be deferred */
if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
vdev_defer_resilver(vd);
else
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}
ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW,
"Max bytes in flight per leaf vdev for scrubs and resilvers");
ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to scrub per txg");
ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to obsolete per txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to free per txg");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW,
"Min millisecs to resilver per txg");
ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
"Set to prevent scans from progressing");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
"Set to disable scrub I/O");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
"Set to disable scrub prefetching");
ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW,
"Max number of blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW,
"Max number of dedup blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
"Enable processing of the free_bpobj");
ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
"Enable block statistics calculation during scrub");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW,
"Fraction of RAM for scan hard limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW,
"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
"Scrub using legacy non-sequential method");
ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW,
"Scan progress on-disk checkpointing interval");
ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW,
"Max gap in bytes between sequential scrub / resilver I/Os");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW,
"Fraction of hard limit used as soft limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
"Tunable to attempt to reduce lock contention");
ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW,
"Tunable to adjust bias towards more filled segments during scans");
ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW,
"Tunable to report resilver performance over the last N txgs");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
"Process all resilvers immediately");
ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, UINT, ZMOD_RW,
"Error blocks to be scrubbed in one txg");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/fm.c b/sys/contrib/openzfs/module/zfs/fm.c
index 76956572f8bd..77d87b694a43 100644
--- a/sys/contrib/openzfs/module/zfs/fm.c
+++ b/sys/contrib/openzfs/module/zfs/fm.c
@@ -1,1374 +1,1373 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Fault Management Architecture (FMA) Resource and Protocol Support
*
* The routines contained herein provide services to support kernel subsystems
* in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
*
* Name-Value Pair Lists
*
* The embodiment of an FMA protocol element (event, fmri or authority) is a
* name-value pair list (nvlist_t). FMA-specific nvlist constructor and
* destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
* to create an nvpair list using custom allocators. Callers may choose to
* allocate either from the kernel memory allocator, or from a preallocated
* buffer, useful in constrained contexts like high-level interrupt routines.
*
* Protocol Event and FMRI Construction
*
* Convenience routines are provided to construct nvlist events according to
* the FMA Event Protocol and Naming Schema specification for ereports and
* FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
*
* ENA Manipulation
*
* Routines to generate ENA formats 0, 1 and 2 are available as well as
* routines to increment formats 1 and 2. Individual fields within the
* ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
* fm_ena_format_get() and fm_ena_gen_get().
*/
#include <sys/types.h>
#include <sys/time.h>
#include <sys/list.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/kstat.h>
#include <sys/zfs_context.h>
#ifdef _KERNEL
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/zfs_ioctl.h>
static uint_t zfs_zevent_len_max = 512;
static uint_t zevent_len_cur = 0;
static int zevent_waiters = 0;
static int zevent_flags = 0;
/* Num events rate limited since the last time zfs_zevent_next() was called */
static uint64_t ratelimit_dropped = 0;
/*
* The EID (Event IDentifier) is used to uniquely tag a zevent when it is
* posted. The posted EIDs are monotonically increasing but not persistent.
* They will be reset to the initial value (1) each time the kernel module is
* loaded.
*/
static uint64_t zevent_eid = 0;
static kmutex_t zevent_lock;
static list_t zevent_list;
static kcondvar_t zevent_cv;
#endif /* _KERNEL */
/*
* Common fault management kstats to record event generation failures
*/
struct erpt_kstat {
kstat_named_t erpt_dropped; /* num erpts dropped on post */
kstat_named_t erpt_set_failed; /* num erpt set failures */
kstat_named_t fmri_set_failed; /* num fmri set failures */
kstat_named_t payload_set_failed; /* num payload set failures */
kstat_named_t erpt_duplicates; /* num duplicate erpts */
};
static struct erpt_kstat erpt_kstat_data = {
{ "erpt-dropped", KSTAT_DATA_UINT64 },
{ "erpt-set-failed", KSTAT_DATA_UINT64 },
{ "fmri-set-failed", KSTAT_DATA_UINT64 },
{ "payload-set-failed", KSTAT_DATA_UINT64 },
{ "erpt-duplicates", KSTAT_DATA_UINT64 }
};
kstat_t *fm_ksp;
#ifdef _KERNEL
static zevent_t *
zfs_zevent_alloc(void)
{
zevent_t *ev;
ev = kmem_zalloc(sizeof (zevent_t), KM_SLEEP);
list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t),
offsetof(zfs_zevent_t, ze_node));
list_link_init(&ev->ev_node);
return (ev);
}
static void
zfs_zevent_free(zevent_t *ev)
{
/* Run provided cleanup callback */
ev->ev_cb(ev->ev_nvl, ev->ev_detector);
list_destroy(&ev->ev_ze_list);
kmem_free(ev, sizeof (zevent_t));
}
static void
zfs_zevent_drain(zevent_t *ev)
{
zfs_zevent_t *ze;
ASSERT(MUTEX_HELD(&zevent_lock));
list_remove(&zevent_list, ev);
/* Remove references to this event in all private file data */
- while ((ze = list_head(&ev->ev_ze_list)) != NULL) {
- list_remove(&ev->ev_ze_list, ze);
+ while ((ze = list_remove_head(&ev->ev_ze_list)) != NULL) {
ze->ze_zevent = NULL;
ze->ze_dropped++;
}
zfs_zevent_free(ev);
}
void
zfs_zevent_drain_all(uint_t *count)
{
zevent_t *ev;
mutex_enter(&zevent_lock);
while ((ev = list_head(&zevent_list)) != NULL)
zfs_zevent_drain(ev);
*count = zevent_len_cur;
zevent_len_cur = 0;
mutex_exit(&zevent_lock);
}
/*
* New zevents are inserted at the head. If the maximum queue
* length is exceeded a zevent will be drained from the tail.
* As part of this any user space processes which currently have
* a reference to this zevent_t in their private data will have
* this reference set to NULL.
*/
static void
zfs_zevent_insert(zevent_t *ev)
{
ASSERT(MUTEX_HELD(&zevent_lock));
list_insert_head(&zevent_list, ev);
if (zevent_len_cur >= zfs_zevent_len_max)
zfs_zevent_drain(list_tail(&zevent_list));
else
zevent_len_cur++;
}
/*
* Post a zevent. The cb will be called when nvl and detector are no longer
* needed, i.e.:
* - An error happened and a zevent can't be posted. In this case, cb is called
* before zfs_zevent_post() returns.
* - The event is being drained and freed.
*/
int
zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb)
{
inode_timespec_t tv;
int64_t tv_array[2];
uint64_t eid;
size_t nvl_size = 0;
zevent_t *ev;
int error;
ASSERT(cb != NULL);
gethrestime(&tv);
tv_array[0] = tv.tv_sec;
tv_array[1] = tv.tv_nsec;
error = nvlist_add_int64_array(nvl, FM_EREPORT_TIME, tv_array, 2);
if (error) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
goto out;
}
eid = atomic_inc_64_nv(&zevent_eid);
error = nvlist_add_uint64(nvl, FM_EREPORT_EID, eid);
if (error) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
goto out;
}
error = nvlist_size(nvl, &nvl_size, NV_ENCODE_NATIVE);
if (error) {
atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
goto out;
}
if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
error = EOVERFLOW;
goto out;
}
ev = zfs_zevent_alloc();
if (ev == NULL) {
atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
error = ENOMEM;
goto out;
}
ev->ev_nvl = nvl;
ev->ev_detector = detector;
ev->ev_cb = cb;
ev->ev_eid = eid;
mutex_enter(&zevent_lock);
zfs_zevent_insert(ev);
cv_broadcast(&zevent_cv);
mutex_exit(&zevent_lock);
out:
if (error)
cb(nvl, detector);
return (error);
}
void
zfs_zevent_track_duplicate(void)
{
atomic_inc_64(&erpt_kstat_data.erpt_duplicates.value.ui64);
}
static int
zfs_zevent_minor_to_state(minor_t minor, zfs_zevent_t **ze)
{
*ze = zfsdev_get_state(minor, ZST_ZEVENT);
if (*ze == NULL)
return (SET_ERROR(EBADF));
return (0);
}
zfs_file_t *
zfs_zevent_fd_hold(int fd, minor_t *minorp, zfs_zevent_t **ze)
{
zfs_file_t *fp = zfs_file_get(fd);
if (fp == NULL)
return (NULL);
int error = zfsdev_getminor(fp, minorp);
if (error == 0)
error = zfs_zevent_minor_to_state(*minorp, ze);
if (error) {
zfs_zevent_fd_rele(fp);
fp = NULL;
}
return (fp);
}
void
zfs_zevent_fd_rele(zfs_file_t *fp)
{
zfs_file_put(fp);
}
/*
* Get the next zevent in the stream and place a copy in 'event'. This
* may fail with ENOMEM if the encoded nvlist size exceeds the passed
* 'event_size'. In this case the stream pointer is not advanced and
* 'event_size' is set to the minimum required buffer size.
*/
int
zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
uint64_t *dropped)
{
zevent_t *ev;
size_t size;
int error = 0;
mutex_enter(&zevent_lock);
if (ze->ze_zevent == NULL) {
/* New stream: start at the beginning/tail */
ev = list_tail(&zevent_list);
if (ev == NULL) {
error = ENOENT;
goto out;
}
} else {
/*
* Existing stream: continue with the next element and remove
* ourselves from the wait queue for the previous element.
*/
ev = list_prev(&zevent_list, ze->ze_zevent);
if (ev == NULL) {
error = ENOENT;
goto out;
}
}
VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
if (size > *event_size) {
*event_size = size;
error = ENOMEM;
goto out;
}
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ze->ze_zevent = ev;
list_insert_head(&ev->ev_ze_list, ze);
(void) nvlist_dup(ev->ev_nvl, event, KM_SLEEP);
*dropped = ze->ze_dropped;
#ifdef _KERNEL
/* Include events dropped due to rate limiting */
*dropped += atomic_swap_64(&ratelimit_dropped, 0);
#endif
ze->ze_dropped = 0;
out:
mutex_exit(&zevent_lock);
return (error);
}
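The ENOMEM contract described above lends itself to a simple retry loop on the consumer side. The sketch below is hypothetical and not part of this change: read_all_events() is an invented helper, and real callers (the zevent ioctl path) also copy the nvlist out to user space, which is elided here.

static void
read_all_events(zfs_zevent_t *ze)
{
	uint64_t bufsz = 1024;
	uint64_t dropped;
	nvlist_t *event;
	int error;

	for (;;) {
		error = zfs_zevent_next(ze, &event, &bufsz, &dropped);
		if (error == ENOMEM)
			continue;	/* bufsz was raised to the needed size */
		if (error != 0)
			break;		/* ENOENT: no more events queued */
		/* ... consume 'event' and 'dropped' ... */
		nvlist_free(event);
	}
}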
/*
* Wait in an interruptible state for any new events.
*/
int
zfs_zevent_wait(zfs_zevent_t *ze)
{
int error = EAGAIN;
mutex_enter(&zevent_lock);
zevent_waiters++;
while (error == EAGAIN) {
if (zevent_flags & ZEVENT_SHUTDOWN) {
error = SET_ERROR(ESHUTDOWN);
break;
}
if (cv_wait_sig(&zevent_cv, &zevent_lock) == 0) {
error = SET_ERROR(EINTR);
break;
} else if (!list_is_empty(&zevent_list)) {
error = 0;
continue;
} else {
error = EAGAIN;
}
}
zevent_waiters--;
mutex_exit(&zevent_lock);
return (error);
}
/*
* The caller may seek to a specific EID by passing that EID. If the EID
* is still available in the posted list of events the cursor is positioned
* there. Otherwise ENOENT is returned and the cursor is not moved.
*
* There are two reserved EIDs which may be passed and will never fail.
* ZEVENT_SEEK_START positions the cursor at the start of the list, and
* ZEVENT_SEEK_END positions the cursor at the end of the list.
*/
int
zfs_zevent_seek(zfs_zevent_t *ze, uint64_t eid)
{
zevent_t *ev;
int error = 0;
mutex_enter(&zevent_lock);
if (eid == ZEVENT_SEEK_START) {
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ze->ze_zevent = NULL;
goto out;
}
if (eid == ZEVENT_SEEK_END) {
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ev = list_head(&zevent_list);
if (ev) {
ze->ze_zevent = ev;
list_insert_head(&ev->ev_ze_list, ze);
} else {
ze->ze_zevent = NULL;
}
goto out;
}
for (ev = list_tail(&zevent_list); ev != NULL;
ev = list_prev(&zevent_list, ev)) {
if (ev->ev_eid == eid) {
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ze->ze_zevent = ev;
list_insert_head(&ev->ev_ze_list, ze);
break;
}
}
if (ev == NULL)
error = ENOENT;
out:
mutex_exit(&zevent_lock);
return (error);
}
void
zfs_zevent_init(zfs_zevent_t **zep)
{
zfs_zevent_t *ze;
ze = *zep = kmem_zalloc(sizeof (zfs_zevent_t), KM_SLEEP);
list_link_init(&ze->ze_node);
}
void
zfs_zevent_destroy(zfs_zevent_t *ze)
{
mutex_enter(&zevent_lock);
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
mutex_exit(&zevent_lock);
kmem_free(ze, sizeof (zfs_zevent_t));
}
#endif /* _KERNEL */
/*
* Wrappers for FM nvlist allocators
*/
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
(void) nva;
return (kmem_alloc(size, KM_SLEEP));
}
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
(void) nva;
kmem_free(buf, size);
}
static const nv_alloc_ops_t fm_mem_alloc_ops = {
.nv_ao_init = NULL,
.nv_ao_fini = NULL,
.nv_ao_alloc = i_fm_alloc,
.nv_ao_free = i_fm_free,
.nv_ao_reset = NULL
};
/*
* Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
* to the newly allocated nv_alloc_t structure is returned upon success or NULL
* is returned to indicate that the nv_alloc structure could not be created.
*/
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
kmem_free(nvhdl, sizeof (nv_alloc_t));
return (NULL);
}
return (nvhdl);
}
/*
* Destroy a previously allocated nv_alloc structure. The fixed buffer
* associated with nva must be freed by the caller.
*/
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
nv_alloc_fini(nva);
kmem_free(nva, sizeof (nv_alloc_t));
}
/*
* Create a new nv list. A pointer to a new nv list structure is returned
* upon success or NULL is returned to indicate that the structure could
* not be created. The newly created nv list is created and managed by the
* operations installed in nva. If nva is NULL, the default FMA nva
* operations are installed and used.
*
* When called from the kernel and nva == NULL, this function must be called
* from passive kernel context with no locks held that can prevent a
* sleeping memory allocation from occurring. Otherwise, this function may
* be called from other kernel contexts as long as a valid nva created via
* fm_nva_xcreate() is supplied.
*/
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
int hdl_alloced = 0;
nvlist_t *nvl;
nv_alloc_t *nvhdl;
if (nva == NULL) {
nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
kmem_free(nvhdl, sizeof (nv_alloc_t));
return (NULL);
}
hdl_alloced = 1;
} else {
nvhdl = nva;
}
if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
if (hdl_alloced) {
nv_alloc_fini(nvhdl);
kmem_free(nvhdl, sizeof (nv_alloc_t));
}
return (NULL);
}
return (nvl);
}
/*
* Destroy a previously allocated nvlist structure. flag indicates whether
* or not the associated nva structure should be freed (FM_NVA_FREE) or
* retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
* it to be re-used for future nvlist creation operations.
*/
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);
nvlist_free(nvl);
if (nva != NULL) {
if (flag == FM_NVA_FREE)
fm_nva_xdestroy(nva);
}
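A hypothetical example (not part of this change) of the fixed-buffer path described above fm_nvlist_create(): the caller supplies its own backing store via fm_nva_xcreate(), builds nvlists from it, and retains the allocator between uses. The helper name and buffer size are illustrative only.

static void
fixed_buffer_example(void)
{
	static char buf[ERPT_DATA_SZ];	/* caller-owned backing store */
	nv_alloc_t *nva = fm_nva_xcreate(buf, sizeof (buf));
	nvlist_t *nvl;

	if (nva == NULL)
		return;

	nvl = fm_nvlist_create(nva);
	if (nvl != NULL) {
		/* FM_NVA_RETAIN keeps 'nva' usable for the next nvlist. */
		fm_nvlist_destroy(nvl, FM_NVA_RETAIN);
	}
	fm_nva_xdestroy(nva);		/* 'buf' itself stays with the caller */
}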
}
int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
int nelem, ret = 0;
data_type_t type;
while (ret == 0 && name != NULL) {
type = va_arg(ap, data_type_t);
switch (type) {
case DATA_TYPE_BYTE:
ret = nvlist_add_byte(payload, name,
va_arg(ap, uint_t));
break;
case DATA_TYPE_BYTE_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_byte_array(payload, name,
va_arg(ap, uchar_t *), nelem);
break;
case DATA_TYPE_BOOLEAN_VALUE:
ret = nvlist_add_boolean_value(payload, name,
va_arg(ap, boolean_t));
break;
case DATA_TYPE_BOOLEAN_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_boolean_array(payload, name,
va_arg(ap, boolean_t *), nelem);
break;
case DATA_TYPE_INT8:
ret = nvlist_add_int8(payload, name,
va_arg(ap, int));
break;
case DATA_TYPE_INT8_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int8_array(payload, name,
va_arg(ap, int8_t *), nelem);
break;
case DATA_TYPE_UINT8:
ret = nvlist_add_uint8(payload, name,
va_arg(ap, uint_t));
break;
case DATA_TYPE_UINT8_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint8_array(payload, name,
va_arg(ap, uint8_t *), nelem);
break;
case DATA_TYPE_INT16:
ret = nvlist_add_int16(payload, name,
va_arg(ap, int));
break;
case DATA_TYPE_INT16_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int16_array(payload, name,
va_arg(ap, int16_t *), nelem);
break;
case DATA_TYPE_UINT16:
ret = nvlist_add_uint16(payload, name,
va_arg(ap, uint_t));
break;
case DATA_TYPE_UINT16_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint16_array(payload, name,
va_arg(ap, uint16_t *), nelem);
break;
case DATA_TYPE_INT32:
ret = nvlist_add_int32(payload, name,
va_arg(ap, int32_t));
break;
case DATA_TYPE_INT32_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int32_array(payload, name,
va_arg(ap, int32_t *), nelem);
break;
case DATA_TYPE_UINT32:
ret = nvlist_add_uint32(payload, name,
va_arg(ap, uint32_t));
break;
case DATA_TYPE_UINT32_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint32_array(payload, name,
va_arg(ap, uint32_t *), nelem);
break;
case DATA_TYPE_INT64:
ret = nvlist_add_int64(payload, name,
va_arg(ap, int64_t));
break;
case DATA_TYPE_INT64_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int64_array(payload, name,
va_arg(ap, int64_t *), nelem);
break;
case DATA_TYPE_UINT64:
ret = nvlist_add_uint64(payload, name,
va_arg(ap, uint64_t));
break;
case DATA_TYPE_UINT64_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint64_array(payload, name,
va_arg(ap, uint64_t *), nelem);
break;
case DATA_TYPE_STRING:
ret = nvlist_add_string(payload, name,
va_arg(ap, char *));
break;
case DATA_TYPE_STRING_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_string_array(payload, name,
va_arg(ap, const char **), nelem);
break;
case DATA_TYPE_NVLIST:
ret = nvlist_add_nvlist(payload, name,
va_arg(ap, nvlist_t *));
break;
case DATA_TYPE_NVLIST_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_nvlist_array(payload, name,
va_arg(ap, const nvlist_t **), nelem);
break;
default:
ret = EINVAL;
}
name = va_arg(ap, char *);
}
return (ret);
}
void
fm_payload_set(nvlist_t *payload, ...)
{
int ret;
const char *name;
va_list ap;
va_start(ap, payload);
name = va_arg(ap, char *);
ret = i_fm_payload_set(payload, name, ap);
va_end(ap);
if (ret)
atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
}
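The varargs convention consumed by i_fm_payload_set() is easiest to see from a caller's perspective: each member is a name, a data type, an optional element count for array types, then the value, and the list is terminated by a NULL name. The sketch below is hypothetical; the member names are invented.

static void
payload_example(nvlist_t *ereport)
{
	uint64_t sizes[2] = { 4096, 8192 };

	fm_payload_set(ereport,
	    "example_uint64", DATA_TYPE_UINT64, (uint64_t)123,
	    "example_string", DATA_TYPE_STRING, "hello",
	    "example_sizes", DATA_TYPE_UINT64_ARRAY, 2, sizes,
	    NULL);
}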
/*
* Set-up and validate the members of an ereport event according to:
*
* Member name Type Value
* ====================================================
* class string ereport
* version uint8_t 0
* ena uint64_t <ena>
* detector nvlist_t <detector>
* ereport-payload nvlist_t <var args>
*
* We don't actually add a 'version' member to the payload. Really,
* the version quoted to us by our caller is that of the category 1
* "ereport" event class (and we require FM_EREPORT_VERS0) but
* the payload version of the actual leaf class event under construction
* may be something else. Callers should supply a version in the varargs,
* or (better) we could take two version arguments - one for the
* ereport category 1 classification (expect FM_EREPORT_VERS0) and one
* for the leaf class.
*/
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
uint64_t ena, const nvlist_t *detector, ...)
{
char ereport_class[FM_MAX_CLASS];
const char *name;
va_list ap;
int ret;
if (version != FM_EREPORT_VERS0) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
return;
}
(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
FM_EREPORT_CLASS, erpt_class);
if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
return;
}
if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
(nvlist_t *)detector) != 0) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
va_start(ap, detector);
name = va_arg(ap, const char *);
ret = i_fm_payload_set(ereport, name, ap);
va_end(ap);
if (ret)
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
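Putting the pieces together, a caller might build a detector FMRI, wrap it in an ereport, and hand both to zfs_zevent_post() with a cleanup callback, as sketched below. This is illustrative only and not part of this change: the class string, payload name, and helper names are invented, fm_fmri_zfs_set() is defined later in this file, and error handling is abbreviated.

static void
example_cleanup(nvlist_t *nvl, nvlist_t *detector)
{
	fm_nvlist_destroy(nvl, FM_NVA_FREE);
	if (detector != NULL)
		fm_nvlist_destroy(detector, FM_NVA_FREE);
}

static void
example_post_ereport(uint64_t pool_guid, uint64_t vdev_guid)
{
	nvlist_t *ereport = fm_nvlist_create(NULL);
	nvlist_t *detector = fm_nvlist_create(NULL);

	if (ereport == NULL || detector == NULL)
		return;		/* cleanup of a partial allocation elided */

	fm_fmri_zfs_set(detector, ZFS_SCHEME_VERSION0, pool_guid, vdev_guid);
	fm_ereport_set(ereport, FM_EREPORT_VERS0, "fs.zfs.example",
	    fm_ena_generate(0, FM_ENA_FMT1), detector,
	    "pool_guid", DATA_TYPE_UINT64, pool_guid,
	    NULL);

	/* On failure the callback has already been invoked for us. */
	(void) zfs_zevent_post(ereport, detector, example_cleanup);
}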
/*
* Set-up and validate the members of an hc fmri according to:
*
* Member name Type Value
* ===================================================
* version uint8_t 0
* auth nvlist_t <auth>
* hc-name string <name>
* hc-id string <id>
*
* Note that auth and hc-id are optional members.
*/
#define HC_MAXPAIRS 20
#define HC_MAXNAMELEN 50
static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
if (version != FM_HC_SCHEME_VERSION) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
return (1);
}
void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
nvlist_t *snvl, int npairs, ...)
{
nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
nvlist_t *pairs[HC_MAXPAIRS];
va_list ap;
int i;
if (!fm_fmri_hc_set_common(fmri, version, auth))
return;
npairs = MIN(npairs, HC_MAXPAIRS);
va_start(ap, npairs);
for (i = 0; i < npairs; i++) {
const char *name = va_arg(ap, const char *);
uint32_t id = va_arg(ap, uint32_t);
char idstr[11];
(void) snprintf(idstr, sizeof (idstr), "%u", id);
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
va_end(ap);
if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST,
(const nvlist_t **)pairs, npairs) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
for (i = 0; i < npairs; i++)
fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
if (snvl != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
nvlist_t *pairs[HC_MAXPAIRS];
nvlist_t **hcl;
uint_t n;
int i, j;
va_list ap;
const char *hcname, *hcid;
if (!fm_fmri_hc_set_common(fmri, version, auth))
return;
/*
* copy the bboard nvpairs to the pairs array
*/
if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
!= 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
for (i = 0; i < n; i++) {
if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
&hcname) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
for (j = 0; j <= i; j++) {
if (pairs[j] != NULL)
fm_nvlist_destroy(pairs[j],
FM_NVA_RETAIN);
}
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
/*
* create the pairs from passed in pairs
*/
npairs = MIN(npairs, HC_MAXPAIRS);
va_start(ap, npairs);
for (i = n; i < npairs + n; i++) {
const char *name = va_arg(ap, const char *);
uint32_t id = va_arg(ap, uint32_t);
char idstr[11];
(void) snprintf(idstr, sizeof (idstr), "%u", id);
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
for (j = 0; j <= i; j++) {
if (pairs[j] != NULL)
fm_nvlist_destroy(pairs[j],
FM_NVA_RETAIN);
}
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
va_end(ap);
return;
}
}
va_end(ap);
/*
* Create the fmri hc list
*/
if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST,
(const nvlist_t **)pairs, npairs + n) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
for (i = 0; i < npairs + n; i++) {
fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
}
if (snvl != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
}
/*
* Set-up and validate the members of a dev fmri according to:
*
* Member name Type Value
* ====================================================
* version uint8_t 0
* auth nvlist_t <auth>
* devpath string <devpath>
* [devid] string <devid>
* [target-port-l0id] string <target-port-lun0-id>
*
* Note that auth and devid are optional members.
*/
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
const char *devpath, const char *devid, const char *tpl0)
{
int err = 0;
if (version != DEV_SCHEME_VERSION0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);
if (auth != NULL) {
err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
(nvlist_t *)auth);
}
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);
if (devid != NULL)
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);
if (tpl0 != NULL)
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);
if (err)
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
/*
* Set-up and validate the members of a cpu fmri according to:
*
* Member name Type Value
* ====================================================
* version uint8_t 0
* auth nvlist_t <auth>
* cpuid uint32_t <cpu_id>
* cpumask uint8_t <cpu_mask>
* serial uint64_t <serial_id>
*
* Note that auth, cpumask, serial are optional members.
*
*/
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;
if (version < CPU_SCHEME_VERSION1) {
atomic_inc_64(failedp);
return;
}
if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
atomic_inc_64(failedp);
return;
}
if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
FM_FMRI_SCHEME_CPU) != 0) {
atomic_inc_64(failedp);
return;
}
if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0)
atomic_inc_64(failedp);
if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
atomic_inc_64(failedp);
if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
*cpu_maskp) != 0)
atomic_inc_64(failedp);
if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
atomic_inc_64(failedp);
}
/*
* Set-up and validate the members of a mem fmri according to:
*
* Member name Type Value
* ====================================================
* version uint8_t 0
* auth nvlist_t <auth> [optional]
* unum string <unum>
* serial string <serial> [optional*]
* offset uint64_t <offset> [optional]
*
* * serial is required if offset is present
*/
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
const char *unum, const char *serial, uint64_t offset)
{
if (version != MEM_SCHEME_VERSION0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (!serial && (offset != (uint64_t)-1)) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (auth != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (serial != NULL) {
if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
(const char **)&serial, 1) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
FM_FMRI_MEM_OFFSET, offset) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
uint64_t vdev_guid)
{
if (version != ZFS_SCHEME_VERSION0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (vdev_guid != 0) {
if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
uint64_t
fm_ena_increment(uint64_t ena)
{
uint64_t new_ena;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
break;
case FM_ENA_FMT2:
new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
break;
default:
new_ena = 0;
}
return (new_ena);
}
uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
uint64_t ena = 0;
switch (format) {
case FM_ENA_FMT1:
if (timestamp) {
ena = (uint64_t)((format & ENA_FORMAT_MASK) |
((cpuid << ENA_FMT1_CPUID_SHFT) &
ENA_FMT1_CPUID_MASK) |
((timestamp << ENA_FMT1_TIME_SHFT) &
ENA_FMT1_TIME_MASK));
} else {
ena = (uint64_t)((format & ENA_FORMAT_MASK) |
((cpuid << ENA_FMT1_CPUID_SHFT) &
ENA_FMT1_CPUID_MASK) |
((gethrtime() << ENA_FMT1_TIME_SHFT) &
ENA_FMT1_TIME_MASK));
}
break;
case FM_ENA_FMT2:
ena = (uint64_t)((format & ENA_FORMAT_MASK) |
((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
break;
default:
break;
}
return (ena);
}
uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
uint64_t ena;
kpreempt_disable();
ena = fm_ena_generate_cpu(timestamp, getcpuid(), format);
kpreempt_enable();
return (ena);
}
uint64_t
fm_ena_generation_get(uint64_t ena)
{
uint64_t gen;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
break;
case FM_ENA_FMT2:
gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
break;
default:
gen = 0;
break;
}
return (gen);
}
uchar_t
fm_ena_format_get(uint64_t ena)
{
return (ENA_FORMAT(ena));
}
uint64_t
fm_ena_id_get(uint64_t ena)
{
uint64_t id;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
break;
case FM_ENA_FMT2:
id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
break;
default:
id = 0;
}
return (id);
}
uint64_t
fm_ena_time_get(uint64_t ena)
{
uint64_t time;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
break;
case FM_ENA_FMT2:
time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
break;
default:
time = 0;
}
return (time);
}
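A small illustrative round trip (not part of this change) through the ENA helpers above: generate a format-1 ENA, read its fields back, and bump the generation. ena_roundtrip_example() is an invented name.

static void
ena_roundtrip_example(void)
{
	uint64_t ena = fm_ena_generate(gethrtime(), FM_ENA_FMT1);

	ASSERT3U(fm_ena_format_get(ena), ==, FM_ENA_FMT1);
	(void) fm_ena_time_get(ena);		/* truncated timestamp bits */
	(void) fm_ena_id_get(ena);		/* cpuid for format 1 */
	(void) fm_ena_generation_get(ena);	/* 0 for a freshly generated ENA */
	ena = fm_ena_increment(ena);		/* bumps the generation field */
}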
#ifdef _KERNEL
/*
* Helper function to increment ereport dropped count. Used by the event
* rate limiting code to give feedback to the user about how many events were
* rate limited by including them in the 'dropped' count.
*/
void
fm_erpt_dropped_increment(void)
{
atomic_inc_64(&ratelimit_dropped);
}
void
fm_init(void)
{
zevent_len_cur = 0;
zevent_flags = 0;
/* Initialize zevent allocation and generation kstats */
fm_ksp = kstat_create("zfs", 0, "fm", "misc", KSTAT_TYPE_NAMED,
sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (fm_ksp != NULL) {
fm_ksp->ks_data = &erpt_kstat_data;
kstat_install(fm_ksp);
} else {
cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
}
mutex_init(&zevent_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zevent_list, sizeof (zevent_t),
offsetof(zevent_t, ev_node));
cv_init(&zevent_cv, NULL, CV_DEFAULT, NULL);
zfs_ereport_init();
}
void
fm_fini(void)
{
uint_t count;
zfs_ereport_fini();
zfs_zevent_drain_all(&count);
mutex_enter(&zevent_lock);
cv_broadcast(&zevent_cv);
zevent_flags |= ZEVENT_SHUTDOWN;
while (zevent_waiters > 0) {
mutex_exit(&zevent_lock);
kpreempt(KPREEMPT_SYNC);
mutex_enter(&zevent_lock);
}
mutex_exit(&zevent_lock);
cv_destroy(&zevent_cv);
list_destroy(&zevent_list);
mutex_destroy(&zevent_lock);
if (fm_ksp != NULL) {
kstat_delete(fm_ksp);
fm_ksp = NULL;
}
}
#endif /* _KERNEL */
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, len_max, UINT, ZMOD_RW,
"Max event queue length");
diff --git a/sys/contrib/openzfs/module/zfs/metaslab.c b/sys/contrib/openzfs/module/zfs/metaslab.c
index 24d52a74933f..176247d63b76 100644
--- a/sys/contrib/openzfs/module/zfs/metaslab.c
+++ b/sys/contrib/openzfs/module/zfs/metaslab.c
@@ -1,6280 +1,6287 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>
#define WITH_DF_BLOCK_ALLOCATOR
#define GANG_ALLOCATION(flags) \
((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
/*
* Metaslab granularity, in bytes. This is roughly similar to what would be
* referred to as the "stripe size" in traditional RAID arrays. In normal
* operation, we will try to write this amount of data to each disk before
* moving on to the next top-level vdev.
*/
static uint64_t metaslab_aliquot = 1024 * 1024;
/*
* For testing, make some blocks above a certain size be gang blocks.
*/
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
/*
* In pools where the log space map feature is not enabled we touch
* multiple metaslabs (and their respective space maps) with each
* transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
* around the disk. So a sane default for the space map block size
* is 8~16K.
*/
int zfs_metaslab_sm_blksz_no_log = (1 << 14);
/*
* When the log space map feature is enabled, we accumulate a lot of
* changes per metaslab that are flushed once in a while so we benefit
* from a bigger block size like 128K for the metaslab space maps.
*/
int zfs_metaslab_sm_blksz_with_log = (1 << 17);
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
uint_t zfs_condense_pct = 200;
/*
* Condensing a metaslab is not guaranteed to actually reduce the amount of
* space used on disk. In particular, a space map uses data in increments of
* MAX(1 << ashift, space_map_blksz), so a metaslab might use the
* same number of blocks after condensing. Since the goal of condensing is to
* reduce the number of IOPs required to read the space map, we only want to
* condense when we can be sure we will reduce the number of blocks used by the
* space map. Unfortunately, we cannot precisely compute whether or not this is
* the case in metaslab_should_condense since we are holding ms_lock. Instead,
* we apply the following heuristic: do not condense a spacemap unless the
* uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
* blocks.
*/
static const int zfs_metaslab_condense_block_threshold = 4;
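A rough sketch (illustrative only, not part of this change) of the block-count heuristic described above; the authoritative check lives in metaslab_should_condense() and is not reproduced exactly here. condense_heuristic_sketch() is an invented name.

static boolean_t
condense_heuristic_sketch(uint64_t sm_length, uint64_t ashift, uint64_t sm_blksz)
{
	/* A space map grows in increments of MAX(1 << ashift, sm_blksz). */
	uint64_t record_size = MAX(1ULL << ashift, sm_blksz);

	/* Only condense once the uncondensed map spans more than N blocks. */
	return (sm_length > zfs_metaslab_condense_block_threshold * record_size);
}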
/*
* The zfs_mg_noalloc_threshold defines which metaslab groups should
* be eligible for allocation. The value is defined as a percentage of
* free space. Metaslab groups that have more free space than
* zfs_mg_noalloc_threshold are always eligible for allocations. Once
* a metaslab group's free space is less than or equal to the
* zfs_mg_noalloc_threshold the allocator will avoid allocating to that
* group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
* Once all groups in the pool reach zfs_mg_noalloc_threshold then all
* groups are allowed to accept allocations. Gang blocks are always
* eligible to allocate on any metaslab group. The default value of 0 means
* no metaslab group will be excluded based on this criterion.
*/
static uint_t zfs_mg_noalloc_threshold = 0;
/*
* Metaslab groups are considered eligible for allocations if their
* fragmentation metric (measured as a percentage) is less than or
* equal to zfs_mg_fragmentation_threshold. If a metaslab group
* exceeds this threshold then it will be skipped unless all metaslab
* groups within the metaslab class have also crossed this threshold.
*
* This tunable was introduced to avoid edge cases where we continue
* allocating from very fragmented disks in our pool while other, less
* fragmented disks exist. On the other hand, if all disks in the
* pool are uniformly approaching the threshold, the threshold can
* become a speed bump in performance, where we keep switching the disks
* that we allocate from (e.g. we allocate some segments from disk A,
* pushing it past the threshold, while freeing segments from disk B
* brings its fragmentation below the threshold).
*
* Empirically, we've seen that our vdev selection for allocations is
* good enough that fragmentation increases uniformly across all vdevs
* the majority of the time. Thus we set the threshold percentage high
* enough to avoid hitting the speed bump on pools that are being pushed
* to the edge.
*/
static uint_t zfs_mg_fragmentation_threshold = 95;
/*
* Allow metaslabs to keep their active state as long as their fragmentation
* percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
* active metaslab that exceeds this threshold will no longer keep its active
* status allowing better metaslabs to be selected.
*/
static uint_t zfs_metaslab_fragmentation_threshold = 70;
/*
* When set will load all metaslabs when pool is first opened.
*/
int metaslab_debug_load = B_FALSE;
/*
* When set will prevent metaslabs from being unloaded.
*/
static int metaslab_debug_unload = B_FALSE;
/*
* Minimum size which forces the dynamic allocator to change
* its allocation strategy. Once the space map cannot satisfy
* an allocation of this size, it switches to a more
* aggressive strategy (i.e. search by size rather than offset).
*/
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
uint_t metaslab_df_free_pct = 4;
/*
* Maximum distance to search forward from the last offset. Without this
* limit, fragmented pools can see >100,000 iterations and
* metaslab_block_picker() becomes the performance limiting factor on
* high-performance storage.
*
* With the default setting of 16MB, we typically see less than 500
* iterations, even with very fragmented, ashift=9 pools. The maximum number
* of iterations possible is:
* metaslab_df_max_search / (2 * (1<<ashift))
* With the default setting of 16MB this is 16*1024 (with ashift=9) or
* 2048 (with ashift=12).
*/
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;
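The iteration bound quoted in the comment above follows from simple arithmetic; the tiny helper below (illustrative only, df_max_iterations() is an invented name) reproduces the two quoted figures.

static uint64_t
df_max_iterations(uint64_t max_search, uint64_t ashift)
{
	/* Bound from the comment above: max_search / (2 * (1 << ashift)). */
	return (max_search / (2 * (1ULL << ashift)));
}
/* df_max_iterations(16 << 20, 9)  == 16384 (i.e. 16 * 1024) */
/* df_max_iterations(16 << 20, 12) == 2048 */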
/*
* Forces the metaslab_block_picker function to search for at least this many
* segments forwards until giving up on finding a segment that the allocation
* will fit into.
*/
static const uint32_t metaslab_min_search_count = 100;
/*
* If we are not searching forward (due to metaslab_df_max_search,
* metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
* controls what segment is used. If it is set, we will use the largest free
* segment. If it is not set, we will use a segment of exactly the requested
* size (or larger).
*/
static int metaslab_df_use_largest_segment = B_FALSE;
/*
* Percentage of all cpus that can be used by the metaslab taskq.
*/
int metaslab_load_pct = 50;
/*
* These tunables control how long a metaslab will remain loaded after the
* last allocation from it. A metaslab can't be unloaded until at least
* metaslab_unload_delay TXG's and metaslab_unload_delay_ms milliseconds
* have elapsed. However, zfs_metaslab_mem_limit may cause it to be
* unloaded sooner. These settings are intended to be generous -- to keep
* metaslabs loaded for a long time, reducing the rate of metaslab loading.
*/
static uint_t metaslab_unload_delay = 32;
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
/*
* Max number of metaslabs per group to preload.
*/
uint_t metaslab_preload_limit = 10;
/*
* Enable/disable preloading of metaslabs.
*/
static int metaslab_preload_enabled = B_TRUE;
/*
* Enable/disable fragmentation weighting on metaslabs.
*/
static int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
* Enable/disable lba weighting (i.e. outer tracks are given preference).
*/
static int metaslab_lba_weighting_enabled = B_TRUE;
/*
* Enable/disable metaslab group biasing.
*/
static int metaslab_bias_enabled = B_TRUE;
/*
* Enable/disable remapping of indirect DVAs to their concrete vdevs.
*/
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;
/*
* Enable/disable segment-based metaslab selection.
*/
static int zfs_metaslab_segment_weight_enabled = B_TRUE;
/*
* When using segment-based metaslab selection, we will continue
* allocating from the active metaslab until we have exhausted
* zfs_metaslab_switch_threshold of its buckets.
*/
static int zfs_metaslab_switch_threshold = 2;
/*
* Internal switch to enable/disable the metaslab allocation tracing
* facility.
*/
static const boolean_t metaslab_trace_enabled = B_FALSE;
/*
* Maximum entries that the metaslab allocation tracing facility will keep
* in a given list when running in non-debug mode. We limit the number
* of entries in non-debug mode to prevent us from using up too much memory.
* The limit should be sufficiently large that we don't expect any allocation
* to ever exceed this value. In debug mode, the system will panic if this
* limit is ever reached, allowing for further investigation.
*/
static const uint64_t metaslab_trace_max_entries = 5000;
/*
* Maximum number of metaslabs per group that can be disabled
* simultaneously.
*/
static const int max_disabled_ms = 3;
/*
* Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
* To avoid 64-bit overflow, don't set above UINT32_MAX.
*/
static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
/*
* Maximum percentage of memory to use on storing loaded metaslabs. If loading
* a metaslab would take it over this percentage, the oldest selected metaslab
* is automatically unloaded.
*/
static uint_t zfs_metaslab_mem_limit = 25;
/*
* Force the per-metaslab range trees to use 64-bit integers to store
* segments. Used for debugging purposes.
*/
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;
/*
* By default we only store segments over a certain size in the size-sorted
* metaslab trees (ms_allocatable_by_size and
* ms_unflushed_frees_by_size). This dramatically reduces memory usage and
* improves load and unload times at the cost of causing us to use slightly
* larger segments than we would otherwise in some cases.
*/
static const uint32_t metaslab_by_size_min_shift = 14;
/*
* If not set, we will first try normal allocation. If that fails then
* we will do a gang allocation. If that fails then we will do a "try hard"
* gang allocation. If that fails then we will have a multi-layer gang
* block.
*
* If set, we will first try normal allocation. If that fails then
* we will do a "try hard" allocation. If that fails we will do a gang
* allocation. If that fails we will do a "try hard" gang allocation. If
* that fails then we will have a multi-layer gang block.
*/
static int zfs_metaslab_try_hard_before_gang = B_FALSE;
/*
* When not trying hard, we only consider the best zfs_metaslab_find_max_tries
* metaslabs. This improves performance, especially when there are many
* metaslabs per vdev and the allocation can't actually be satisfied (so we
* would otherwise iterate all the metaslabs). If there is a metaslab with a
* worse weight but it can actually satisfy the allocation, we won't find it
* until trying hard. This may happen if the worse metaslab is not loaded
* (and the true weight is better than we have calculated), or due to weight
* bucketization. E.g. we are looking for a 60K segment, and the best
* metaslabs all have free segments in the 32-63K bucket, but the best
* zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
* subsequent metaslab has ms_max_size >60KB (but fewer segments in this
* bucket, and therefore a lower weight).
*/
static uint_t zfs_metaslab_find_max_tries = 100;
static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;
typedef struct metaslab_stats {
kstat_named_t metaslabstat_trace_over_limit;
kstat_named_t metaslabstat_reload_tree;
kstat_named_t metaslabstat_too_many_tries;
kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;
static metaslab_stats_t metaslab_stats = {
{ "trace_over_limit", KSTAT_DATA_UINT64 },
{ "reload_tree", KSTAT_DATA_UINT64 },
{ "too_many_tries", KSTAT_DATA_UINT64 },
{ "try_hard", KSTAT_DATA_UINT64 },
};
#define METASLABSTAT_BUMP(stat) \
atomic_inc_64(&metaslab_stats.stat.value.ui64);
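/*
 * For illustration, a call site later in this file looks like:
 *	METASLABSTAT_BUMP(metaslabstat_reload_tree);
 * which expands to an atomic 64-bit increment of the matching counter
 * in metaslab_stats, published through the "metaslab_stats" kstat
 * registered below.
 */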
static kstat_t *metaslab_ksp;
void
metaslab_stat_init(void)
{
ASSERT(metaslab_alloc_trace_cache == NULL);
metaslab_alloc_trace_cache = kmem_cache_create(
"metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
"misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (metaslab_ksp != NULL) {
metaslab_ksp->ks_data = &metaslab_stats;
kstat_install(metaslab_ksp);
}
}
void
metaslab_stat_fini(void)
{
if (metaslab_ksp != NULL) {
kstat_delete(metaslab_ksp);
metaslab_ksp = NULL;
}
kmem_cache_destroy(metaslab_alloc_trace_cache);
metaslab_alloc_trace_cache = NULL;
}
/*
* ==========================================================================
* Metaslab classes
* ==========================================================================
*/
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
{
metaslab_class_t *mc;
mc = kmem_zalloc(offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
mc->mc_spa = spa;
mc->mc_ops = ops;
mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
mca->mca_rotor = NULL;
zfs_refcount_create_tracked(&mca->mca_alloc_slots);
}
return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
ASSERT(mc->mc_alloc == 0);
ASSERT(mc->mc_deferred == 0);
ASSERT(mc->mc_space == 0);
ASSERT(mc->mc_dspace == 0);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
ASSERT(mca->mca_rotor == NULL);
zfs_refcount_destroy(&mca->mca_alloc_slots);
}
mutex_destroy(&mc->mc_lock);
multilist_destroy(&mc->mc_metaslab_txg_list);
kmem_free(mc, offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
metaslab_group_t *mg;
vdev_t *vd;
/*
* Must hold one of the spa_config locks.
*/
ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
return (0);
do {
vd = mg->mg_vd;
ASSERT(vd->vdev_mg != NULL);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(mg->mg_class, ==, mc);
ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
return (0);
}
static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
atomic_add_64(&mc->mc_alloc, alloc_delta);
atomic_add_64(&mc->mc_deferred, defer_delta);
atomic_add_64(&mc->mc_space, space_delta);
atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
return (mc->mc_alloc);
}
uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
return (mc->mc_deferred);
}
uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
return (mc->mc_space);
}
uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *mc_hist;
int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
mutex_enter(&mc->mc_lock);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = vdev_get_mg(tvd, mc);
/*
* Skip any holes, uninitialized top-levels, or
 * vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
mc_hist[i] += mg->mg_histogram[i];
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
}
mutex_exit(&mc->mc_lock);
kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
/*
* Calculate the metaslab class's fragmentation metric. The metric
* is weighted based on the space contribution of each metaslab group.
* The return value will be a number between 0 and 100 (inclusive), or
* ZFS_FRAG_INVALID if the metric has not been set. See comment above the
* zfs_frag_table for more information about the metric.
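 * In short, it is the space-weighted average
 *	sum(mg_fragmentation * metaslab_group_get_space(mg)) /
 *	    metaslab_class_get_space(mc)
 * taken over all metaslab groups in the class.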
*/
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t fragmentation = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
/*
* Skip any holes, uninitialized top-levels,
 * or vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* If a metaslab group does not contain a fragmentation
* metric then just bail out.
*/
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (ZFS_FRAG_INVALID);
}
/*
* Determine how much this metaslab_group is contributing
* to the overall pool fragmentation metric.
*/
fragmentation += mg->mg_fragmentation *
metaslab_group_get_space(mg);
}
fragmentation /= metaslab_class_get_space(mc);
ASSERT3U(fragmentation, <=, 100);
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (fragmentation);
}
/*
* Calculate the amount of expandable space that is available in
* this metaslab class. If a device is expanded then its expandable
* space will be the amount of allocatable space that is currently not
* part of this metaslab class.
*/
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t space = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* Calculate if we have enough space to add additional
* metaslabs. We report the expandable space in terms
* of the metaslab size since that's the unit of expansion.
*/
space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (space);
}
void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
multilist_t *ml = &mc->mc_metaslab_txg_list;
for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL) {
mutex_enter(&msp->ms_lock);
/*
* If the metaslab has been removed from the list
* (which could happen if we were at the memory limit
* and it was evicted during this loop), then we can't
* proceed and we should restart the sublist.
*/
if (!multilist_link_active(&msp->ms_class_txg_node)) {
mutex_exit(&msp->ms_lock);
i--;
break;
}
mls = multilist_sublist_lock(ml, i);
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
if (txg >
msp->ms_selected_txg + metaslab_unload_delay &&
gethrtime() > msp->ms_selected_time +
(uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
metaslab_evict(msp, txg);
} else {
/*
* Once we've hit a metaslab selected too
* recently to evict, we're done evicting for
* now.
*/
mutex_exit(&msp->ms_lock);
break;
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
}
}
}
static int
metaslab_compare(const void *x1, const void *x2)
{
const metaslab_t *m1 = (const metaslab_t *)x1;
const metaslab_t *m2 = (const metaslab_t *)x2;
int sort1 = 0;
int sort2 = 0;
if (m1->ms_allocator != -1 && m1->ms_primary)
sort1 = 1;
else if (m1->ms_allocator != -1 && !m1->ms_primary)
sort1 = 2;
if (m2->ms_allocator != -1 && m2->ms_primary)
sort2 = 1;
else if (m2->ms_allocator != -1 && !m2->ms_primary)
sort2 = 2;
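/*
 * sort1/sort2 encode each metaslab's state for the comparison below:
 * 0 = inactive, 1 = active primary, 2 = active secondary.
 */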
/*
* Sort inactive metaslabs first, then primaries, then secondaries. When
* selecting a metaslab to allocate from, an allocator first tries its
* primary, then secondary active metaslab. If it doesn't have active
* metaslabs, or can't allocate from them, it searches for an inactive
* metaslab to activate. If it can't find a suitable one, it will steal
* a primary or secondary metaslab from another allocator.
*/
if (sort1 < sort2)
return (-1);
if (sort1 > sort2)
return (1);
int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
if (likely(cmp))
return (cmp);
IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
return (TREE_CMP(m1->ms_start, m2->ms_start));
}
/*
* ==========================================================================
* Metaslab groups
* ==========================================================================
*/
/*
* Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity
 * is above zfs_mg_noalloc_threshold and its fragmentation (when valid)
 * is no greater than zfs_mg_fragmentation_threshold. If a metaslab group
* transitions from allocatable to non-allocatable or vice versa then the
* metaslab group's class is updated to reflect the transition.
*/
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
metaslab_class_t *mc = mg->mg_class;
vdev_stat_t *vs = &vd->vdev_stat;
boolean_t was_allocatable;
boolean_t was_initialized;
ASSERT(vd == vd->vdev_top);
ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
SCL_ALLOC);
mutex_enter(&mg->mg_lock);
was_allocatable = mg->mg_allocatable;
was_initialized = mg->mg_initialized;
mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
(vs->vs_space + 1);
mutex_enter(&mc->mc_lock);
/*
* If the metaslab group was just added then it won't
* have any space until we finish syncing out this txg.
* At that point we will consider it initialized and available
* for allocations. We also don't consider non-activated
* metaslab groups (e.g. vdevs that are in the middle of being removed)
* to be initialized, because they can't be used for allocation.
*/
mg->mg_initialized = metaslab_group_initialized(mg);
if (!was_initialized && mg->mg_initialized) {
mc->mc_groups++;
} else if (was_initialized && !mg->mg_initialized) {
ASSERT3U(mc->mc_groups, >, 0);
mc->mc_groups--;
}
if (mg->mg_initialized)
mg->mg_no_free_space = B_FALSE;
/*
 * A metaslab group is considered allocatable if it has plenty
 * of free space and is not heavily fragmented. We only take
* fragmentation into account if the metaslab group has a valid
* fragmentation metric (i.e. a value between 0 and 100).
*/
mg->mg_allocatable = (mg->mg_activation_count > 0 &&
mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
(mg->mg_fragmentation == ZFS_FRAG_INVALID ||
mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
/*
* The mc_alloc_groups maintains a count of the number of
* groups in this metaslab class that are still above the
* zfs_mg_noalloc_threshold. This is used by the allocating
* threads to determine if they should avoid allocations to
* a given group. The allocator will avoid allocations to a group
* if that group has reached or is below the zfs_mg_noalloc_threshold
* and there are still other groups that are above the threshold.
* When a group transitions from allocatable to non-allocatable or
* vice versa we update the metaslab class to reflect that change.
* When the mc_alloc_groups value drops to 0 that means that all
* groups have reached the zfs_mg_noalloc_threshold making all groups
* eligible for allocations. This effectively means that all devices
* are balanced again.
*/
if (was_allocatable && !mg->mg_allocatable)
mc->mc_alloc_groups--;
else if (!was_allocatable && mg->mg_allocatable)
mc->mc_alloc_groups++;
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
const metaslab_t *a = va;
const metaslab_t *b = vb;
int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
if (likely(cmp))
return (cmp);
uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
cmp = TREE_CMP(a_vdev_id, b_vdev_id);
if (cmp)
return (cmp);
return (TREE_CMP(a->ms_id, b->ms_id));
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
metaslab_group_t *mg;
mg = kmem_zalloc(offsetof(metaslab_group_t,
mg_allocator[allocators]), KM_SLEEP);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
mg->mg_vd = vd;
mg->mg_class = mc;
mg->mg_activation_count = 0;
mg->mg_initialized = B_FALSE;
mg->mg_no_free_space = B_TRUE;
mg->mg_allocators = allocators;
for (int i = 0; i < allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
}
mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
/*
* We may have gone below zero with the activation count
* either because we never activated in the first place or
* because we're done, and possibly removing the vdev.
*/
ASSERT(mg->mg_activation_count <= 0);
taskq_destroy(mg->mg_taskq);
avl_destroy(&mg->mg_metaslab_tree);
mutex_destroy(&mg->mg_lock);
mutex_destroy(&mg->mg_ms_disabled_lock);
cv_destroy(&mg->mg_ms_disabled_cv);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
}
kmem_free(mg, offsetof(metaslab_group_t,
mg_allocator[mg->mg_allocators]));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count <= 0);
if (++mg->mg_activation_count <= 0)
return;
mg->mg_aliquot = metaslab_aliquot * MAX(1,
vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
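/*
 * e.g. a 6-wide raidz2 top-level vdev has 4 data disks, so it is given
 * a 4x larger aliquot (allocation share) than a single-disk vdev.
 */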
metaslab_group_alloc_update(mg);
if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
mg->mg_prev = mg;
mg->mg_next = mg;
} else {
mgnext = mgprev->mg_next;
mg->mg_prev = mgprev;
mg->mg_next = mgnext;
mgprev->mg_next = mg;
mgnext->mg_prev = mg;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
mc->mc_allocator[i].mca_rotor = mg;
mg = mg->mg_next;
}
}
/*
* Passivate a metaslab group and remove it from the allocation rotor.
* Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
* a metaslab group. This function will momentarily drop spa_config_locks
* that are lower than the SCL_ALLOC lock (see comment below).
*/
void
metaslab_group_passivate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
(SCL_ALLOC | SCL_ZIO));
if (--mg->mg_activation_count != 0) {
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT(mc->mc_allocator[i].mca_rotor != mg);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count < 0);
return;
}
/*
* The spa_config_lock is an array of rwlocks, ordered as
* follows (from highest to lowest):
* SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
* SCL_ZIO > SCL_FREE > SCL_VDEV
* (For more information about the spa_config_lock see spa_misc.c)
* The higher the lock, the broader its coverage. When we passivate
* a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
* config locks. However, the metaslab group's taskq might be trying
* to preload metaslabs so we must drop the SCL_ZIO lock and any
* lower locks to allow the I/O to complete. At a minimum,
* we continue to hold the SCL_ALLOC lock, which prevents any future
* allocations from taking place and any changes to the vdev tree.
*/
spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
taskq_wait_outstanding(mg->mg_taskq, 0);
spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
metaslab_group_alloc_update(mg);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
metaslab_t *msp = mga->mga_primary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
msp = mga->mga_secondary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
}
mgprev = mg->mg_prev;
mgnext = mg->mg_next;
if (mg == mgnext) {
mgnext = NULL;
} else {
mgprev->mg_next = mgnext;
mgnext->mg_prev = mgprev;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
if (mc->mc_allocator[i].mca_rotor == mg)
mc->mc_allocator[i].mca_rotor = mgnext;
}
mg->mg_prev = NULL;
mg->mg_next = NULL;
}
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
vdev_stat_t *vs = &vd->vdev_stat;
return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}
uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
/*
* Note that the number of nodes in mg_metaslab_tree may be one less
* than vdev_ms_count, due to the embedded log metaslab.
*/
mutex_enter(&mg->mg_lock);
uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
mutex_exit(&mg->mg_lock);
return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}
void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
uint64_t *mg_hist;
avl_tree_t *t = &mg->mg_metaslab_tree;
uint64_t ashift = mg->mg_vd->vdev_ashift;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
SPACE_MAP_HISTOGRAM_SIZE + ashift);
mutex_enter(&mg->mg_lock);
for (metaslab_t *msp = avl_first(t);
msp != NULL; msp = AVL_NEXT(t, msp)) {
VERIFY3P(msp->ms_group, ==, mg);
/* skip if not active */
if (msp->ms_sm == NULL)
continue;
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
mg_hist[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
}
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
mutex_exit(&mg->mg_lock);
kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
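/*
 * smp_histogram[i] counts segments in the 2^(i + sm_shift) size class;
 * indexing mg_histogram and mc_histogram with i + ashift lines those
 * buckets up with the absolute power-of-two buckets of the range-tree
 * histograms, since a metaslab's space map is created with sm_shift
 * equal to the vdev's ashift.
 */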
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
ASSERT3U(mg->mg_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
ASSERT3U(mc->mc_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
ASSERT(msp->ms_group == NULL);
mutex_enter(&mg->mg_lock);
msp->ms_group = mg;
msp->ms_weight = 0;
avl_add(&mg->mg_metaslab_tree, msp);
mutex_exit(&mg->mg_lock);
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_add(mg, msp);
mutex_exit(&msp->ms_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_remove(mg, msp);
mutex_exit(&msp->ms_lock);
mutex_enter(&mg->mg_lock);
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
msp->ms_group = NULL;
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(MUTEX_HELD(&mg->mg_lock));
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
msp->ms_weight = weight;
avl_add(&mg->mg_metaslab_tree, msp);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
/*
* Although in principle the weight can be any value, in
* practice we do not use values in the range [1, 511].
*/
ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
ASSERT(MUTEX_HELD(&msp->ms_lock));
mutex_enter(&mg->mg_lock);
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
/*
* Calculate the fragmentation for a given metaslab group. We can use
* a simple average here since all metaslabs within the group must have
* the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
* group have a fragmentation metric.
*/
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
uint64_t fragmentation = 0;
uint64_t valid_ms = 0;
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
continue;
if (msp->ms_group != mg)
continue;
valid_ms++;
fragmentation += msp->ms_fragmentation;
}
if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
return (ZFS_FRAG_INVALID);
fragmentation /= valid_ms;
ASSERT3U(fragmentation, <=, 100);
return (fragmentation);
}
/*
* Determine if a given metaslab group should skip allocations. A metaslab
* group should avoid allocations if its free capacity is less than the
* zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold, and there is at least one metaslab group
* that can still handle allocations. If the allocation throttle is enabled
* then we skip allocations to devices that have reached their maximum
* allocation queue depth unless the selected metaslab group is the only
* eligible group remaining.
*/
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
int flags, uint64_t psize, int allocator, int d)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_class_t *mc = mg->mg_class;
/*
 * We can only consider skipping this metaslab group if it's
 * in the normal, special, or dedup metaslab class and there are
 * other metaslab groups to select from. Otherwise, we always consider it eligible
* for allocations.
*/
if ((mc != spa_normal_class(spa) &&
mc != spa_special_class(spa) &&
mc != spa_dedup_class(spa)) ||
mc->mc_groups <= 1)
return (B_TRUE);
/*
* If the metaslab group's mg_allocatable flag is set (see comments
* in metaslab_group_alloc_update() for more information) and
* the allocation throttle is disabled then allow allocations to this
* device. However, if the allocation throttle is enabled then
* check if we have reached our allocation limit (mga_alloc_queue_depth)
* to determine if we should allow allocations to this metaslab group.
* If all metaslab groups are no longer considered allocatable
* (mc_alloc_groups == 0) or we're trying to allocate the smallest
* gang block size then we allow allocations on this metaslab group
* regardless of the mg_allocatable or throttle settings.
*/
if (mg->mg_allocatable) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
int64_t qdepth;
uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
if (!mc->mc_alloc_throttle_enabled)
return (B_TRUE);
/*
* If this metaslab group does not have any free space, then
* there is no point in looking further.
*/
if (mg->mg_no_free_space)
return (B_FALSE);
/*
 * Some allocations (e.g., those coming from device removal,
 * where the allocations are not even counted in the
 * metaslab allocation queues) are allowed to bypass
* the throttle.
*/
if (flags & METASLAB_DONT_THROTTLE)
return (B_TRUE);
/*
 * Relax allocation throttling for ditto blocks. Due to
 * random imbalances in allocation, copies tend to get pushed
 * to the one vdev that looks a bit better at the moment.
*/
qmax = qmax * (4 + d) / 4;
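/* e.g. for d == 2 (typically the third DVA) qmax grows by 50%. */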
qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
/*
* If this metaslab group is below its qmax or it's
 * the only allocatable metaslab group, then attempt
* to allocate from it.
*/
if (qdepth < qmax || mc->mc_alloc_groups == 1)
return (B_TRUE);
ASSERT3U(mc->mc_alloc_groups, >, 1);
/*
* Since this metaslab group is at or over its qmax, we
* need to determine if there are metaslab groups after this
* one that might be able to handle this allocation. This is
* racy since we can't hold the locks for all metaslab
* groups at the same time when we make this check.
*/
for (metaslab_group_t *mgp = mg->mg_next;
mgp != rotor; mgp = mgp->mg_next) {
metaslab_group_allocator_t *mgap =
&mgp->mg_allocator[allocator];
qmax = mgap->mga_cur_max_alloc_queue_depth;
qmax = qmax * (4 + d) / 4;
qdepth =
zfs_refcount_count(&mgap->mga_alloc_queue_depth);
/*
* If there is another metaslab group that
* might be able to handle the allocation, then
* we return false so that we skip this group.
*/
if (qdepth < qmax && !mgp->mg_no_free_space)
return (B_FALSE);
}
/*
* We didn't find another group to handle the allocation
* so we can't skip this metaslab group even though
* we are at or over our qmax.
*/
return (B_TRUE);
} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* ==========================================================================
* Range tree callbacks
* ==========================================================================
*/
/*
* Comparison function for the private size-ordered tree using 32-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
+__attribute__((always_inline)) inline
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
- if (likely(cmp))
- return (cmp);
- return (TREE_CMP(r1->rs_start, r2->rs_start));
+ return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}
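/*
 * Note: the branchless return above (and its twin in the 64-bit
 * comparator below) is equivalent to:
 *	if (cmp != 0)
 *		return (cmp);
 *	return (TREE_CMP(r1->rs_start, r2->rs_start));
 * because !cmp is 1 only when the two sizes are equal, so ties on size
 * are still broken by the segment start offset.
 */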
/*
* Comparison function for the private size-ordered tree using 64-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
+__attribute__((always_inline)) inline
static int
metaslab_rangesize64_compare(const void *x1, const void *x2)
{
const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
- if (likely(cmp))
- return (cmp);
- return (TREE_CMP(r1->rs_start, r2->rs_start));
+ return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}
+
typedef struct metaslab_rt_arg {
zfs_btree_t *mra_bt;
uint32_t mra_floor_shift;
} metaslab_rt_arg_t;
struct mssa_arg {
range_tree_t *rt;
metaslab_rt_arg_t *mra;
};
static void
metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
{
struct mssa_arg *mssap = arg;
range_tree_t *rt = mssap->rt;
metaslab_rt_arg_t *mrap = mssap->mra;
range_seg_max_t seg = {0};
rs_set_start(&seg, rt, start);
rs_set_end(&seg, rt, start + size);
metaslab_rt_add(rt, &seg, mrap);
}
static void
metaslab_size_tree_full_load(range_tree_t *rt)
{
metaslab_rt_arg_t *mrap = rt->rt_arg;
METASLABSTAT_BUMP(metaslabstat_reload_tree);
ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
mrap->mra_floor_shift = 0;
struct mssa_arg arg = {0};
arg.rt = rt;
arg.mra = mrap;
range_tree_walk(rt, metaslab_size_sorted_add, &arg);
}
+
+ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
+ range_seg32_t, metaslab_rangesize32_compare)
+
+ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
+ range_seg64_t, metaslab_rangesize64_compare)
+
/*
* Create any block allocator specific components. The current allocators
* rely on using both a size-ordered range_tree_t and an array of uint64_t's.
*/
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
size_t size;
int (*compare) (const void *, const void *);
+ bt_find_in_buf_f bt_find;
switch (rt->rt_type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
compare = metaslab_rangesize32_compare;
+ bt_find = metaslab_rt_find_rangesize32_in_buf;
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
compare = metaslab_rangesize64_compare;
+ bt_find = metaslab_rt_find_rangesize64_in_buf;
break;
default:
panic("Invalid range seg type %d", rt->rt_type);
}
- zfs_btree_create(size_tree, compare, size);
+ zfs_btree_create(size_tree, compare, bt_find, size);
mrap->mra_floor_shift = metaslab_by_size_min_shift;
}
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
(void) rt;
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_destroy(size_tree);
kmem_free(mrap, sizeof (*mrap));
}
static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
(1ULL << mrap->mra_floor_shift))
return;
zfs_btree_add(size_tree, rs);
}
static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
mrap->mra_floor_shift))
return;
zfs_btree_remove(size_tree, rs);
}
static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
metaslab_rt_create(rt, arg);
}
static const range_tree_ops_t metaslab_rt_ops = {
.rtop_create = metaslab_rt_create,
.rtop_destroy = metaslab_rt_destroy,
.rtop_add = metaslab_rt_add,
.rtop_remove = metaslab_rt_remove,
.rtop_vacate = metaslab_rt_vacate
};
/*
* ==========================================================================
* Common allocator routines
* ==========================================================================
*/
/*
* Return the maximum contiguous segment within the metaslab.
*/
uint64_t
metaslab_largest_allocatable(metaslab_t *msp)
{
zfs_btree_t *t = &msp->ms_allocatable_by_size;
range_seg_t *rs;
if (t == NULL)
return (0);
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL)
return (0);
return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
msp->ms_allocatable));
}
/*
* Return the maximum contiguous segment within the unflushed frees of this
* metaslab.
*/
static uint64_t
metaslab_largest_unflushed_free(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_unflushed_frees == NULL)
return (0);
if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_unflushed_frees);
range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
NULL);
if (rs == NULL)
return (0);
/*
* When a range is freed from the metaslab, that range is added to
* both the unflushed frees and the deferred frees. While the block
* will eventually be usable, if the metaslab were loaded the range
* would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
* txgs had passed. As a result, when attempting to estimate an upper
* bound for the largest currently-usable free segment in the
* metaslab, we need to not consider any ranges currently in the defer
* trees. This algorithm approximates the largest available chunk in
* the largest range in the unflushed_frees tree by taking the first
* chunk. While this may be a poor estimate, it should only remain so
* briefly and should eventually self-correct as frees are no longer
* deferred. Similar logic applies to the ms_freed tree. See
* metaslab_load() for more details.
*
* There are two primary sources of inaccuracy in this estimate. Both
* are tolerated for performance reasons. The first source is that we
* only check the largest segment for overlaps. Smaller segments may
* have more favorable overlaps with the other trees, resulting in
* larger usable chunks. Second, we only look at the first chunk in
* the largest segment; there may be other usable chunks in the
* largest segment, but we ignore them.
*/
uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
rsize, &start, &size);
if (found) {
if (rstart == start)
return (0);
rsize = start - rstart;
}
}
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
rsize, &start, &size);
if (found)
rsize = start - rstart;
return (rsize);
}
static range_seg_t *
metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
uint64_t size, zfs_btree_index_t *where)
{
range_seg_t *rs;
range_seg_max_t rsearch;
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, start + size);
rs = zfs_btree_find(t, &rsearch, where);
if (rs == NULL) {
rs = zfs_btree_next(t, where, where);
}
return (rs);
}
#if defined(WITH_DF_BLOCK_ALLOCATOR) || \
defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* This is a helper function that can be used by the allocator to find a
* suitable block to allocate. This will search the specified B-tree looking
* for a block that matches the specified criteria.
*/
static uint64_t
metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
uint64_t max_search)
{
if (*cursor == 0)
*cursor = rt->rt_start;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t where;
range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
uint64_t first_found;
int count_searched = 0;
if (rs != NULL)
first_found = rs_get_start(rs, rt);
while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
max_search || count_searched < metaslab_min_search_count)) {
uint64_t offset = rs_get_start(rs, rt);
if (offset + size <= rs_get_end(rs, rt)) {
*cursor = offset + size;
return (offset);
}
rs = zfs_btree_next(bt, &where, &where);
count_searched++;
}
*cursor = 0;
return (-1ULL);
}
#endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Dynamic Fit (df) block allocator
*
* Search for a free chunk of at least this size, starting from the last
* offset (for this alignment of block) looking for up to
* metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
* found within 16MB, then return a free chunk of exactly the requested size (or
* larger).
*
* If it seems like searching from the last offset will be unproductive, skip
* that and just return a free chunk of exactly the requested size (or larger).
* This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
* mechanism is probably not very useful and may be removed in the future.
*
* The behavior when not searching can be changed to return the largest free
* chunk, instead of a free chunk of exactly the requested size, by setting
* metaslab_df_use_largest_segment.
* ==========================================================================
*/
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
/*
* Find the largest power of 2 block size that evenly divides the
* requested size. This is used to try to allocate blocks with similar
* alignment from the same area of the metaslab (i.e. same cursor
 * bucket), but it does not prevent allocations of other sizes
 * from existing in the same region.
*/
uint64_t align = size & -size;
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
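/*
 * For example, a 24K (0x6000) request yields align = 0x2000 (8K), the
 * lowest set bit of the size, so it shares a cursor bucket with every
 * other size whose largest power-of-2 divisor is 8K.
 */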
range_tree_t *rt = msp->ms_allocatable;
uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size;
uint64_t offset;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're running low on space, find a segment based on size,
* rather than iterating based on offset.
*/
if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
free_pct < metaslab_df_free_pct) {
offset = -1;
} else {
offset = metaslab_block_picker(rt,
cursor, size, metaslab_df_max_search);
}
if (offset == -1) {
range_seg_t *rs;
if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
if (metaslab_df_use_largest_segment) {
/* use largest free segment */
rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
} else {
zfs_btree_index_t where;
/* use segment of this size, or next largest */
rs = metaslab_block_find(&msp->ms_allocatable_by_size,
rt, msp->ms_start, size, &where);
}
if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
rt)) {
offset = rs_get_start(rs, rt);
*cursor = offset + size;
}
}
return (offset);
}
const metaslab_ops_t zfs_metaslab_ops = {
metaslab_df_alloc
};
#endif /* WITH_DF_BLOCK_ALLOCATOR */
#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Cursor fit block allocator -
* Select the largest region in the metaslab, set the cursor to the beginning
* of the range and the cursor_end to the end of the range. As allocations
* are made advance the cursor. Continue allocating from the cursor until
* the range is exhausted and then find a new range.
* ==========================================================================
*/
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
uint64_t *cursor = &msp->ms_lbas[0];
uint64_t *cursor_end = &msp->ms_lbas[1];
uint64_t offset = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(*cursor_end, >=, *cursor);
if ((*cursor + size) > *cursor_end) {
range_seg_t *rs;
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
size)
return (-1ULL);
*cursor = rs_get_start(rs, rt);
*cursor_end = rs_get_end(rs, rt);
}
offset = *cursor;
*cursor += size;
return (offset);
}
const metaslab_ops_t zfs_metaslab_ops = {
metaslab_cf_alloc
};
#endif /* WITH_CF_BLOCK_ALLOCATOR */
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* New dynamic fit allocator -
* Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
* contiguous blocks. If no region is found then just use the largest segment
* that remains.
* ==========================================================================
*/
/*
* Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
* to request from the allocator.
*/
uint64_t metaslab_ndf_clump_shift = 4;
static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
zfs_btree_t *t = &msp->ms_allocatable->rt_root;
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_index_t where;
range_seg_t *rs;
range_seg_max_t rsearch;
uint64_t hbit = highbit64(size);
uint64_t *cursor = &msp->ms_lbas[hbit - 1];
uint64_t max_size = metaslab_largest_allocatable(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (max_size < size)
return (-1ULL);
rs_set_start(&rsearch, rt, *cursor);
rs_set_end(&rsearch, rt, *cursor + size);
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
t = &msp->ms_allocatable_by_size;
rs_set_start(&rsearch, rt, 0);
rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
metaslab_ndf_clump_shift)));
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL)
rs = zfs_btree_next(t, &where, &where);
ASSERT(rs != NULL);
}
if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
*cursor = rs_get_start(rs, rt) + size;
return (rs_get_start(rs, rt));
}
return (-1ULL);
}
const metaslab_ops_t zfs_metaslab_ops = {
metaslab_ndf_alloc
};
#endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
* ==========================================================================
* Metaslabs
* ==========================================================================
*/
/*
* Wait for any in-progress metaslab loads to complete.
*/
static void
metaslab_load_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_loading) {
ASSERT(!msp->ms_loaded);
cv_wait(&msp->ms_load_cv, &msp->ms_lock);
}
}
/*
* Wait for any in-progress flushing to complete.
*/
static void
metaslab_flush_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_flushing)
cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
}
static unsigned int
metaslab_idx_func(multilist_t *ml, void *arg)
{
metaslab_t *msp = arg;
/*
 * ms_id values are allocated sequentially, so a full 64-bit
 * division would be a waste of time; limit it to 32 bits.
*/
return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
}
uint64_t
metaslab_allocated_space(metaslab_t *msp)
{
return (msp->ms_allocated_space);
}
/*
* Verify that the space accounting on disk matches the in-core range_trees.
*/
static void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t allocating = 0;
uint64_t sm_free_space, msp_free_space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(!msp->ms_condensing);
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can only verify the metaslab space when we're called
* from syncing context with a loaded metaslab that has an
* allocated space map. Calling this in non-syncing context
* does not provide a consistent view of the metaslab since
* we're performing allocations in the future.
*/
if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
!msp->ms_loaded)
return;
/*
* Even though the smp_alloc field can get negative,
* when it comes to a metaslab's space map, that should
* never be the case.
*/
ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
ASSERT3U(space_map_allocated(msp->ms_sm), >=,
range_tree_space(msp->ms_unflushed_frees));
ASSERT3U(metaslab_allocated_space(msp), ==,
space_map_allocated(msp->ms_sm) +
range_tree_space(msp->ms_unflushed_allocs) -
range_tree_space(msp->ms_unflushed_frees));
sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
/*
* Account for future allocations since we would have
* already deducted that space from the ms_allocatable.
*/
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
allocating +=
range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
}
ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
msp->ms_allocating_total);
ASSERT3U(msp->ms_deferspace, ==,
range_tree_space(msp->ms_defer[0]) +
range_tree_space(msp->ms_defer[1]));
msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
msp->ms_deferspace + range_tree_space(msp->ms_freed);
VERIFY3U(sm_free_space, ==, msp_free_space);
}
static void
metaslab_aux_histograms_clear(metaslab_t *msp)
{
/*
* Auxiliary histograms are only cleared when resetting them,
* which can only happen while the metaslab is loaded.
*/
ASSERT(msp->ms_loaded);
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
for (int t = 0; t < TXG_DEFER_SIZE; t++)
memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
}
static void
metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
range_tree_t *rt)
{
/*
* This is modeled after space_map_histogram_add(), so refer to that
* function for implementation details. We want this to work like
* the space map histogram, and not the range tree histogram, as we
* are essentially constructing a delta that will be later subtracted
* from the space map histogram.
*/
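/*
 * Roughly: range-tree buckets that fit within SPACE_MAP_HISTOGRAM_SIZE
 * are copied one-for-one (shifted down by 'shift'); larger buckets are
 * folded into the last slot, scaled by how many last-bucket-sized units
 * each of those segments represents.
 */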
int idx = 0;
for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT3U(i, >=, idx + shift);
histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
ASSERT3U(idx + shift, ==, i);
idx++;
ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
}
}
}
/*
* Called at every sync pass that the metaslab gets synced.
*
* The reason is that we want our auxiliary histograms to be updated
* wherever the metaslab's space map histogram is updated. This way
* we stay consistent on which parts of the metaslab space map's
 * histogram are currently not available for allocations (e.g. because
* they are in the defer, freed, and freeing trees).
*/
static void
metaslab_aux_histograms_update(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(sm != NULL);
/*
* This is similar to the metaslab's space map histogram updates
* that take place in metaslab_sync(). The only difference is that
* we only care about segments that haven't made it into the
* ms_allocatable tree yet.
*/
if (msp->ms_loaded) {
metaslab_aux_histograms_clear(msp);
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freed);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
metaslab_aux_histogram_add(msp->ms_deferhist[t],
sm->sm_shift, msp->ms_defer[t]);
}
}
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freeing);
}
/*
* Called every time we are done syncing (writing to) the metaslab,
* i.e. at the end of each sync pass.
* [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
*/
static void
metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
if (sm == NULL) {
/*
* We came here from metaslab_init() when creating/opening a
* pool, looking at a metaslab that hasn't had any allocations
* yet.
*/
return;
}
/*
* This is similar to the actions that we take for the ms_freed
* and ms_defer trees in metaslab_sync_done().
*/
uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
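/*
 * With TXG_DEFER_SIZE == 2 this alternates between the two deferhist
 * slots on even and odd syncing txgs, mirroring the rotation of the
 * ms_defer trees.
 */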
if (defer_allowed) {
memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
sizeof (msp->ms_synchist));
} else {
memset(msp->ms_deferhist[hist_index], 0,
sizeof (msp->ms_deferhist[hist_index]));
}
memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
}
/*
* Ensure that the metaslab's weight and fragmentation are consistent
* with the contents of the histogram (either the range tree's histogram
 * or the space map's, depending on whether the metaslab is loaded).
*/
static void
metaslab_verify_weight_and_frag(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can end up here from vdev_remove_complete(), in which case we
* cannot do these assertions because we hold spa config locks and
* thus we are not allowed to read from the DMU.
*
* We check if the metaslab group has been removed and if that's
* the case we return immediately as that would mean that we are
* here from the aforementioned code path.
*/
if (msp->ms_group == NULL)
return;
/*
* Devices being removed always return a weight of 0 and leave
* fragmentation and ms_max_size as is - there is nothing for
* us to verify here.
*/
vdev_t *vd = msp->ms_group->mg_vd;
if (vd->vdev_removing)
return;
/*
* If the metaslab is dirty it probably means that we've done
* some allocations or frees that have changed our histograms
* and thus the weight.
*/
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&vd->vdev_ms_list, msp, t))
return;
}
/*
* This verification checks that our in-memory state is consistent
* with what's on disk. If the pool is read-only then there aren't
* any changes and we just have the initially-loaded state.
*/
if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
return;
/* Do some extra verification on the in-core trees when we can. */
if (msp->ms_loaded) {
range_tree_stat_verify(msp->ms_allocatable);
VERIFY(space_map_histogram_verify(msp->ms_sm,
msp->ms_allocatable));
}
uint64_t weight = msp->ms_weight;
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
uint64_t frag = msp->ms_fragmentation;
uint64_t max_segsize = msp->ms_max_size;
msp->ms_weight = 0;
msp->ms_fragmentation = 0;
/*
* This function is used for verification purposes and thus should
* not introduce any side-effects/mutations on the system's state.
*
* Regardless of whether metaslab_weight() thinks this metaslab
* should be active or not, we want to ensure that the actual weight
* (and therefore the value of ms_weight) would be the same if it
* was to be recalculated at this point.
*
* In addition we set the nodirty flag so metaslab_weight() does
* not dirty the metaslab for future TXGs (e.g. when trying to
* force condensing to upgrade the metaslab spacemaps).
*/
msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
VERIFY3U(max_segsize, ==, msp->ms_max_size);
/*
* If the weight type changed then there is no point in doing
* verification. Revert fields to their original values.
*/
if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
(!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
msp->ms_fragmentation = frag;
msp->ms_weight = weight;
return;
}
VERIFY3U(msp->ms_fragmentation, ==, frag);
VERIFY3U(msp->ms_weight, ==, weight);
}
/*
* If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
* this class that was used longest ago, and attempt to unload it. We don't
* want to spend too much time in this loop to prevent performance
* degradation, and we expect that most of the time this operation will
* succeed. Between that and the normal unloading processing during txg sync,
* we expect this to keep the metaslab memory usage under control.
*/
static void
metaslab_potentially_evict(metaslab_class_t *mc)
{
#ifdef _KERNEL
uint64_t allmem = arc_all_memory();
uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
uint_t tries = 0;
for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
tries++) {
unsigned int idx = multilist_get_random_index(
&mc->mc_metaslab_txg_list);
multilist_sublist_t *mls =
multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
inuse * size) {
VERIFY3P(mls, ==, multilist_sublist_lock(
&mc->mc_metaslab_txg_list, idx));
ASSERT3U(idx, ==,
metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
if (!multilist_link_active(&msp->ms_class_txg_node)) {
multilist_sublist_unlock(mls);
break;
}
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
/*
* If the metaslab is currently loading there are two
* cases. If it's the metaslab we're evicting, we
* can't continue on or we'll panic when we attempt to
* recursively lock the mutex. If it's another
* metaslab that's loading, it can be safely skipped,
* since we know it's very new and therefore not a
* good eviction candidate. We check later once the
* lock is held that the metaslab is fully loaded
* before actually unloading it.
*/
if (msp->ms_loading) {
msp = next_msp;
inuse =
spl_kmem_cache_inuse(zfs_btree_leaf_cache);
continue;
}
/*
* We can't unload metaslabs with no spacemap because
* they're not ready to be unloaded yet. We can't
* unload metaslabs with outstanding allocations
* because doing so could cause the metaslab's weight
* to decrease while it's unloaded, which violates an
* invariant that we use to prevent unnecessary
* loading. We also don't unload metaslabs that are
* currently active because they are high-weight
* metaslabs that are likely to be used in the near
* future.
*/
mutex_enter(&msp->ms_lock);
if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
msp->ms_allocating_total == 0) {
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
}
}
#else
(void) mc, (void) zfs_metaslab_mem_limit;
#endif
}
static int
metaslab_load_impl(metaslab_t *msp)
{
int error = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We temporarily drop the lock to unblock other operations while we
* are reading the space map. Therefore, metaslab_sync() and
* metaslab_sync_done() can run at the same time as we do.
*
* If we are using the log space maps, metaslab_sync() can't write to
* the metaslab's space map while we are loading as we only write to
* it when we are flushing the metaslab, and that can't happen while
* we are loading it.
*
* If we are not using log space maps though, metaslab_sync() can
* append to the space map while we are loading. Therefore we load
* only entries that existed when we started the load. Additionally,
* metaslab_sync_done() has to wait for the load to complete because
* there are potential races like metaslab_load() loading parts of the
* space map that are currently being appended by metaslab_sync(). If
* we didn't, the ms_allocatable would have entries that
* metaslab_sync_done() would try to re-add later.
*
* That's why before dropping the lock we remember the synced length
* of the metaslab and read up to that point of the space map,
* ignoring entries appended by metaslab_sync() that happen after we
* drop the lock.
*/
uint64_t length = msp->ms_synced_length;
mutex_exit(&msp->ms_lock);
hrtime_t load_start = gethrtime();
metaslab_rt_arg_t *mrap;
if (msp->ms_allocatable->rt_arg == NULL) {
mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
} else {
mrap = msp->ms_allocatable->rt_arg;
msp->ms_allocatable->rt_ops = NULL;
msp->ms_allocatable->rt_arg = NULL;
}
mrap->mra_bt = &msp->ms_allocatable_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
if (msp->ms_sm != NULL) {
error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
SM_FREE, length);
/* Now, populate the size-sorted tree. */
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
struct mssa_arg arg = {0};
arg.rt = msp->ms_allocatable;
arg.mra = mrap;
range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
&arg);
} else {
/*
* Add the size-sorted tree first, since we don't need to load
* the metaslab from the spacemap.
*/
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
/*
* The space map has not been allocated yet, so treat
* all the space in the metaslab as free and add it to the
* ms_allocatable tree.
*/
range_tree_add(msp->ms_allocatable,
msp->ms_start, msp->ms_size);
if (msp->ms_new) {
/*
* If the ms_sm doesn't exist, this means that this
* metaslab hasn't gone through metaslab_sync() and
* thus has never been dirtied. So we shouldn't
* expect any unflushed allocs or frees from previous
* TXGs.
*/
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
}
}
/*
* We need to grab the ms_sync_lock to prevent metaslab_sync() from
* changing the ms_sm (or log_sm) and the metaslab's range trees
* while we are about to use them and populate the ms_allocatable.
* The ms_lock is insufficient for this because metaslab_sync() doesn't
* hold the ms_lock while writing the ms_checkpointing tree to disk.
*/
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
ASSERT(!msp->ms_condensing);
ASSERT(!msp->ms_flushing);
if (error != 0) {
mutex_exit(&msp->ms_sync_lock);
return (error);
}
ASSERT3P(msp->ms_group, !=, NULL);
msp->ms_loaded = B_TRUE;
/*
* Apply all the unflushed changes to ms_allocatable right
* away so any manipulations we do below have a clear view
* of what is allocated and what is free.
*/
range_tree_walk(msp->ms_unflushed_allocs,
range_tree_remove, msp->ms_allocatable);
range_tree_walk(msp->ms_unflushed_frees,
range_tree_add, msp->ms_allocatable);
ASSERT3P(msp->ms_group, !=, NULL);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (spa_syncing_log_sm(spa) != NULL) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LOG_SPACEMAP));
/*
* If we use a log space map we add all the segments
* that are in ms_unflushed_frees so they are available
* for allocation.
*
* ms_allocatable needs to contain all free segments
* that are ready for allocations (thus not segments
* from ms_freeing, ms_freed, and the ms_defer trees).
* But if we grab the lock in this code path at a sync
* pass later than 1, then it also contains the
* segments of ms_freed (they were added to it earlier
* in this path through ms_unflushed_frees). So we
* need to remove all the segments that exist in
* ms_freed from ms_allocatable as they will be added
* later in metaslab_sync_done().
*
* When there's no log space map, the ms_allocatable
* correctly doesn't contain any segments that exist
* in ms_freed [see ms_synced_length].
*/
range_tree_walk(msp->ms_freed,
range_tree_remove, msp->ms_allocatable);
}
/*
* If we are not using the log space map, ms_allocatable
* contains the segments that exist in the ms_defer trees
* [see ms_synced_length]. Thus we need to remove them
* from ms_allocatable as they will be added again in
* metaslab_sync_done().
*
* If we are using the log space map, ms_allocatable still
* contains the segments that exist in the ms_defer trees.
* This is not because it read them through the ms_sm, but
* because these segments are part of ms_unflushed_frees,
* which we added to ms_allocatable earlier in this code path.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_remove, msp->ms_allocatable);
}
/*
* Call metaslab_recalculate_weight_and_sort() now that the
* metaslab is loaded so we get the metaslab's real weight.
*
* Unless this metaslab was created with older software and
* has not yet been converted to use segment-based weight, we
* expect the new weight to be better or equal to the weight
* that the metaslab had while it was not loaded. This is
* because the old weight does not take into account the
* consolidation of adjacent segments between TXGs. [see
* comment for ms_synchist and ms_deferhist[] for more info]
*/
uint64_t weight = msp->ms_weight;
uint64_t max_size = msp->ms_max_size;
metaslab_recalculate_weight_and_sort(msp);
if (!WEIGHT_IS_SPACEBASED(weight))
ASSERT3U(weight, <=, msp->ms_weight);
msp->ms_max_size = metaslab_largest_allocatable(msp);
ASSERT3U(max_size, <=, msp->ms_max_size);
hrtime_t load_end = gethrtime();
msp->ms_load_time = load_end;
zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, smp_length %llu, "
"unflushed_allocs %llu, unflushed_frees %llu, "
"freed %llu, defer %llu + %llu, unloaded time %llu ms, "
"loading_time %lld ms, ms_max_size %llu, "
"max size error %lld, "
"old_weight %llx, new_weight %llx",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)range_tree_space(msp->ms_freed),
(u_longlong_t)range_tree_space(msp->ms_defer[0]),
(u_longlong_t)range_tree_space(msp->ms_defer[1]),
(longlong_t)((load_start - msp->ms_unload_time) / 1000000),
(longlong_t)((load_end - load_start) / 1000000),
(u_longlong_t)msp->ms_max_size,
(u_longlong_t)msp->ms_max_size - max_size,
(u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
metaslab_verify_space(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_sync_lock);
return (0);
}
int
metaslab_load(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* There may be another thread loading the same metaslab, if that's
* the case just wait until the other thread is done and return.
*/
metaslab_load_wait(msp);
if (msp->ms_loaded)
return (0);
VERIFY(!msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We set the loading flag BEFORE potentially dropping the lock to
* wait for an ongoing flush (see ms_flushing below). This way other
* threads know that there is already a thread that is loading this
* metaslab.
*/
msp->ms_loading = B_TRUE;
/*
* Wait for any in-progress flushing to finish as we drop the ms_lock
* both here (during space_map_load()) and in metaslab_flush() (when
* we flush our changes to the ms_sm).
*/
if (msp->ms_flushing)
metaslab_flush_wait(msp);
/*
* In the event that we were waiting for the metaslab to be
* flushed (where we temporarily dropped the ms_lock), ensure that
* no one else loaded the metaslab somehow.
*/
ASSERT(!msp->ms_loaded);
/*
* If we're loading a metaslab in the normal class, consider evicting
* another one to keep our memory usage under the limit defined by the
* zfs_metaslab_mem_limit tunable.
*/
if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
msp->ms_group->mg_class) {
metaslab_potentially_evict(msp->ms_group->mg_class);
}
int error = metaslab_load_impl(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
msp->ms_loading = B_FALSE;
cv_broadcast(&msp->ms_load_cv);
return (error);
}
void
metaslab_unload(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* This can happen if a metaslab is selected for eviction (in
* metaslab_potentially_evict) and then unloaded during spa_sync (via
* metaslab_class_evict_old).
*/
if (!msp->ms_loaded)
return;
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
msp->ms_loaded = B_FALSE;
msp->ms_unload_time = gethrtime();
msp->ms_activation_weight = 0;
msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
if (msp->ms_group != NULL) {
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, weight %llx, "
"selected txg %llu (%llu ms ago), alloc_txg %llu, "
"loaded %llu ms ago, max_size %llu",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)msp->ms_weight,
(u_longlong_t)msp->ms_selected_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_selected_time) / 1000 / 1000,
(u_longlong_t)msp->ms_alloc_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_load_time) / 1000 / 1000,
(u_longlong_t)msp->ms_max_size);
}
/*
* We explicitly recalculate the metaslab's weight based on its space
* map (as it is now not loaded). We want unloaded metaslabs to always
* have their weights calculated from the space map histograms, while
* loaded ones have theirs calculated from their in-core range tree
* [see metaslab_load()]. This way, the weight reflects the information
* available in-core, whether it is loaded or not.
*
* If ms_group == NULL, it means that we came here from metaslab_fini(),
* at which point it doesn't make sense for us to do the recalculation
* and the sorting.
*/
if (msp->ms_group != NULL)
metaslab_recalculate_weight_and_sort(msp);
}
/*
* We want to optimize the memory use of the per-metaslab range
* trees. To do this, we store the segments in the range trees in
* units of sectors, zero-indexing from the start of the metaslab. If
* the vdev_ms_shift - the vdev_ashift is less than 32, we can store
* the ranges using two uint32_ts, rather than two uint64_ts.
*/
range_seg_type_t
metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
uint64_t *start, uint64_t *shift)
{
if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
!zfs_metaslab_force_large_segs) {
*shift = vdev->vdev_ashift;
*start = msp->ms_start;
return (RANGE_SEG32);
} else {
*shift = 0;
*start = 0;
return (RANGE_SEG64);
}
}
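/*
 * Example of the 32-bit optimization above (illustrative numbers): with
 * 16 GiB metaslabs (vdev_ms_shift = 34) and 4 KiB sectors (vdev_ashift = 12),
 * 34 - 12 = 22 < 32, so any offset within the metaslab fits in a 32-bit
 * sector count relative to ms_start and RANGE_SEG32 is used. Otherwise the
 * range trees fall back to full 64-bit offsets (RANGE_SEG64).
 */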
void
metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
msp->ms_selected_txg = txg;
msp->ms_selected_time = gethrtime();
multilist_sublist_insert_tail(mls, msp);
multilist_sublist_unlock(mls);
}
void
metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta)
{
vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
ASSERT(vd->vdev_ms_count != 0);
metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
vdev_deflated_space(vd, space_delta));
}
int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
uint64_t txg, metaslab_t **msp)
{
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
metaslab_t *ms;
int error;
ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
multilist_link_init(&ms->ms_class_txg_node);
ms->ms_id = id;
ms->ms_start = id << vd->vdev_ms_shift;
ms->ms_size = 1ULL << vd->vdev_ms_shift;
ms->ms_allocator = -1;
ms->ms_new = B_TRUE;
vdev_ops_t *ops = vd->vdev_ops;
if (ops->vdev_op_metaslab_init != NULL)
ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
/*
* We only open space map objects that already exist. All others
* will be opened when we finally allocate an object for them. For
* readonly pools there is no need to open the space map object.
*
* Note:
* When called from vdev_expand(), we can't call into the DMU as
* we are holding the spa_config_lock as a writer and we would
* deadlock [see relevant comment in vdev_metaslab_init()]. In
* that case, the object parameter is zero though, so we won't
* call into the DMU.
*/
if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
!spa->spa_read_spacemaps)) {
error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
ms->ms_size, vd->vdev_ashift);
if (error != 0) {
kmem_free(ms, sizeof (metaslab_t));
return (error);
}
ASSERT(ms->ms_sm != NULL);
ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
}
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_SIZE; t++) {
ms->ms_allocating[t] = range_tree_create(NULL, type,
NULL, start, shift);
}
ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
start, shift);
}
ms->ms_checkpointing =
range_tree_create(NULL, type, NULL, start, shift);
ms->ms_unflushed_allocs =
range_tree_create(NULL, type, NULL, start, shift);
metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
type, mrap, start, shift);
ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
metaslab_group_add(mg, ms);
metaslab_set_fragmentation(ms, B_FALSE);
/*
* If we're opening an existing pool (txg == 0) or creating
* a new one (txg == TXG_INITIAL), all space is available now.
* If we're adding space to an existing pool, the new space
* does not become available until after this txg has synced.
* The metaslab's weight will also be initialized when we sync
* out this txg. This ensures that we don't attempt to allocate
* from it before we have initialized it completely.
*/
if (txg <= TXG_INITIAL) {
metaslab_sync_done(ms, 0);
metaslab_space_update(vd, mg->mg_class,
metaslab_allocated_space(ms), 0, 0);
}
if (txg != 0) {
vdev_dirty(vd, 0, NULL, txg);
vdev_dirty(vd, VDD_METASLAB, ms, txg);
}
*msp = ms;
return (0);
}
static void
metaslab_fini_flush_data(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (metaslab_unflushed_txg(msp) == 0) {
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
==, NULL);
return;
}
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
metaslab_unflushed_dirty(msp));
}
uint64_t
metaslab_unflushed_changes_memused(metaslab_t *ms)
{
return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
range_tree_numsegs(ms->ms_unflushed_frees)) *
ms->ms_unflushed_allocs->rt_root.bt_elem_size);
}
void
metaslab_fini(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
metaslab_fini_flush_data(msp);
metaslab_group_remove(mg, msp);
mutex_enter(&msp->ms_lock);
VERIFY(msp->ms_group == NULL);
/*
* If this metaslab hasn't been through metaslab_sync_done() yet, its
* space hasn't been accounted for in its vdev and doesn't need to be
* subtracted.
*/
if (!msp->ms_new) {
metaslab_space_update(vd, mg->mg_class,
-metaslab_allocated_space(msp), 0, -msp->ms_size);
}
space_map_close(msp->ms_sm);
msp->ms_sm = NULL;
metaslab_unload(msp);
range_tree_destroy(msp->ms_allocatable);
range_tree_destroy(msp->ms_freeing);
range_tree_destroy(msp->ms_freed);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_allocs);
range_tree_destroy(msp->ms_checkpointing);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_frees);
for (int t = 0; t < TXG_SIZE; t++) {
range_tree_destroy(msp->ms_allocating[t]);
}
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_destroy(msp->ms_defer[t]);
}
ASSERT0(msp->ms_deferspace);
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
range_tree_vacate(msp->ms_trim, NULL, NULL);
range_tree_destroy(msp->ms_trim);
mutex_exit(&msp->ms_lock);
cv_destroy(&msp->ms_load_cv);
cv_destroy(&msp->ms_flush_cv);
mutex_destroy(&msp->ms_lock);
mutex_destroy(&msp->ms_sync_lock);
ASSERT3U(msp->ms_allocator, ==, -1);
kmem_free(msp, sizeof (metaslab_t));
}
#define FRAGMENTATION_TABLE_SIZE 17
/*
* This table defines a segment size based fragmentation metric that will
* allow each metaslab to derive its own fragmentation value. This is done
* by calculating the space in each bucket of the spacemap histogram and
* multiplying that by the fragmentation metric in this table. Doing
* this for all buckets and dividing it by the total amount of free
* space in this metaslab (i.e. the total free space in all buckets) gives
* us the fragmentation metric. This means that a high fragmentation metric
* equates to most of the free space being comprised of small segments.
* Conversely, if the metric is low, then most of the free space is in
* large segments. A 10% change in fragmentation equates to approximately
* double the number of segments.
*
* This table defines 0% fragmented space using 16MB segments. Testing has
* shown that segments that are greater than or equal to 16MB do not suffer
* from drastic performance problems. Using this value, we derive the rest
* of the table. Since the fragmentation value is never stored on disk, it
* is possible to change these calculations in the future.
*/
static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
100, /* 512B */
100, /* 1K */
98, /* 2K */
95, /* 4K */
90, /* 8K */
80, /* 16K */
70, /* 32K */
60, /* 64K */
50, /* 128K */
40, /* 256K */
30, /* 512K */
20, /* 1M */
15, /* 2M */
10, /* 4M */
5, /* 8M */
0 /* 16M */
};
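/*
 * Worked example (illustrative numbers): a metaslab whose free space is 60%
 * in 8K segments and 40% in 1M segments gets a fragmentation of
 * (60 * 90 + 40 * 20) / 100 = 62%, i.e. each bucket's share of the free
 * space weighted by the table entry for that segment size.
 */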
/*
* Calculate the metaslab's fragmentation metric and set ms_fragmentation.
* Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
* been upgraded and does not support this metric. Otherwise, the
* computed value should be in the range [0, 100].
*/
static void
metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t fragmentation = 0;
uint64_t total = 0;
boolean_t feature_enabled = spa_feature_is_enabled(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM);
if (!feature_enabled) {
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
/*
* A null space map means that the entire metaslab is free
* and thus is not fragmented.
*/
if (msp->ms_sm == NULL) {
msp->ms_fragmentation = 0;
return;
}
/*
* If this metaslab's space map has not been upgraded, flag it
* so that we upgrade next time we encounter it.
*/
if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
uint64_t txg = spa_syncing_txg(spa);
vdev_t *vd = msp->ms_group->mg_vd;
/*
* If we've reached the final dirty txg, then we must
* be shutting down the pool. We don't want to dirty
* any data past this point so skip setting the condense
* flag. We can retry this action the next time the pool
* is imported. We also skip marking this metaslab for
* condensing if the caller has explicitly set nodirty.
*/
if (!nodirty &&
spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
msp->ms_condense_wanted = B_TRUE;
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
zfs_dbgmsg("txg %llu, requesting force condense: "
"ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
(u_longlong_t)msp->ms_id,
(u_longlong_t)vd->vdev_id);
}
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
uint64_t space = 0;
uint8_t shift = msp->ms_sm->sm_shift;
int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
FRAGMENTATION_TABLE_SIZE - 1);
if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
continue;
space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
total += space;
ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
fragmentation += space * zfs_frag_table[idx];
}
if (total > 0)
fragmentation /= total;
ASSERT3U(fragmentation, <=, 100);
msp->ms_fragmentation = fragmentation;
}
/*
* Compute a weight -- a selection preference value -- for the given metaslab.
* This is based on the amount of free space, the level of fragmentation,
* the LBA range, and whether the metaslab is loaded.
*/
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
uint64_t weight, space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The baseline weight is the metaslab's free space.
*/
space = msp->ms_size - metaslab_allocated_space(msp);
if (metaslab_fragmentation_factor_enabled &&
msp->ms_fragmentation != ZFS_FRAG_INVALID) {
/*
* Use the fragmentation information to inversely scale
* down the baseline weight. We need to ensure that we
* don't exclude this metaslab completely when it's 100%
* fragmented. To avoid this we reduce the fragmented value
* by 1.
*/
space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
/*
* If space < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. The fragmentation metric may have
* decreased the space to something smaller than
* SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
* so that we can consume any remaining space.
*/
if (space > 0 && space < SPA_MINBLOCKSIZE)
space = SPA_MINBLOCKSIZE;
}
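/*
 * For example (illustrative numbers): with 10 GiB free and 75%
 * fragmentation, the baseline becomes 10 GiB * (100 - 74) / 100 =
 * 2.6 GiB, and even a 100% fragmented metaslab keeps 1% of its free
 * space as weight.
 */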
weight = space;
/*
* Modern disks have uniform bit density and constant angular velocity.
* Therefore, the outer recording zones are faster (higher bandwidth)
* than the inner zones by the ratio of outer to inner track diameter,
* which is typically around 2:1. We account for this by assigning
* higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
* In effect, this means that we'll select the metaslab with the most
* free bandwidth rather than simply the one with the most free space.
*/
if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
ASSERT(weight >= space && weight <= 2 * space);
}
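/*
 * For example (illustrative numbers), on a rotational vdev with 200
 * metaslabs the formula above gives metaslab 0 a weight of 2 * space,
 * metaslab 100 roughly 1.5 * space, and metaslab 199 just over 1 * space,
 * biasing allocations toward the faster outer tracks.
 */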
/*
* If this metaslab is one we're actively using, adjust its
* weight to make it preferable to any inactive metaslab so
* we'll polish it off. If the fragmentation on this metaslab
* has exceeded our threshold, then don't mark it active.
*/
if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
}
WEIGHT_SET_SPACEBASED(weight);
return (weight);
}
/*
* Return the weight of the specified metaslab, according to the segment-based
* weighting algorithm. The metaslab must be loaded. This function can
* be called within a sync pass since it relies only on the metaslab's
* range tree which is always accurate when the metaslab is loaded.
*/
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
uint64_t weight = 0;
uint32_t segments = 0;
ASSERT(msp->ms_loaded);
for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
i--) {
uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
segments <<= 1;
segments += msp->ms_allocatable->rt_histogram[i];
/*
* The range tree provides more precision than the space map
* and must be downgraded so that all values fit within the
* space map's histogram. This allows us to compare loaded
* vs. unloaded metaslabs to determine which metaslab is
* considered "best".
*/
if (i > max_idx)
continue;
if (segments != 0) {
WEIGHT_SET_COUNT(weight, segments);
WEIGHT_SET_INDEX(weight, i);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Calculate the weight based on the on-disk histogram. Should be applied
* only to unloaded metaslabs (i.e. no incoming allocations) in order to
* give results consistent with the on-disk state.
*/
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(!msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(space_map_object(sm), !=, 0);
ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* Create a joint histogram from all the segments that have made
* it to the metaslab's space map histogram, that are not yet
* available for allocation because they are still in the freeing
* pipeline (e.g. freeing, freed, and defer trees). Then subtract
* these segments from the space map's histogram to get a more
* accurate weight.
*/
uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
deferspace_histogram[i] += msp->ms_synchist[i];
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
deferspace_histogram[i] += msp->ms_deferhist[t][i];
}
}
uint64_t weight = 0;
for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
deferspace_histogram[i]);
uint64_t count =
sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
if (count != 0) {
WEIGHT_SET_COUNT(weight, count);
WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Compute a segment-based weight for the specified metaslab. The weight
* is determined by the highest bucket in the histogram. The information
* for the highest bucket is encoded into the weight value.
*/
static uint64_t
metaslab_segment_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
uint64_t weight = 0;
uint8_t shift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The metaslab is completely free.
*/
if (metaslab_allocated_space(msp) == 0) {
int idx = highbit64(msp->ms_size) - 1;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
if (idx < max_idx) {
WEIGHT_SET_COUNT(weight, 1ULL);
WEIGHT_SET_INDEX(weight, idx);
} else {
WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
WEIGHT_SET_INDEX(weight, max_idx);
}
WEIGHT_SET_ACTIVE(weight, 0);
ASSERT(!WEIGHT_IS_SPACEBASED(weight));
return (weight);
}
ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* If the metaslab is fully allocated then just make the weight 0.
*/
if (metaslab_allocated_space(msp) == msp->ms_size)
return (0);
/*
* If the metaslab is already loaded, then use the range tree to
* determine the weight. Otherwise, we rely on the space map information
* to generate the weight.
*/
if (msp->ms_loaded) {
weight = metaslab_weight_from_range_tree(msp);
} else {
weight = metaslab_weight_from_spacemap(msp);
}
/*
* If the metaslab was active the last time we calculated its weight
* then keep it active. We want to consume the entire region that
* is associated with this weight.
*/
if (msp->ms_activation_weight != 0 && weight != 0)
WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
return (weight);
}
/*
* Determine if we should attempt to allocate from this metaslab. If the
* metaslab is loaded, then we can determine if the desired allocation
* can be satisfied by looking at the size of the maximum free segment
* on that metaslab. Otherwise, we make our decision based on the metaslab's
* weight. For segment-based weighting we can determine the maximum
* allocation based on the index encoded in its value. For space-based
* weights we rely on the entire weight (excluding the weight-type bit).
*/
static boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
{
/*
* If the metaslab is loaded, ms_max_size is definitive and we can use
* the fast check. If it's not, the ms_max_size is a lower bound (once
* set), and we should use the fast check as long as we're not in
* try_hard and it's been less than zfs_metaslab_max_size_cache_sec
* seconds since the metaslab was unloaded.
*/
if (msp->ms_loaded ||
(msp->ms_max_size != 0 && !try_hard && gethrtime() <
msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
return (msp->ms_max_size >= asize);
boolean_t should_allocate;
if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
/*
* The metaslab segment weight indicates segments in the
* range [2^i, 2^(i+1)), where i is the index in the weight.
* Since the asize might be in the middle of the range, we
* should attempt the allocation if asize < 2^(i+1).
*/
should_allocate = (asize <
1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
} else {
should_allocate = (asize <=
(msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
}
return (should_allocate);
}
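/*
 * Example of the weight-based path above (illustrative): a segment-based
 * weight with index 20 advertises free segments somewhere in
 * [2^20, 2^21) = [1 MiB, 2 MiB), so an asize of 1.5 MiB is worth trying
 * while 2 MiB is not; a space-based weight simply compares asize against
 * the (fragmentation-scaled) free space encoded in the weight.
 */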
static uint64_t
metaslab_weight(metaslab_t *msp, boolean_t nodirty)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
uint64_t weight;
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_set_fragmentation(msp, nodirty);
/*
* Update the maximum size. If the metaslab is loaded, this will
* ensure that we get an accurate maximum size if newly freed space
* has been added back into the free tree. If the metaslab is
* unloaded, we check if there's a larger free segment in the
* unflushed frees. This is a lower bound on the largest allocatable
* segment size. Coalescing of adjacent entries may reveal larger
* allocatable segments, but we aren't aware of those until loading
* the space map into a range tree.
*/
if (msp->ms_loaded) {
msp->ms_max_size = metaslab_largest_allocatable(msp);
} else {
msp->ms_max_size = MAX(msp->ms_max_size,
metaslab_largest_unflushed_free(msp));
}
/*
* Segment-based weighting requires space map histogram support.
*/
if (zfs_metaslab_segment_weight_enabled &&
spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
(msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
sizeof (space_map_phys_t))) {
weight = metaslab_segment_weight(msp);
} else {
weight = metaslab_space_weight(msp);
}
return (weight);
}
void
metaslab_recalculate_weight_and_sort(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/* note: we preserve the mask (e.g. indication of primary, etc.) */
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(msp->ms_group, msp,
metaslab_weight(msp, B_FALSE) | was_active);
}
static int
metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
int allocator, uint64_t activation_weight)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're activating for the claim code, we don't want to actually
* set the metaslab up for a specific allocator.
*/
if (activation_weight == METASLAB_WEIGHT_CLAIM) {
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort(mg, msp, msp->ms_weight |
activation_weight);
return (0);
}
metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
&mga->mga_primary : &mga->mga_secondary);
mutex_enter(&mg->mg_lock);
if (*mspp != NULL) {
mutex_exit(&mg->mg_lock);
return (EEXIST);
}
*mspp = msp;
ASSERT3S(msp->ms_allocator, ==, -1);
msp->ms_allocator = allocator;
msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort_impl(mg, msp,
msp->ms_weight | activation_weight);
mutex_exit(&mg->mg_lock);
return (0);
}
static int
metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The current metaslab is already activated for us so there
* is nothing to do. Being activated, though, doesn't mean
* that this metaslab is activated for our allocator or with our
* requested activation weight. The metaslab could have started
* as an active one for our allocator but changed allocators
* while we were waiting to grab its ms_lock or we stole it
* [see find_valid_metaslab()]. This means that there is a
* possibility of passivating a metaslab of another allocator
* or from a different activation mask, from this thread.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
ASSERT(msp->ms_loaded);
return (0);
}
int error = metaslab_load(msp);
if (error != 0) {
metaslab_group_sort(msp->ms_group, msp, 0);
return (error);
}
/*
* When entering metaslab_load() we may have dropped the
* ms_lock because we were loading this metaslab, or we
* were waiting for another thread to load it for us. In
* that scenario, we recheck the weight of the metaslab
* to see if it was activated by another thread.
*
* If the metaslab was activated for another allocator or
* it was activated with a different activation weight (e.g.
* we wanted to make it a primary but it was activated as
* secondary) we return error (EBUSY).
*
* If the metaslab was activated for the same allocator
* and requested activation mask, skip activating it.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
if (msp->ms_allocator != allocator)
return (EBUSY);
if ((msp->ms_weight & activation_weight) == 0)
return (SET_ERROR(EBUSY));
EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
msp->ms_primary);
return (0);
}
/*
* If the metaslab has literally 0 space, it will have weight 0. In
* that case, don't bother activating it. This can happen if the
* metaslab had space during find_valid_metaslab, but another thread
* loaded it and used all that space while we were waiting to grab the
* lock.
*/
if (msp->ms_weight == 0) {
ASSERT0(range_tree_space(msp->ms_allocatable));
return (SET_ERROR(ENOSPC));
}
if ((error = metaslab_activate_allocator(msp->ms_group, msp,
allocator, activation_weight)) != 0) {
return (error);
}
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
return (0);
}
static void
metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
metaslab_group_sort(mg, msp, weight);
return;
}
mutex_enter(&mg->mg_lock);
ASSERT3P(msp->ms_group, ==, mg);
ASSERT3S(0, <=, msp->ms_allocator);
ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
if (msp->ms_primary) {
ASSERT3P(mga->mga_primary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
mga->mga_primary = NULL;
} else {
ASSERT3P(mga->mga_secondary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
mga->mga_secondary = NULL;
}
msp->ms_allocator = -1;
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
/*
* If size < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. In that case, it had better be empty,
* or we would be leaving space on the table.
*/
ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
size >= SPA_MINBLOCKSIZE ||
range_tree_space(msp->ms_allocatable) == 0);
ASSERT0(weight & METASLAB_ACTIVE_MASK);
ASSERT(msp->ms_activation_weight != 0);
msp->ms_activation_weight = 0;
metaslab_passivate_allocator(msp->ms_group, msp, weight);
ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
}
/*
* Segment-based metaslabs are activated once and remain active until
* we either fail an allocation attempt (similar to space-based metaslabs)
* or have exhausted the free space in zfs_metaslab_switch_threshold
* buckets since the metaslab was activated. This function checks to see
* if we've exhausted the zfs_metaslab_switch_threshold buckets in the
* metaslab and passivates it proactively. This will allow us to select a
* metaslab with a larger contiguous region, if any, remaining within this
* metaslab group. If we're in sync pass > 1, then we continue using this
* metaslab so that we don't dirty more blocks and cause more sync passes.
*/
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
return;
/*
* Since we are in the middle of a sync pass, the most accurate
* information that is accessible to us is the in-core range tree
* histogram; calculate the new weight based on that information.
*/
uint64_t weight = metaslab_weight_from_range_tree(msp);
int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
int current_idx = WEIGHT_GET_INDEX(weight);
if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
metaslab_passivate(msp, weight);
}
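/*
 * For example (assuming zfs_metaslab_switch_threshold = 2, an illustrative
 * value): a metaslab activated at weight index 23 (segments in the
 * 8 MiB-16 MiB bucket) is proactively passivated once its in-core histogram
 * shows the largest remaining bucket at index 21 or below, letting a
 * sibling metaslab with larger contiguous regions be selected instead.
 */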
static void
metaslab_preload(void *arg)
{
metaslab_t *msp = arg;
metaslab_class_t *mc = msp->ms_group->mg_class;
spa_t *spa = mc->mc_spa;
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
mutex_enter(&msp->ms_lock);
(void) metaslab_load(msp);
metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_lock);
spl_fstrans_unmark(cookie);
}
static void
metaslab_group_preload(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_t *msp;
avl_tree_t *t = &mg->mg_metaslab_tree;
int m = 0;
if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
taskq_wait_outstanding(mg->mg_taskq, 0);
return;
}
mutex_enter(&mg->mg_lock);
/*
* Load the next potential metaslabs
*/
for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
ASSERT3P(msp->ms_group, ==, mg);
/*
* We preload only the maximum number of metaslabs specified
* by metaslab_preload_limit. If a metaslab is being forced
* to condense then we preload it too. This will ensure
* that force condensing happens in the next txg.
*/
if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
continue;
}
VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
msp, TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&mg->mg_lock);
}
/*
* Determine if the space map's on-disk footprint is past our tolerance for
* inefficiency. We would like to use the following criteria to make our
* decision:
*
* 1. Do not condense if the size of the space map object would dramatically
* increase as a result of writing out the free space range tree.
*
* 2. Condense if the on-disk space map representation is at least
* zfs_condense_pct/100 times the size of the optimal representation
* (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
*
* 3. Do not condense if the on-disk size of the space map does not actually
* decrease.
*
* Unfortunately, we cannot compute the on-disk size of the space map in this
* context because we cannot accurately compute the effects of compression, etc.
* Instead, we apply the heuristic described in the block comment for
* zfs_metaslab_condense_block_threshold - we only condense if the space used
* is greater than a threshold number of blocks.
*/
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
vdev_t *vd = msp->ms_group->mg_vd;
uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
/*
* We always condense metaslabs that are empty and metaslabs for
* which a condense request has been made.
*/
if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
msp->ms_condense_wanted)
return (B_TRUE);
uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
uint64_t object_size = space_map_length(sm);
uint64_t optimal_size = space_map_estimate_optimal_size(sm,
msp->ms_allocatable, SM_NO_VDEVID);
return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
object_size > zfs_metaslab_condense_block_threshold * record_size);
}
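/*
 * Worked example of the heuristic above (illustrative numbers; the tunable
 * values are assumptions): with zfs_condense_pct = 200, a 16K record size
 * and zfs_metaslab_condense_block_threshold = 4, a space map is condensed
 * only if its on-disk length is at least twice its estimated optimal size
 * and larger than 4 * 16K = 64K.
 */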
/*
* Condense the on-disk space map representation to its minimized form.
* The minimized form consists of a small number of allocations followed
* by the entries of the free range tree (ms_allocatable). The condensed
* spacemap contains all the entries of previous TXGs (including those in
* the pool-wide log spacemaps; thus this is effectively a superset of
* metaslab_flush()), but this TXG's entries still need to be written.
*/
static void
metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
{
range_tree_t *condense_tree;
space_map_t *sm = msp->ms_sm;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_sm != NULL);
/*
* In order to condense the space map, we need to change it so it
* only describes which segments are currently allocated and free.
*
* All the current free space resides in the ms_allocatable, all
* the ms_defer trees, and all the ms_allocating trees. We ignore
* ms_freed because it is empty because we're in sync pass 1. We
* ignore ms_freeing because these changes are not yet reflected
* in the spacemap (they will be written later this txg).
*
* So to truncate the space map to represent all the entries of
* previous TXGs we do the following:
*
* 1] We create a range tree (condense tree) that is 100% empty.
* 2] We add to it all segments found in the ms_defer trees
* as those segments are marked as free in the original space
* map. We do the same with the ms_allocating trees for the same
* reason. Adding these segments should be a relatively
* inexpensive operation since we expect these trees to have a
* small number of nodes.
* 3] We vacate any unflushed allocs, since they are not frees we
* need to add to the condense tree. Then we vacate any
* unflushed frees as they should already be part of ms_allocatable.
* 4] At this point, we would ideally like to add all segments
* in the ms_allocatable tree to the condense tree. This way
* we would write all the entries of the condense tree as the
* condensed space map, which would only contain freed
* segments with everything else assumed to be allocated.
*
* Doing so can be prohibitively expensive as ms_allocatable can
* be large, and therefore computationally expensive to add to
* the condense_tree. Instead we first sync out an entry marking
* everything as allocated, then the condense_tree and then the
* ms_allocatable, in the condensed space map. While this is not
* optimal, it is typically close to optimal and more importantly
* much cheaper to compute.
*
* 5] Finally, as both of the unflushed trees were written to our
* new and condensed metaslab space map, we basically flushed
* all the unflushed changes to disk, thus we call
* metaslab_flush_update().
*/
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
"spa %s, smp size %llu, segments %llu, forcing condense=%s",
(u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
msp->ms_condense_wanted ? "TRUE" : "FALSE");
msp->ms_condense_wanted = B_FALSE;
range_seg_type_t type;
uint64_t shift, start;
type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
&start, &shift);
condense_tree = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_add, condense_tree);
}
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
range_tree_add, condense_tree);
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
/*
* We're about to drop the metaslab's lock thus allowing other
* consumers to change its content. Set the metaslab's ms_condensing
* flag to ensure that allocations on this metaslab do not occur
* while we're in the middle of committing it to disk. This is only
* critical for ms_allocatable as all other range trees use per TXG
* views of their content.
*/
msp->ms_condensing = B_TRUE;
mutex_exit(&msp->ms_lock);
uint64_t object = space_map_object(msp->ms_sm);
space_map_truncate(sm,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
/*
* space_map_truncate() may have reallocated the spacemap object.
* If so, update the vdev_ms_array.
*/
if (space_map_object(msp->ms_sm) != object) {
object = space_map_object(msp->ms_sm);
dmu_write(spa->spa_meta_objset,
msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &object, tx);
}
/*
* Note:
* When the log space map feature is enabled, each space map will
* always have ALLOCS followed by FREES for each sync pass. This is
* typically true even when the log space map feature is disabled,
* except from the case where a metaslab goes through metaslab_sync()
* and gets condensed. In that case the metaslab's space map will have
* ALLOCS followed by FREES (due to condensing) followed by ALLOCS
* followed by FREES (due to space_map_write() in metaslab_sync()) for
* sync pass 1.
*/
range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
shift);
range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
range_tree_vacate(condense_tree, NULL, NULL);
range_tree_destroy(condense_tree);
range_tree_vacate(tmp_tree, NULL, NULL);
range_tree_destroy(tmp_tree);
mutex_enter(&msp->ms_lock);
msp->ms_condensing = B_FALSE;
metaslab_flush_update(msp, tx);
}
static void
metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
mutex_enter(&spa->spa_flushed_ms_lock);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
metaslab_set_unflushed_dirty(msp, B_TRUE);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_increment_current_mscount(spa);
spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
}
void
metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
/* update metaslab's position in our flushing tree */
uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
metaslab_set_unflushed_dirty(msp, dirty);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
/* update metaslab counts of spa_log_sm_t nodes */
spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
spa_log_sm_increment_current_mscount(spa);
/* update log space map summary */
spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
ms_prev_flushed_dirty);
spa_log_summary_add_flushed_metaslab(spa, dirty);
/* cleanup obsolete logs if any */
spa_cleanup_old_sm_logs(spa, tx);
}
/*
* Called when the metaslab has been flushed (its own spacemap now reflects
* all the contents of the pool-wide spacemap log). Updates the metaslab's
* metadata and any pool-wide related log space map data (e.g. summary,
* obsolete logs, etc..) to reflect that.
*/
static void
metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
/*
* Just because a metaslab got flushed, that doesn't mean that
* it will pass through metaslab_sync_done(). Thus, make sure to
* update ms_synced_length here in case it doesn't.
*/
msp->ms_synced_length = space_map_length(msp->ms_sm);
/*
* We may end up here from metaslab_condense() without the
* feature being active. In that case this is a no-op.
*/
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
metaslab_unflushed_txg(msp) == 0)
return;
metaslab_unflushed_bump(msp, tx, B_FALSE);
}
boolean_t
metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
/*
* There is nothing wrong with flushing the same metaslab twice, as
* this codepath should handle that case. However, the current
* flushing scheme makes sure to avoid this situation as we would be
* making all these calls without having anything meaningful to write
* to disk. We assert this behavior here.
*/
ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
/*
* We can not flush while loading, because then we would
* not load the ms_unflushed_{allocs,frees}.
*/
if (msp->ms_loading)
return (B_FALSE);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
/*
* Metaslab condensing is effectively flushing. Therefore if the
* metaslab can be condensed we can just condense it instead of
* flushing it.
*
* Note that metaslab_condense() does call metaslab_flush_update()
* so we can just return immediately after condensing. We also
* don't need to care about setting ms_flushing or broadcasting
* ms_flush_cv, even if we temporarily drop the ms_lock in
* metaslab_condense(), as the metaslab is already loaded.
*/
if (msp->ms_loaded && metaslab_should_condense(msp)) {
metaslab_group_t *mg = msp->ms_group;
/*
* For all histogram operations below refer to the
* comments of metaslab_sync() where we follow a
* similar procedure.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
metaslab_condense(msp, tx);
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
ASSERT(range_tree_is_empty(msp->ms_freed));
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
/*
* Since we recreated the histogram (and potentially
* the ms_sm too while condensing) ensure that the
* weight is updated too because we are not guaranteed
* that this metaslab is dirty and will go through
* metaslab_sync_done().
*/
metaslab_recalculate_weight_and_sort(msp);
return (B_TRUE);
}
msp->ms_flushing = B_TRUE;
uint64_t sm_len_before = space_map_length(msp->ms_sm);
mutex_exit(&msp->ms_lock);
space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
uint64_t sm_len_after = space_map_length(msp->ms_sm);
if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
"appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)(sm_len_after - sm_len_before));
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
metaslab_flush_update(msp, tx);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
msp->ms_flushing = B_FALSE;
cv_broadcast(&msp->ms_flush_cv);
return (B_TRUE);
}
/*
* Write a metaslab to disk in the context of the specified transaction group.
*/
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
dmu_tx_t *tx;
ASSERT(!vd->vdev_ishole);
/*
* This metaslab has just been added so there's no work to do now.
*/
if (msp->ms_new) {
ASSERT0(range_tree_space(alloctree));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
ASSERT0(range_tree_space(msp->ms_trim));
return;
}
/*
* Normally, we don't want to process a metaslab if there are no
* allocations or frees to perform. However, if the metaslab is being
* forced to condense, it's loaded and we're not beyond the final
* dirty txg, we need to let it through. Not condensing beyond the
* final dirty txg prevents an issue where metaslabs that need to be
* condensed but were loaded for other reasons could cause a panic
* here. By only checking the txg in that branch of the conditional,
* we preserve the utility of the VERIFY statements in all other
* cases.
*/
if (range_tree_is_empty(alloctree) &&
range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing) &&
!(msp->ms_loaded && msp->ms_condense_wanted &&
txg <= spa_final_dirty_txg(spa)))
return;
VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
/*
* The only state that can actually be changing concurrently
* with metaslab_sync() is the metaslab's ms_allocatable. No
* other thread can be modifying this txg's alloc, freeing,
* freed, or space_map_phys_t. We drop ms_lock whenever we
* could call into the DMU, because the DMU can call down to
* us (e.g. via zio_free()) at any time.
*
* The spa_vdev_remove_thread() can be reading metaslab state
* concurrently, and it is locked out by the ms_sync_lock.
* Note that the ms_lock is insufficient for this, because it
* is dropped by space_map_write().
*/
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
/*
* Generate a log space map if one doesn't exist already.
*/
spa_generate_syncing_log_sm(spa, tx);
if (msp->ms_sm == NULL) {
uint64_t new_object = space_map_alloc(mos,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log :
zfs_metaslab_sm_blksz_no_log, tx);
VERIFY3U(new_object, !=, 0);
dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &new_object, tx);
VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
msp->ms_start, msp->ms_size, vd->vdev_ashift));
ASSERT(msp->ms_sm != NULL);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
ASSERT0(metaslab_allocated_space(msp));
}
if (!range_tree_is_empty(msp->ms_checkpointing) &&
vd->vdev_checkpoint_sm == NULL) {
ASSERT(spa_has_checkpoint(spa));
uint64_t new_object = space_map_alloc(mos,
zfs_vdev_standard_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* We save the space map object as an entry in vdev_top_zap
* so it can be retrieved when the pool is reopened after an
* export or through zdb.
*/
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (new_object), 1, &new_object, tx));
}
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* Note: metaslab_condense() clears the space map's histogram.
* Therefore we must verify and remove this histogram before
* condensing.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
metaslab_should_condense(msp))
metaslab_condense(msp, tx);
/*
* We'll be going to disk to sync our space accounting, thus we
* drop the ms_lock during that time so allocations coming from
* open-context (ZIL) for future TXGs do not block.
*/
mutex_exit(&msp->ms_lock);
space_map_t *log_sm = spa_syncing_log_sm(spa);
if (log_sm != NULL) {
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
if (metaslab_unflushed_txg(msp) == 0)
metaslab_unflushed_add(msp, tx);
else if (!metaslab_unflushed_dirty(msp))
metaslab_unflushed_bump(msp, tx, B_TRUE);
space_map_write(log_sm, alloctree, SM_ALLOC,
vd->vdev_id, tx);
space_map_write(log_sm, msp->ms_freeing, SM_FREE,
vd->vdev_id, tx);
mutex_enter(&msp->ms_lock);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_remove_xor_add(alloctree,
msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
range_tree_remove_xor_add(msp->ms_freeing,
msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
spa->spa_unflushed_stats.sus_memused +=
metaslab_unflushed_changes_memused(msp);
} else {
ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
}
msp->ms_allocated_space += range_tree_space(alloctree);
ASSERT3U(msp->ms_allocated_space, >=,
range_tree_space(msp->ms_freeing));
msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
if (!range_tree_is_empty(msp->ms_checkpointing)) {
ASSERT(spa_has_checkpoint(spa));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since we are doing writes to disk and the ms_checkpointing
* tree won't be changing during that time, we drop the
* ms_lock while writing to the checkpoint space map, for the
* same reason mentioned above.
*/
mutex_exit(&msp->ms_lock);
space_map_write(vd->vdev_checkpoint_sm,
msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
spa->spa_checkpoint_info.sci_dspace +=
range_tree_space(msp->ms_checkpointing);
vd->vdev_stat.vs_checkpoint_space +=
range_tree_space(msp->ms_checkpointing);
ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
-space_map_allocated(vd->vdev_checkpoint_sm));
range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
}
if (msp->ms_loaded) {
/*
* When the space map is loaded, we have an accurate
* histogram in the range tree. This gives us an opportunity
* to bring the space map's histogram up-to-date so we clear
* it first before updating it.
*/
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
/*
* Since we've cleared the histogram we need to add back
* any free space that has already been processed, plus
* any deferred space. This allows the on-disk histogram
* to accurately reflect all free space even if some space
* is not yet available for allocation (i.e. deferred).
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
/*
* Add back any deferred free space that has not been
* added back into the in-core free tree yet. This will
* ensure that we don't end up with a space map histogram
* that is completely empty unless the metaslab is fully
* allocated.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
}
/*
* Always add the free space from this sync pass to the space
* map histogram. We want to make sure that the on-disk histogram
* accounts for all free space. If the space map is not loaded,
* then we will lose some accuracy but will correct it the next
* time we load the space map.
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
/*
* For sync pass 1, we avoid traversing this txg's free range tree
* and instead will just swap the pointers for freeing and freed.
* We can safely do this since the freed_tree is guaranteed to be
* empty on the initial pass.
*
* Keep in mind that even if we are currently using a log spacemap
* we want current frees to end up in the ms_allocatable (but not
* get appended to the ms_sm) so their ranges can be reused as usual.
*/
if (spa_sync_pass(spa) == 1) {
range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
ASSERT0(msp->ms_allocated_this_txg);
} else {
range_tree_vacate(msp->ms_freeing,
range_tree_add, msp->ms_freed);
}
msp->ms_allocated_this_txg += range_tree_space(alloctree);
range_tree_vacate(alloctree, NULL, NULL);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
& TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
mutex_exit(&msp->ms_lock);
/*
* Verify that the space map object ID has been recorded in the
* vdev_ms_array.
*/
uint64_t object;
VERIFY0(dmu_read(mos, vd->vdev_ms_array,
msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
VERIFY3U(object, ==, space_map_object(msp->ms_sm));
mutex_exit(&msp->ms_sync_lock);
dmu_tx_commit(tx);
}
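/*
 * Evict an idle metaslab: verify that its allocating trees for all
 * upcoming txgs are empty, passivate it if it is still active for an
 * allocator, and unload its range trees unless metaslab_debug_unload
 * is set.
 */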
static void
metaslab_evict(metaslab_t *msp, uint64_t txg)
{
if (!msp->ms_loaded || msp->ms_disabled != 0)
return;
for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
VERIFY0(range_tree_space(
msp->ms_allocating[(txg + t) & TXG_MASK]));
}
if (msp->ms_allocator != -1)
metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
if (!metaslab_debug_unload)
metaslab_unload(msp);
}
/*
* Called after a transaction group has completely synced to mark
* all of the metaslab's free space as usable.
*/
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
range_tree_t **defer_tree;
int64_t alloc_delta, defer_delta;
boolean_t defer_allowed = B_TRUE;
ASSERT(!vd->vdev_ishole);
mutex_enter(&msp->ms_lock);
if (msp->ms_new) {
/* this is a new metaslab, add its capacity to the vdev */
metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
/* there should be no allocations nor frees at this point */
VERIFY0(msp->ms_allocated_this_txg);
VERIFY0(range_tree_space(msp->ms_freed));
}
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
metaslab_class_get_alloc(spa_normal_class(spa));
if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
defer_allowed = B_FALSE;
}
defer_delta = 0;
alloc_delta = msp->ms_allocated_this_txg -
range_tree_space(msp->ms_freed);
if (defer_allowed) {
defer_delta = range_tree_space(msp->ms_freed) -
range_tree_space(*defer_tree);
} else {
defer_delta -= range_tree_space(*defer_tree);
}
metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
defer_delta, 0);
if (spa_syncing_log_sm(spa) == NULL) {
/*
* If there's a metaslab_load() in progress and we don't have
* a log space map, it means that we probably wrote to the
* metaslab's space map. If this is the case, we need to
* make sure that we wait for the load to complete so that we
		 * have a consistent view of the in-core side of the metaslab.
*/
metaslab_load_wait(msp);
} else {
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
}
/*
* When auto-trimming is enabled, free ranges which are added to
	 * ms_allocatable are also added to ms_trim. The ms_trim tree is
* periodically consumed by the vdev_autotrim_thread() which issues
* trims for all ranges and then vacates the tree. The ms_trim tree
* can be discarded at any time with the sole consequence of recent
* frees not being trimmed.
*/
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
if (!defer_allowed) {
range_tree_walk(msp->ms_freed, range_tree_add,
msp->ms_trim);
}
} else {
range_tree_vacate(msp->ms_trim, NULL, NULL);
}
/*
* Move the frees from the defer_tree back to the free
* range tree (if it's loaded). Swap the freed_tree and
* the defer_tree -- this is safe to do because we've
* just emptied out the defer_tree.
*/
range_tree_vacate(*defer_tree,
msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
if (defer_allowed) {
range_tree_swap(&msp->ms_freed, defer_tree);
} else {
range_tree_vacate(msp->ms_freed,
msp->ms_loaded ? range_tree_add : NULL,
msp->ms_allocatable);
}
msp->ms_synced_length = space_map_length(msp->ms_sm);
msp->ms_deferspace += defer_delta;
ASSERT3S(msp->ms_deferspace, >=, 0);
ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
if (msp->ms_deferspace != 0) {
/*
* Keep syncing this metaslab until all deferred frees
* are back in circulation.
*/
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
}
metaslab_aux_histograms_update_done(msp, defer_allowed);
if (msp->ms_new) {
msp->ms_new = B_FALSE;
mutex_enter(&mg->mg_lock);
mg->mg_ms_ready++;
mutex_exit(&mg->mg_lock);
}
/*
* Re-sort metaslab within its group now that we've adjusted
* its allocatable space.
*/
metaslab_recalculate_weight_and_sort(msp);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
msp->ms_allocating_total -= msp->ms_allocated_this_txg;
msp->ms_allocated_this_txg = 0;
mutex_exit(&msp->ms_lock);
}
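/*
 * Reassess a metaslab group after a sync: refresh its allocatability
 * and fragmentation statistics and, if the group is still active,
 * preload the metaslabs that are most likely to be used next.
 */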
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_class->mc_spa;
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
metaslab_group_alloc_update(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
/*
* Preload the next potential metaslabs but only on active
* metaslab groups. We can get into a state where the metaslab
	 * is no longer active since we dirty metaslabs as we remove a
	 * device, thus potentially making the metaslab group eligible
* for preloading.
*/
if (mg->mg_activation_count > 0) {
metaslab_group_preload(mg);
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
}
/*
* When writing a ditto block (i.e. more than one DVA for a given BP) on
* the same vdev as an existing DVA of this BP, then try to allocate it
* on a different metaslab than existing DVAs (i.e. a unique metaslab).
*/
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
uint64_t dva_ms_id;
if (DVA_GET_ASIZE(dva) == 0)
return (B_TRUE);
if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
return (B_TRUE);
dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
return (msp->ms_id != dva_ms_id);
}
/*
* ==========================================================================
* Metaslab allocation tracing facility
* ==========================================================================
*/
/*
* Add an allocation trace element to the allocation tracing list.
*/
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
int allocator)
{
metaslab_alloc_trace_t *mat;
if (!metaslab_trace_enabled)
return;
/*
* When the tracing list reaches its maximum we remove
* the second element in the list before adding a new one.
* By removing the second element we preserve the original
	 * entry as a clue to what allocation steps have already been
* performed.
*/
if (zal->zal_size == metaslab_trace_max_entries) {
metaslab_alloc_trace_t *mat_next;
#ifdef ZFS_DEBUG
panic("too many entries in allocation list");
#endif
METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
zal->zal_size--;
mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
list_remove(&zal->zal_list, mat_next);
kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
}
mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
list_link_init(&mat->mat_list_node);
mat->mat_mg = mg;
mat->mat_msp = msp;
mat->mat_size = psize;
mat->mat_dva_id = dva_id;
mat->mat_offset = offset;
mat->mat_weight = 0;
mat->mat_allocator = allocator;
if (msp != NULL)
mat->mat_weight = msp->ms_weight;
/*
* The list is part of the zio so locking is not required. Only
* a single thread will perform allocations for a given zio.
*/
list_insert_tail(&zal->zal_list, mat);
zal->zal_size++;
ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}
void
metaslab_trace_init(zio_alloc_list_t *zal)
{
list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
offsetof(metaslab_alloc_trace_t, mat_list_node));
zal->zal_size = 0;
}
void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
metaslab_alloc_trace_t *mat;
while ((mat = list_remove_head(&zal->zal_list)) != NULL)
kmem_cache_free(metaslab_alloc_trace_cache, mat);
list_destroy(&zal->zal_list);
zal->zal_size = 0;
}
/*
* ==========================================================================
* Metaslab block operations
* ==========================================================================
*/
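/*
 * Account a queued async allocation against this group's allocator.
 * Only applies when the class's allocation throttle is enabled and the
 * allocation is not exempted via METASLAB_DONT_THROTTLE.
 */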
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag,
int flags, int allocator)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
}
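/*
 * Lock-free bump of the allocator's queue-depth ceiling: raise
 * mga_cur_max_alloc_queue_depth by one (bounded by
 * mg_max_alloc_queue_depth) and mirror the change in the class-wide
 * mca_alloc_max_slots. The CAS loop retries on concurrent updates.
 */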
static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
metaslab_class_allocator_t *mca =
&mg->mg_class->mc_allocator[allocator];
uint64_t max = mg->mg_max_alloc_queue_depth;
uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
while (cur < max) {
if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
cur, cur + 1) == cur) {
atomic_inc_64(&mca->mca_alloc_max_slots);
return;
}
cur = mga->mga_cur_max_alloc_queue_depth;
}
}
void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag,
int flags, int allocator, boolean_t io_complete)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
if (io_complete)
metaslab_group_increment_qdepth(mg, allocator);
}
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag,
int allocator)
{
#ifdef ZFS_DEBUG
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
for (int d = 0; d < ndvas; d++) {
uint64_t vdev = DVA_GET_VDEV(&dva[d]);
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
}
#endif
}
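/*
 * Attempt to allocate "size" bytes from the given metaslab. On success
 * the range is moved from ms_allocatable into the per-txg allocating
 * tree and its offset is returned; -1ULL means the class allocator
 * could not satisfy the request. ms_max_size is refreshed either way.
 */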
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
uint64_t start;
range_tree_t *rt = msp->ms_allocatable;
metaslab_class_t *mc = msp->ms_group->mg_class;
ASSERT(MUTEX_HELD(&msp->ms_lock));
VERIFY(!msp->ms_condensing);
VERIFY0(msp->ms_disabled);
start = mc->mc_ops->msop_alloc(msp, size);
if (start != -1ULL) {
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
range_tree_remove(rt, start, size);
range_tree_clear(msp->ms_trim, start, size);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
msp->ms_allocating_total += size;
/* Track the last successful allocation */
msp->ms_alloc_txg = txg;
metaslab_verify_space(msp, txg);
}
/*
* Now that we've attempted the allocation we need to update the
* metaslab's maximum block size since it may have changed.
*/
msp->ms_max_size = metaslab_largest_allocatable(msp);
return (start);
}
/*
* Find the metaslab with the highest weight that is less than what we've
* already tried. In the common case, this means that we will examine each
* metaslab at most once. Note that concurrent callers could reorder metaslabs
* by activation/passivation once we have dropped the mg_lock. If a metaslab is
* activated by another thread, and we fail to allocate from the metaslab we
* have selected, we may not try the newly-activated metaslab, and instead
* activate another metaslab. This is not optimal, but generally does not cause
* any problems (a possible exception being if every metaslab is completely full
* except for the newly-activated metaslab which we fail to examine).
*/
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
boolean_t *was_active)
{
avl_index_t idx;
avl_tree_t *t = &mg->mg_metaslab_tree;
metaslab_t *msp = avl_find(t, search, &idx);
if (msp == NULL)
msp = avl_nearest(t, idx, AVL_AFTER);
uint_t tries = 0;
for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
int i;
if (!try_hard && tries > zfs_metaslab_find_max_tries) {
METASLABSTAT_BUMP(metaslabstat_too_many_tries);
return (NULL);
}
tries++;
if (!metaslab_should_allocate(msp, asize, try_hard)) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
continue;
}
/*
* If the selected metaslab is condensing or disabled,
* skip it.
*/
if (msp->ms_condensing || msp->ms_disabled > 0)
continue;
*was_active = msp->ms_allocator != -1;
/*
* If we're activating as primary, this is our first allocation
* from this disk, so we don't need to check how close we are.
* If the metaslab under consideration was already active,
* we're getting desperate enough to steal another allocator's
* metaslab, so we still don't care about distances.
*/
if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
break;
for (i = 0; i < d; i++) {
if (want_unique &&
!metaslab_is_unique(msp, &dva[i]))
break; /* try another metaslab */
}
if (i == d)
break;
}
if (msp != NULL) {
search->ms_weight = msp->ms_weight;
search->ms_start = msp->ms_start + 1;
search->ms_allocator = msp->ms_allocator;
search->ms_primary = msp->ms_primary;
}
return (msp);
}
static void
metaslab_active_mask_verify(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
return;
if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(!msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY3S(msp->ms_allocator, ==, -1);
return;
}
}
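/*
 * Core of the per-group allocator: choose an activation weight based
 * on which DVAs of this BP already landed on this vdev, reuse the
 * allocator's active primary/secondary metaslab when possible,
 * otherwise search the group for the best candidate, and retry until
 * an allocation succeeds or no suitable metaslab remains (-1ULL).
 */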
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
metaslab_t *msp = NULL;
uint64_t offset = -1ULL;
uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
for (int i = 0; i < d; i++) {
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_SECONDARY;
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_CLAIM;
break;
}
}
/*
* If we don't have enough metaslabs active to fill the entire array, we
* just use the 0th slot.
*/
if (mg->mg_ms_ready < mg->mg_allocators * 3)
allocator = 0;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
search->ms_weight = UINT64_MAX;
search->ms_start = 0;
/*
* At the end of the metaslab tree are the already-active metaslabs,
* first the primaries, then the secondaries. When we resume searching
* through the tree, we need to consider ms_allocator and ms_primary so
* we start in the location right after where we left off, and don't
* accidentally loop forever considering the same metaslabs.
*/
search->ms_allocator = -1;
search->ms_primary = B_TRUE;
for (;;) {
boolean_t was_active = B_FALSE;
mutex_enter(&mg->mg_lock);
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
mga->mga_primary != NULL) {
msp = mga->mga_primary;
/*
* Even though we don't hold the ms_lock for the
* primary metaslab, those fields should not
* change while we hold the mg_lock. Thus it is
* safe to make assertions on them.
*/
ASSERT(msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
mga->mga_secondary != NULL) {
msp = mga->mga_secondary;
/*
* See comment above about the similar assertions
* for the primary metaslab.
*/
ASSERT(!msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else {
msp = find_valid_metaslab(mg, activation_weight, dva, d,
want_unique, asize, allocator, try_hard, zal,
search, &was_active);
}
mutex_exit(&mg->mg_lock);
if (msp == NULL) {
kmem_free(search, sizeof (*search));
return (-1ULL);
}
mutex_enter(&msp->ms_lock);
metaslab_active_mask_verify(msp);
/*
		 * This code is disabled because of issues with
		 * tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE3(ms__activation__attempt,
metaslab_t *, msp, uint64_t, activation_weight,
boolean_t, was_active);
#endif
/*
* Ensure that the metaslab we have selected is still
* capable of handling our request. It's possible that
* another thread may have changed the weight while we
* were blocked on the metaslab lock. We check the
		 * active status first to see if we need to select
		 * a new metaslab.
*/
if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
ASSERT3S(msp->ms_allocator, ==, -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If the metaslab was activated for another allocator
* while we were waiting in the ms_lock above, or it's
* a primary and we're seeking a secondary (or vice versa),
* we go back and select a new metaslab.
*/
if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
(msp->ms_allocator != -1) &&
(msp->ms_allocator != allocator || ((activation_weight ==
METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
ASSERT(msp->ms_loaded);
ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
msp->ms_allocator != -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* This metaslab was used for claiming regions allocated
* by the ZIL during pool import. Once these regions are
* claimed we don't need to keep the CLAIM bit set
* anymore. Passivate this metaslab to zero its activation
* mask.
*/
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
activation_weight != METASLAB_WEIGHT_CLAIM) {
ASSERT(msp->ms_loaded);
ASSERT3S(msp->ms_allocator, ==, -1);
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_WEIGHT_CLAIM);
mutex_exit(&msp->ms_lock);
continue;
}
metaslab_set_selected_txg(msp, txg);
int activation_error =
metaslab_activate(msp, allocator, activation_weight);
metaslab_active_mask_verify(msp);
/*
* If the metaslab was activated by another thread for
* another allocator or activation_weight (EBUSY), or it
* failed because another metaslab was assigned as primary
* for this allocator (EEXIST) we continue using this
* metaslab for our allocation, rather than going on to a
* worse metaslab (we waited for that metaslab to be loaded
* after all).
*
* If the activation failed due to an I/O error or ENOSPC we
* skip to the next metaslab.
*/
boolean_t activated;
if (activation_error == 0) {
activated = B_TRUE;
} else if (activation_error == EBUSY ||
activation_error == EEXIST) {
activated = B_FALSE;
} else {
mutex_exit(&msp->ms_lock);
continue;
}
ASSERT(msp->ms_loaded);
/*
* Now that we have the lock, recheck to see if we should
		 * continue to use this metaslab for this allocation. The
		 * metaslab is now loaded so metaslab_should_allocate()
* can accurately determine if the allocation attempt should
* proceed.
*/
if (!metaslab_should_allocate(msp, asize, try_hard)) {
/* Passivate this metaslab and select a new one. */
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
goto next;
}
/*
* If this metaslab is currently condensing then pick again
* as we can't manipulate this metaslab until it's committed
* to disk. If this metaslab is being initialized, we shouldn't
* allocate from it since the allocated region might be
* overwritten after allocation.
*/
if (msp->ms_condensing) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_CONDENSING, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
} else if (msp->ms_disabled > 0) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_DISABLED, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
}
offset = metaslab_block_alloc(msp, asize, txg);
metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
if (offset != -1ULL) {
/* Proactively passivate the metaslab, if needed */
if (activated)
metaslab_segment_may_passivate(msp);
break;
}
next:
ASSERT(msp->ms_loaded);
/*
		 * This code is disabled because of issues with
		 * tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
uint64_t, asize);
#endif
/*
* We were unable to allocate from this metaslab so determine
* a new weight for this metaslab. Now that we have loaded
* the metaslab we can provide a better hint to the metaslab
* selector.
*
* For space-based metaslabs, we use the maximum block size.
* This information is only available when the metaslab
* is loaded and is more accurate than the generic free
* space weight that was calculated by metaslab_weight().
* This information allows us to quickly compare the maximum
* available allocation in the metaslab to the allocation
* size being requested.
*
* For segment-based metaslabs, determine the new weight
* based on the highest bucket in the range tree. We
* explicitly use the loaded segment weight (i.e. the range
* tree histogram) since it contains the space that is
* currently available for allocation and is accurate
* even within a sync pass.
*/
uint64_t weight;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
weight = metaslab_largest_allocatable(msp);
WEIGHT_SET_SPACEBASED(weight);
} else {
weight = metaslab_weight_from_range_tree(msp);
}
if (activated) {
metaslab_passivate(msp, weight);
} else {
/*
* For the case where we use the metaslab that is
* active for another allocator we want to make
* sure that we retain the activation mask.
*
* Note that we could attempt to use something like
* metaslab_recalculate_weight_and_sort() that
* retains the activation mask here. That function
* uses metaslab_weight() to set the weight though
* which is not as accurate as the calculations
* above.
*/
weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(mg, msp, weight);
}
metaslab_active_mask_verify(msp);
/*
* We have just failed an allocation attempt, check
* that metaslab_should_allocate() agrees. Otherwise,
* we may end up in an infinite loop retrying the same
* metaslab.
*/
ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
mutex_exit(&msp->ms_lock);
}
mutex_exit(&msp->ms_lock);
kmem_free(search, sizeof (*search));
return (offset);
}
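/*
 * Wrapper around metaslab_group_alloc_normal() that maintains the
 * group's allocation statistics and marks the group as out of space
 * (mg_no_free_space) when even the minimum gang block size cannot be
 * allocated.
 */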
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
uint64_t offset;
ASSERT(mg->mg_initialized);
offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
dva, d, allocator, try_hard);
mutex_enter(&mg->mg_lock);
if (offset == -1ULL) {
mg->mg_failed_allocations++;
metaslab_trace_add(zal, mg, NULL, asize, d,
TRACE_GROUP_FAILURE, allocator);
if (asize == SPA_GANGBLOCKSIZE) {
/*
* This metaslab group was unable to allocate
* the minimum gang block size so it must be out of
* space. We must notify the allocation throttle
* to start skipping allocation attempts to this
* metaslab group until more space becomes available.
* Note: this failure cannot be caused by the
* allocation throttle since the allocation throttle
* is only responsible for skipping devices and
* not failing block allocations.
*/
mg->mg_no_free_space = B_TRUE;
}
}
mg->mg_allocations++;
mutex_exit(&mg->mg_lock);
return (offset);
}
/*
* Allocate a block for the specified i/o.
*/
int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
zio_alloc_list_t *zal, int allocator)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
metaslab_group_t *mg, *fast_mg, *rotor;
vdev_t *vd;
boolean_t try_hard = B_FALSE;
ASSERT(!DVA_IS_VALID(&dva[d]));
/*
* For testing, make some blocks above a certain size be gang blocks.
* This will result in more split blocks when using device removal,
* and a large number of split blocks coupled with ztest-induced
* damage can result in extremely long reconstruction times. This
* will also test spilling from special to normal.
*/
if (psize >= metaslab_force_ganging && (random_in_range(100) < 3)) {
metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
allocator);
return (SET_ERROR(ENOSPC));
}
/*
* Start at the rotor and loop through all mgs until we find something.
* Note that there's no locking on mca_rotor or mca_aliquot because
* nothing actually breaks if we miss a few updates -- we just won't
* allocate quite as evenly. It all balances out over time.
*
* If we are doing ditto or log blocks, try to spread them across
* consecutive vdevs. If we're forced to reuse a vdev before we've
* allocated all of our ditto blocks, then try and spread them out on
* that vdev as much as possible. If it turns out to not be possible,
* gradually lower our standards until anything becomes acceptable.
* Also, allocating on consecutive vdevs (as opposed to random vdevs)
* gives us hope of containing our fault domains to something we're
* able to reason about. Otherwise, any two top-level vdev failures
* will guarantee the loss of data. With consecutive allocation,
* only two adjacent top-level vdev failures will result in data loss.
*
* If we are doing gang blocks (hintdva is non-NULL), try to keep
* ourselves on the same vdev as our gang block header. That
* way, we can hope for locality in vdev_cache, plus it makes our
* fault domains something tractable.
*/
if (hintdva) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
/*
* It's possible the vdev we're using as the hint no
* longer exists or its mg has been closed (e.g. by
* device removal). Consult the rotor when
* all else fails.
*/
if (vd != NULL && vd->vdev_mg != NULL) {
mg = vdev_get_mg(vd, mc);
if (flags & METASLAB_HINTBP_AVOID)
mg = mg->mg_next;
} else {
mg = mca->mca_rotor;
}
} else if (d != 0) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
mg = vd->vdev_mg->mg_next;
} else if (flags & METASLAB_FASTWRITE) {
mg = fast_mg = mca->mca_rotor;
do {
if (fast_mg->mg_vd->vdev_pending_fastwrite <
mg->mg_vd->vdev_pending_fastwrite)
mg = fast_mg;
} while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor);
} else {
ASSERT(mca->mca_rotor != NULL);
mg = mca->mca_rotor;
}
/*
* If the hint put us into the wrong metaslab class, or into a
* metaslab group that has been passivated, just follow the rotor.
*/
if (mg->mg_class != mc || mg->mg_activation_count <= 0)
mg = mca->mca_rotor;
rotor = mg;
top:
do {
boolean_t allocatable;
ASSERT(mg->mg_activation_count == 1);
vd = mg->mg_vd;
/*
* Don't allocate from faulted devices.
*/
if (try_hard) {
spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
allocatable = vdev_allocatable(vd);
spa_config_exit(spa, SCL_ZIO, FTAG);
} else {
allocatable = vdev_allocatable(vd);
}
/*
* Determine if the selected metaslab group is eligible
* for allocations. If we're ganging then don't allow
* this metaslab group to skip allocations since that would
* inadvertently return ENOSPC and suspend the pool
* even though space is still available.
*/
if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
allocatable = metaslab_group_allocatable(mg, rotor,
flags, psize, allocator, d);
}
if (!allocatable) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_NOT_ALLOCATABLE, allocator);
goto next;
}
ASSERT(mg->mg_initialized);
/*
* Avoid writing single-copy data to an unhealthy,
* non-redundant vdev, unless we've already tried all
* other vdevs.
*/
if (vd->vdev_state < VDEV_STATE_HEALTHY &&
d == 0 && !try_hard && vd->vdev_children == 0) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_VDEV_ERROR, allocator);
goto next;
}
ASSERT(mg->mg_class == mc);
uint64_t asize = vdev_psize_to_asize(vd, psize);
ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
/*
* If we don't need to try hard, then require that the
* block be on a different metaslab from any other DVAs
* in this BP (unique=true). If we are trying hard, then
* allow any metaslab to be used (unique=false).
*/
uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
!try_hard, dva, d, allocator, try_hard);
if (offset != -1ULL) {
/*
* If we've just selected this metaslab group,
* figure out whether the corresponding vdev is
* over- or under-used relative to the pool,
* and set an allocation bias to even it out.
*
* Bias is also used to compensate for unequally
* sized vdevs so that space is allocated fairly.
*/
if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
vdev_stat_t *vs = &vd->vdev_stat;
int64_t vs_free = vs->vs_space - vs->vs_alloc;
int64_t mc_free = mc->mc_space - mc->mc_alloc;
int64_t ratio;
/*
* Calculate how much more or less we should
* try to allocate from this device during
* this iteration around the rotor.
*
* This basically introduces a zero-centered
* bias towards the devices with the most
* free space, while compensating for vdev
* size differences.
*
* Examples:
* vdev V1 = 16M/128M
* vdev V2 = 16M/128M
* ratio(V1) = 100% ratio(V2) = 100%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/128M
* ratio(V1) = 127% ratio(V2) = 72%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/512M
* ratio(V1) = 40% ratio(V2) = 160%
*/
ratio = (vs_free * mc->mc_alloc_groups * 100) /
(mc_free + 1);
mg->mg_bias = ((ratio - 100) *
(int64_t)mg->mg_aliquot) / 100;
} else if (!metaslab_bias_enabled) {
mg->mg_bias = 0;
}
if ((flags & METASLAB_FASTWRITE) ||
atomic_add_64_nv(&mca->mca_aliquot, asize) >=
mg->mg_aliquot + mg->mg_bias) {
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
}
DVA_SET_VDEV(&dva[d], vd->vdev_id);
DVA_SET_OFFSET(&dva[d], offset);
DVA_SET_GANG(&dva[d],
((flags & METASLAB_GANG_HEADER) ? 1 : 0));
DVA_SET_ASIZE(&dva[d], asize);
if (flags & METASLAB_FASTWRITE) {
atomic_add_64(&vd->vdev_pending_fastwrite,
psize);
}
return (0);
}
next:
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
} while ((mg = mg->mg_next) != rotor);
/*
* If we haven't tried hard, perhaps do so now.
*/
if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
psize <= 1 << spa->spa_min_ashift)) {
METASLABSTAT_BUMP(metaslabstat_try_hard);
try_hard = B_TRUE;
goto top;
}
memset(&dva[d], 0, sizeof (dva_t));
metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
return (SET_ERROR(ENOSPC));
}
void
metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
boolean_t checkpoint)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
ASSERT(vdev_is_concrete(vd));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
metaslab_check_free_impl(vd, offset, asize);
mutex_enter(&msp->ms_lock);
if (range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing)) {
vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
}
if (checkpoint) {
ASSERT(spa_has_checkpoint(spa));
range_tree_add(msp->ms_checkpointing, offset, asize);
} else {
range_tree_add(msp->ms_freeing, offset, asize);
}
mutex_exit(&msp->ms_lock);
}
void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset;
boolean_t *checkpoint = arg;
ASSERT3P(checkpoint, !=, NULL);
if (vd->vdev_ops->vdev_op_remap != NULL)
vdev_indirect_mark_obsolete(vd, offset, size);
else
metaslab_free_impl(vd, offset, size, *checkpoint);
}
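/*
 * Free a range on a top-level vdev: route it to the removal code if
 * the vdev is currently being removed, remap it through indirect vdevs
 * when necessary, or hand it to metaslab_free_concrete() otherwise.
 */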
static void
metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
boolean_t checkpoint)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
return;
if (spa->spa_vdev_removal != NULL &&
spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
vdev_is_concrete(vd)) {
/*
* Note: we check if the vdev is concrete because when
* we complete the removal, we first change the vdev to be
* an indirect vdev (in open context), and then (in syncing
* context) clear spa_vdev_removal.
*/
free_from_removing_vdev(vd, offset, size);
} else if (vd->vdev_ops->vdev_op_remap != NULL) {
vdev_indirect_mark_obsolete(vd, offset, size);
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_free_impl_cb, &checkpoint);
} else {
metaslab_free_concrete(vd, offset, size, checkpoint);
}
}
typedef struct remap_blkptr_cb_arg {
blkptr_t *rbca_bp;
spa_remap_cb_t rbca_cb;
vdev_t *rbca_remap_vd;
uint64_t rbca_remap_offset;
void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;
static void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
remap_blkptr_cb_arg_t *rbca = arg;
blkptr_t *bp = rbca->rbca_bp;
/* We can not remap split blocks. */
if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
return;
ASSERT0(inner_offset);
if (rbca->rbca_cb != NULL) {
/*
* At this point we know that we are not handling split
* blocks and we invoke the callback on the previous
* vdev which must be indirect.
*/
ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
/* set up remap_blkptr_cb_arg for the next call */
rbca->rbca_remap_vd = vd;
rbca->rbca_remap_offset = offset;
}
/*
* The phys birth time is that of dva[0]. This ensures that we know
* when each dva was written, so that resilver can determine which
* blocks need to be scrubbed (i.e. those written during the time
* the vdev was offline). It also ensures that the key used in
* the ARC hash table is unique (i.e. dva[0] + phys_birth). If
* we didn't change the phys_birth, a lookup in the ARC for a
* remapped BP could find the data that was previously stored at
* this vdev + offset.
*/
vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
DVA_GET_VDEV(&bp->blk_dva[0]));
vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
DVA_SET_OFFSET(&bp->blk_dva[0], offset);
}
/*
* If the block pointer contains any indirect DVAs, modify them to refer to
* concrete DVAs. Note that this will sometimes not be possible, leaving
* the indirect DVA in place. This happens if the indirect DVA spans multiple
* segments in the mapping (i.e. it is a "split block").
*
* If the BP was remapped, calls the callback on the original dva (note the
* callback can be called multiple times if the original indirect DVA refers
* to another indirect DVA, etc).
*
* Returns TRUE if the BP was remapped.
*/
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
remap_blkptr_cb_arg_t rbca;
if (!zfs_remap_blkptr_enable)
return (B_FALSE);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
return (B_FALSE);
/*
* Dedup BP's can not be remapped, because ddt_phys_select() depends
* on DVA[0] being the same in the BP as in the DDT (dedup table).
*/
if (BP_GET_DEDUP(bp))
return (B_FALSE);
/*
* Gang blocks can not be remapped, because
* zio_checksum_gang_verifier() depends on the DVA[0] that's in
* the BP used to read the gang block header (GBH) being the same
* as the DVA[0] that we allocated for the GBH.
*/
if (BP_IS_GANG(bp))
return (B_FALSE);
/*
* Embedded BP's have no DVA to remap.
*/
if (BP_GET_NDVAS(bp) < 1)
return (B_FALSE);
/*
* Note: we only remap dva[0]. If we remapped other dvas, we
* would no longer know what their phys birth txg is.
*/
dva_t *dva = &bp->blk_dva[0];
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops->vdev_op_remap == NULL)
return (B_FALSE);
rbca.rbca_bp = bp;
rbca.rbca_cb = callback;
rbca.rbca_remap_vd = vd;
rbca.rbca_remap_offset = offset;
rbca.rbca_cb_arg = arg;
/*
* remap_blkptr_cb() will be called in order for each level of
* indirection, until a concrete vdev is reached or a split block is
	 * encountered. rbca_remap_vd and rbca_remap_offset are updated
	 * within the callback as we go from one indirect vdev to the next
	 * (either concrete or indirect again).
*/
vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
/* Check if the DVA wasn't remapped because it is a split block */
if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
return (B_FALSE);
return (B_TRUE);
}
/*
* Undo the allocation of a DVA which happened in the given transaction group.
*/
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
metaslab_t *msp;
vdev_t *vd;
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (txg > spa_freeze_txg(spa))
return;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
(offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
(u_longlong_t)vdev, (u_longlong_t)offset,
(u_longlong_t)size);
return;
}
ASSERT(!vd->vdev_removing);
ASSERT(vdev_is_concrete(vd));
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total -= size;
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
range_tree_add(msp->ms_allocatable, offset, size);
mutex_exit(&msp->ms_lock);
}
/*
* Free the block represented by the given DVA.
*/
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, vdev);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (DVA_GET_GANG(dva)) {
size = vdev_gang_header_asize(vd);
}
metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
* Reserve some allocation slots. The reservation system must be called
* before we call into the allocator. If there aren't any available slots
* then the I/O will be throttled until an I/O completes and its slots are
* freed up. The function returns true if it was successful in placing
* the reservation.
*/
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
zio_t *zio, int flags)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
uint64_t max = mca->mca_alloc_max_slots;
ASSERT(mc->mc_alloc_throttle_enabled);
if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
/*
* The potential race between _count() and _add() is covered
* by the allocator lock in most cases, or irrelevant due to
* GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others.
* But even if we assume some other non-existing scenario, the
		 * worst that can happen is that a few more I/Os get to
		 * allocation earlier, which is not a problem.
*
* We reserve the slots individually so that we can unreserve
* them individually when an I/O completes.
*/
- for (int d = 0; d < slots; d++)
- zfs_refcount_add(&mca->mca_alloc_slots, zio);
+ zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio);
zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
return (B_TRUE);
}
return (B_FALSE);
}
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
int allocator, zio_t *zio)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
ASSERT(mc->mc_alloc_throttle_enabled);
- for (int d = 0; d < slots; d++)
- zfs_refcount_remove(&mca->mca_alloc_slots, zio);
+ zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio);
}
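/*
 * Claim a specific range on a concrete vdev (txg == 0 denotes a dry
 * run): activate the metaslab if necessary, verify the range is still
 * present in ms_allocatable, and, unless this is a dry run, remove it
 * from ms_allocatable (also dirtying it into the allocating tree when
 * the pool is writeable).
 */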
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
uint64_t txg)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
int error = 0;
if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
return (SET_ERROR(ENXIO));
ASSERT3P(vd->vdev_ms, !=, NULL);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
if (error == EBUSY) {
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
error = 0;
}
}
if (error == 0 &&
!range_tree_contains(msp->ms_allocatable, offset, size))
error = SET_ERROR(ENOENT);
if (error || txg == 0) { /* txg == 0 indicates dry run */
mutex_exit(&msp->ms_lock);
return (error);
}
VERIFY(!msp->ms_condensing);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
msp->ms_size);
range_tree_remove(msp->ms_allocatable, offset, size);
range_tree_clear(msp->ms_trim, offset, size);
if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (!multilist_link_active(&msp->ms_class_txg_node)) {
msp->ms_selected_txg = txg;
multilist_sublist_insert_head(mls, msp);
}
multilist_sublist_unlock(mls);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total += size;
}
mutex_exit(&msp->ms_lock);
return (0);
}
typedef struct metaslab_claim_cb_arg_t {
uint64_t mcca_txg;
int mcca_error;
} metaslab_claim_cb_arg_t;
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner_offset;
metaslab_claim_cb_arg_t *mcca_arg = arg;
if (mcca_arg->mcca_error == 0) {
mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
size, mcca_arg->mcca_txg);
}
}
int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
if (vd->vdev_ops->vdev_op_remap != NULL) {
metaslab_claim_cb_arg_t arg;
/*
* Only zdb(8) can claim on indirect vdevs. This is used
* to detect leaks of mapped space (that are not accounted
* for in the obsolete counts, spacemap, or bpobj).
*/
ASSERT(!spa_writeable(vd->vdev_spa));
arg.mcca_error = 0;
arg.mcca_txg = txg;
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_claim_impl_cb, &arg);
if (arg.mcca_error == 0) {
arg.mcca_error = metaslab_claim_concrete(vd,
offset, size, txg);
}
return (arg.mcca_error);
} else {
return (metaslab_claim_concrete(vd, offset, size, txg));
}
}
/*
* Intent log support: upon opening the pool after a crash, notify the SPA
* of blocks that the intent log has allocated for immediate write, but
* which are still considered free by the SPA because the last transaction
* group didn't commit yet.
*/
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
return (SET_ERROR(ENXIO));
}
ASSERT(DVA_IS_VALID(dva));
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
return (metaslab_claim_impl(vd, offset, size, txg));
}
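/*
 * Allocate "ndvas" DVAs for the given block pointer, updating the
 * per-group queue-depth accounting for each successful DVA and
 * unwinding all partial allocations if any DVA fails.
 */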
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
dva_t *dva = bp->blk_dva;
dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
int error = 0;
ASSERT(bp->blk_birth == 0);
ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
if (mc->mc_allocator[allocator].mca_rotor == NULL) {
/* no vdevs in this class */
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (SET_ERROR(ENOSPC));
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
ASSERT(BP_GET_NDVAS(bp) == 0);
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
ASSERT3P(zal, !=, NULL);
for (int d = 0; d < ndvas; d++) {
error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
txg, flags, zal, allocator);
if (error != 0) {
for (d--; d >= 0; d--) {
metaslab_unalloc_dva(spa, &dva[d], txg);
metaslab_group_alloc_decrement(spa,
DVA_GET_VDEV(&dva[d]), zio, flags,
allocator, B_FALSE);
memset(&dva[d], 0, sizeof (dva_t));
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (error);
} else {
/*
* Update the metaslab group's queue depth
* based on the newly allocated dva.
*/
metaslab_group_alloc_increment(spa,
DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
}
}
ASSERT(error == 0);
ASSERT(BP_GET_NDVAS(bp) == ndvas);
spa_config_exit(spa, SCL_ALLOC, FTAG);
BP_SET_BIRTH(bp, txg, 0);
return (0);
}
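/*
 * Free every DVA in the block pointer. When "now" is set the
 * allocation is undone in place; otherwise the frees go through the
 * normal freeing pipeline, with checkpointed blocks routed to the
 * checkpoint accounting.
 */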
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
/*
* If we have a checkpoint for the pool we need to make sure that
* the blocks that we free that are part of the checkpoint won't be
* reused until the checkpoint is discarded or we revert to it.
*
* The checkpoint flag is passed down the metaslab_free code path
* and is set whenever we want to add a block to the checkpoint's
* accounting. That is, we "checkpoint" blocks that existed at the
* time the checkpoint was created and are therefore referenced by
* the checkpointed uberblock.
*
	 * Note that we don't checkpoint any blocks if the current
* syncing txg <= spa_checkpoint_txg. We want these frees to sync
* normally as they will be referenced by the checkpointed uberblock.
*/
boolean_t checkpoint = B_FALSE;
if (bp->blk_birth <= spa->spa_checkpoint_txg &&
spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
/*
* At this point, if the block is part of the checkpoint
* there is no way it was created in the current txg.
*/
ASSERT(!now);
ASSERT3U(spa_syncing_txg(spa), ==, txg);
checkpoint = B_TRUE;
}
spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
if (now) {
metaslab_unalloc_dva(spa, &dva[d], txg);
} else {
ASSERT3U(txg, ==, spa_syncing_txg(spa));
metaslab_free_dva(spa, &dva[d], checkpoint);
}
}
spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
int error = 0;
ASSERT(!BP_IS_HOLE(bp));
if (txg != 0) {
/*
* First do a dry run to make sure all DVAs are claimable,
* so we don't have to unwind from partial failures below.
*/
if ((error = metaslab_claim(spa, bp, 0)) != 0)
return (error);
}
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
error = metaslab_claim_dva(spa, &dva[d], txg);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
ASSERT(error == 0 || txg == 0);
return (error);
}
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
uint64_t psize = BP_GET_PSIZE(bp);
int d;
vdev_t *vd;
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(psize > 0);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (d = 0; d < ndvas; d++) {
if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
continue;
atomic_add_64(&vd->vdev_pending_fastwrite, psize);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
uint64_t psize = BP_GET_PSIZE(bp);
int d;
vdev_t *vd;
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(psize > 0);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (d = 0; d < ndvas; d++) {
if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
continue;
ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
(void) inner, (void) arg;
if (vd->vdev_ops == &vdev_indirect_ops)
return;
metaslab_check_free_impl(vd, offset, size);
}
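/*
 * Debug check (ZFS_DEBUG_ZIO_FREE) that a range being freed is not
 * present in the allocatable tree or anywhere in the freeing pipeline,
 * recursing through indirect vdev mappings as needed.
 */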
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
metaslab_t *msp;
spa_t *spa __maybe_unused = vd->vdev_spa;
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
if (vd->vdev_ops->vdev_op_remap != NULL) {
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_check_free_impl_cb, NULL);
return;
}
ASSERT(vdev_is_concrete(vd));
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if (msp->ms_loaded) {
range_tree_verify_not_present(msp->ms_allocatable,
offset, size);
}
/*
* Check all segments that currently exist in the freeing pipeline.
*
* It would intuitively make sense to also check the current allocating
* tree since metaslab_unalloc_dva() exists for extents that are
* allocated and freed in the same sync pass within the same txg.
* Unfortunately there are places (e.g. the ZIL) where we allocate a
* segment but then we free part of it within the same txg
* [see zil_sync()]. Thus, we don't call range_tree_verify() in the
* current allocating tree.
*/
range_tree_verify_not_present(msp->ms_freeing, offset, size);
range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
range_tree_verify_not_present(msp->ms_freed, offset, size);
for (int j = 0; j < TXG_DEFER_SIZE; j++)
range_tree_verify_not_present(msp->ms_defer[j], offset, size);
range_tree_verify_not_present(msp->ms_trim, offset, size);
mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
if (DVA_GET_GANG(&bp->blk_dva[i]))
size = vdev_gang_header_asize(vd);
ASSERT3P(vd, !=, NULL);
metaslab_check_free_impl(vd, offset, size);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
while (mg->mg_disabled_updating) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
}
static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
ASSERT(mg->mg_disabled_updating);
while (mg->mg_ms_disabled >= max_disabled_ms) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
mg->mg_ms_disabled++;
ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}
/*
* Mark the metaslab as disabled to prevent any allocations on this metaslab.
* We must also track how many metaslabs are currently disabled within a
* metaslab group and limit them to prevent allocation failures from
* occurring because all metaslabs are disabled.
*/
void
metaslab_disable(metaslab_t *msp)
{
ASSERT(!MUTEX_HELD(&msp->ms_lock));
metaslab_group_t *mg = msp->ms_group;
mutex_enter(&mg->mg_ms_disabled_lock);
/*
* To keep an accurate count of how many threads have disabled
* a specific metaslab group, we only allow one thread to mark
* the metaslab group at a time. This ensures that the value of
* ms_disabled will be accurate when we decide to mark a metaslab
* group as disabled. To do this we force all other threads
	 * to wait until the metaslab group's mg_disabled_updating flag
	 * is no longer set.
*/
metaslab_group_disable_wait(mg);
mg->mg_disabled_updating = B_TRUE;
if (msp->ms_disabled == 0) {
metaslab_group_disabled_increment(mg);
}
mutex_enter(&msp->ms_lock);
msp->ms_disabled++;
mutex_exit(&msp->ms_lock);
mg->mg_disabled_updating = B_FALSE;
cv_broadcast(&mg->mg_ms_disabled_cv);
mutex_exit(&mg->mg_ms_disabled_lock);
}
void
metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
/*
* Wait for the outstanding IO to be synced to prevent newly
 * allocated blocks from being overwritten. This is used by
 * initialize and TRIM, which modify unallocated space.
*/
if (sync)
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&mg->mg_ms_disabled_lock);
mutex_enter(&msp->ms_lock);
if (--msp->ms_disabled == 0) {
mg->mg_ms_disabled--;
cv_broadcast(&mg->mg_ms_disabled_cv);
if (unload)
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&mg->mg_ms_disabled_lock);
}
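/*
 * Illustrative sketch (not part of this change): callers such as TRIM and
 * initialize bracket work that must not race with new allocations between
 * metaslab_disable() and metaslab_enable(). The helper and its
 * do_exclusive_work() callback below are hypothetical and only show the
 * intended call pattern.
 */
static void
metaslab_disable_example(metaslab_t *msp,
    void (*do_exclusive_work)(metaslab_t *))
{
	/*
	 * Blocks further allocations from this metaslab; may wait if too
	 * many metaslabs in the group are already disabled.
	 */
	metaslab_disable(msp);

	do_exclusive_work(msp);

	/*
	 * sync=B_TRUE waits for outstanding I/O to be synced before
	 * re-enabling; unload=B_FALSE keeps the metaslab loaded.
	 */
	metaslab_enable(msp, B_TRUE, B_FALSE);
}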
void
metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
{
ms->ms_unflushed_dirty = dirty;
}
static void
metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
{
vdev_t *vd = ms->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
metaslab_unflushed_phys_t entry = {
.msp_unflushed_txg = metaslab_unflushed_txg(ms),
};
uint64_t entry_size = sizeof (entry);
uint64_t entry_offset = ms->ms_id * entry_size;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object);
if (err == ENOENT) {
object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object, tx));
} else {
VERIFY0(err);
}
dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
&entry, tx);
}
void
metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
{
ms->ms_unflushed_txg = txg;
metaslab_update_ondisk_flush_data(ms, tx);
}
boolean_t
metaslab_unflushed_dirty(metaslab_t *ms)
{
return (ms->ms_unflushed_dirty);
}
uint64_t
metaslab_unflushed_txg(metaslab_t *ms)
{
return (ms->ms_unflushed_txg);
}
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW,
"Allocation granularity (a.k.a. stripe size)");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
"Load all metaslabs when pool is first opened");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
"Prevent metaslabs from being unloaded");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
"Preload potential metaslabs during reassessment");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
"Delay in txgs after metaslab was last used before unloading");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
"Delay in milliseconds after metaslab was last used before unloading");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
"Percentage of metaslab group size that should be free to make it "
"eligible for allocation");
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
"Percentage of metaslab group size that should be considered eligible "
"for allocations unless all metaslab groups within the metaslab class "
"have also crossed this threshold");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
ZMOD_RW,
"Use the fragmentation metric to prefer less fragmented metaslabs");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
ZMOD_RW, "Fragmentation for metaslab to allow allocation");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
"Prefer metaslabs with lower LBAs");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
"Enable metaslab group biasing");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
ZMOD_RW, "Enable segment-based metaslab selection");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
"Segment-based metaslab selection maximum buckets before switching");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW,
"Blocks larger than this size are forced to be gang blocks");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
"Max distance (bytes) to search forward before using size tree");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
"When looking in size tree, use largest segment instead of exact fit");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64,
ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
"Percentage of memory that can be used to store metaslab range trees");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
ZMOD_RW, "Try hard to allocate before ganging");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
"Normally only consider this many of the best metaslabs in each vdev");
diff --git a/sys/contrib/openzfs/module/zfs/range_tree.c b/sys/contrib/openzfs/module/zfs/range_tree.c
index 894c30fcae16..5174e2c46633 100644
--- a/sys/contrib/openzfs/module/zfs/range_tree.c
+++ b/sys/contrib/openzfs/module/zfs/range_tree.c
@@ -1,851 +1,867 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2019 by Delphix. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zio.h>
#include <sys/range_tree.h>
/*
* Range trees are tree-based data structures that can be used to
* track free space or generally any space allocation information.
* A range tree keeps track of individual segments and automatically
* provides facilities such as adjacent extent merging and extent
* splitting in response to range add/remove requests.
*
* A range tree starts out completely empty, with no segments in it.
* Adding an allocation via range_tree_add to the range tree can either:
* 1) create a new extent
* 2) extend an adjacent extent
* 3) merge two adjacent extents
* Conversely, removing an allocation via range_tree_remove can:
* 1) completely remove an extent
* 2) shorten an extent (if the allocation was near one of its ends)
* 3) split an extent into two extents, in effect punching a hole
*
* A range tree is also capable of 'bridging' gaps when adding
* allocations. This is useful for cases when close proximity of
* allocations is an important detail that needs to be represented
* in the range tree. See range_tree_set_gap(). The default behavior
* is not to bridge gaps (i.e. the maximum allowed gap size is 0).
*
* In order to traverse a range tree, use either the range_tree_walk()
* or range_tree_vacate() functions.
*
* To obtain more accurate information on individual segment
* operations that the range tree performs "under the hood", you can
* specify a set of callbacks by passing a range_tree_ops_t structure
* to the range_tree_create function. Any callbacks that are non-NULL
* are then called at the appropriate times.
*
* The range tree code also supports a special variant of range trees
* that can bridge small gaps between segments. This kind of tree is used
* by the dsl scanning code to group I/Os into mostly sequential chunks to
* optimize disk performance. The code here attempts to do this with as
* little memory and computational overhead as possible. One limitation of
* this implementation is that segments of range trees with gaps can only
* support removing complete segments.
*/
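/*
 * Illustrative sketch (not part of this change): minimal use of the public
 * range tree API described above. All functions used here are defined in
 * this file; the example function itself is hypothetical.
 */
static void
range_tree_usage_example(void)
{
	/* A 64-bit tree with no ops, no offset bias, no shift, no gap. */
	range_tree_t *rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);

	range_tree_add(rt, 0x1000, 0x1000);	/* [0x1000, 0x2000) */
	range_tree_add(rt, 0x2000, 0x1000);	/* merges into [0x1000, 0x3000) */
	range_tree_remove(rt, 0x1800, 0x400);	/* punches a hole */

	ASSERT(range_tree_contains(rt, 0x1000, 0x800));
	ASSERT(!range_tree_contains(rt, 0x1800, 0x400));

	/* A tree must be empty before it is destroyed. */
	range_tree_vacate(rt, NULL, NULL);
	range_tree_destroy(rt);
}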
static inline void
rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
{
ASSERT3U(rt->rt_type, <, RANGE_SEG_NUM_TYPES);
size_t size = 0;
switch (rt->rt_type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
break;
case RANGE_SEG_GAP:
size = sizeof (range_seg_gap_t);
break;
default:
__builtin_unreachable();
}
memcpy(dest, src, size);
}
void
range_tree_stat_verify(range_tree_t *rt)
{
range_seg_t *rs;
zfs_btree_index_t where;
uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
int i;
for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL;
rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
int idx = highbit64(size) - 1;
hist[idx]++;
ASSERT3U(hist[idx], !=, 0);
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
if (hist[i] != rt->rt_histogram[i]) {
zfs_dbgmsg("i=%d, hist=%px, hist=%llu, rt_hist=%llu",
i, hist, (u_longlong_t)hist[i],
(u_longlong_t)rt->rt_histogram[i]);
}
VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
}
}
static void
range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs)
{
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
int idx = highbit64(size) - 1;
ASSERT(size != 0);
ASSERT3U(idx, <,
sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));
rt->rt_histogram[idx]++;
ASSERT3U(rt->rt_histogram[idx], !=, 0);
}
static void
range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs)
{
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
int idx = highbit64(size) - 1;
ASSERT(size != 0);
ASSERT3U(idx, <,
sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));
ASSERT3U(rt->rt_histogram[idx], !=, 0);
rt->rt_histogram[idx]--;
}
+__attribute__((always_inline)) inline
static int
range_tree_seg32_compare(const void *x1, const void *x2)
{
const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
+__attribute__((always_inline)) inline
static int
range_tree_seg64_compare(const void *x1, const void *x2)
{
const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
+__attribute__((always_inline)) inline
static int
range_tree_seg_gap_compare(const void *x1, const void *x2)
{
const range_seg_gap_t *r1 = x1;
const range_seg_gap_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
+ZFS_BTREE_FIND_IN_BUF_FUNC(range_tree_seg32_find_in_buf, range_seg32_t,
+ range_tree_seg32_compare)
+
+ZFS_BTREE_FIND_IN_BUF_FUNC(range_tree_seg64_find_in_buf, range_seg64_t,
+ range_tree_seg64_compare)
+
+ZFS_BTREE_FIND_IN_BUF_FUNC(range_tree_seg_gap_find_in_buf, range_seg_gap_t,
+ range_tree_seg_gap_compare)
+
range_tree_t *
range_tree_create_gap(const range_tree_ops_t *ops, range_seg_type_t type,
void *arg, uint64_t start, uint64_t shift, uint64_t gap)
{
range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP);
ASSERT3U(shift, <, 64);
ASSERT3U(type, <=, RANGE_SEG_NUM_TYPES);
size_t size;
int (*compare) (const void *, const void *);
+ bt_find_in_buf_f bt_find;
switch (type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
compare = range_tree_seg32_compare;
+ bt_find = range_tree_seg32_find_in_buf;
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
compare = range_tree_seg64_compare;
+ bt_find = range_tree_seg64_find_in_buf;
break;
case RANGE_SEG_GAP:
size = sizeof (range_seg_gap_t);
compare = range_tree_seg_gap_compare;
+ bt_find = range_tree_seg_gap_find_in_buf;
break;
default:
panic("Invalid range seg type %d", type);
}
- zfs_btree_create(&rt->rt_root, compare, size);
+ zfs_btree_create(&rt->rt_root, compare, bt_find, size);
rt->rt_ops = ops;
rt->rt_gap = gap;
rt->rt_arg = arg;
rt->rt_type = type;
rt->rt_start = start;
rt->rt_shift = shift;
if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL)
rt->rt_ops->rtop_create(rt, rt->rt_arg);
return (rt);
}
range_tree_t *
range_tree_create(const range_tree_ops_t *ops, range_seg_type_t type,
void *arg, uint64_t start, uint64_t shift)
{
return (range_tree_create_gap(ops, type, arg, start, shift, 0));
}
void
range_tree_destroy(range_tree_t *rt)
{
VERIFY0(rt->rt_space);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
rt->rt_ops->rtop_destroy(rt, rt->rt_arg);
zfs_btree_destroy(&rt->rt_root);
kmem_free(rt, sizeof (*rt));
}
void
range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta)
{
if (delta < 0 && delta * -1 >= rs_get_fill(rs, rt)) {
zfs_panic_recover("zfs: attempting to decrease fill to or "
"below 0; probable double remove in segment [%llx:%llx]",
(longlong_t)rs_get_start(rs, rt),
(longlong_t)rs_get_end(rs, rt));
}
if (rs_get_fill(rs, rt) + delta > rs_get_end(rs, rt) -
rs_get_start(rs, rt)) {
zfs_panic_recover("zfs: attempting to increase fill beyond "
"max; probable double add in segment [%llx:%llx]",
(longlong_t)rs_get_start(rs, rt),
(longlong_t)rs_get_end(rs, rt));
}
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
rs_set_fill(rs, rt, rs_get_fill(rs, rt) + delta);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}
static void
range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
{
range_tree_t *rt = arg;
zfs_btree_index_t where;
range_seg_t *rs_before, *rs_after, *rs;
range_seg_max_t tmp, rsearch;
uint64_t end = start + size, gap = rt->rt_gap;
uint64_t bridge_size = 0;
boolean_t merge_before, merge_after;
ASSERT3U(size, !=, 0);
ASSERT3U(fill, <=, size);
ASSERT3U(start + size, >, start);
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, end);
rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
/*
* If this is a gap-supporting range tree, it is possible that we
* are inserting into an existing segment. In this case simply
* bump the fill count and call the remove / add callbacks. If the
* new range will extend an existing segment, we remove the
* existing one, apply the new extent to it and re-insert it using
* the normal code paths.
*/
if (rs != NULL) {
if (gap == 0) {
zfs_panic_recover("zfs: adding existent segment to "
"range tree (offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size);
return;
}
uint64_t rstart = rs_get_start(rs, rt);
uint64_t rend = rs_get_end(rs, rt);
if (rstart <= start && rend >= end) {
range_tree_adjust_fill(rt, rs, fill);
return;
}
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
range_tree_stat_decr(rt, rs);
rt->rt_space -= rend - rstart;
fill += rs_get_fill(rs, rt);
start = MIN(start, rstart);
end = MAX(end, rend);
size = end - start;
zfs_btree_remove(&rt->rt_root, rs);
range_tree_add_impl(rt, start, size, fill);
return;
}
ASSERT3P(rs, ==, NULL);
/*
* Determine whether or not we will have to merge with our neighbors.
* If gap != 0, we might need to merge with our neighbors even if we
* aren't directly touching.
*/
zfs_btree_index_t where_before, where_after;
rs_before = zfs_btree_prev(&rt->rt_root, &where, &where_before);
rs_after = zfs_btree_next(&rt->rt_root, &where, &where_after);
merge_before = (rs_before != NULL && rs_get_end(rs_before, rt) >=
start - gap);
merge_after = (rs_after != NULL && rs_get_start(rs_after, rt) <= end +
gap);
if (merge_before && gap != 0)
bridge_size += start - rs_get_end(rs_before, rt);
if (merge_after && gap != 0)
bridge_size += rs_get_start(rs_after, rt) - end;
if (merge_before && merge_after) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
}
range_tree_stat_decr(rt, rs_before);
range_tree_stat_decr(rt, rs_after);
rs_copy(rs_after, &tmp, rt);
uint64_t before_start = rs_get_start_raw(rs_before, rt);
uint64_t before_fill = rs_get_fill(rs_before, rt);
uint64_t after_fill = rs_get_fill(rs_after, rt);
zfs_btree_remove_idx(&rt->rt_root, &where_before);
/*
* We have to re-find the node because our old reference is
* invalid as soon as we do any mutating btree operations.
*/
rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
ASSERT3P(rs_after, !=, NULL);
rs_set_start_raw(rs_after, rt, before_start);
rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
rs = rs_after;
} else if (merge_before) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
range_tree_stat_decr(rt, rs_before);
uint64_t before_fill = rs_get_fill(rs_before, rt);
rs_set_end(rs_before, rt, end);
rs_set_fill(rs_before, rt, before_fill + fill);
rs = rs_before;
} else if (merge_after) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
range_tree_stat_decr(rt, rs_after);
uint64_t after_fill = rs_get_fill(rs_after, rt);
rs_set_start(rs_after, rt, start);
rs_set_fill(rs_after, rt, after_fill + fill);
rs = rs_after;
} else {
rs = &tmp;
rs_set_start(rs, rt, start);
rs_set_end(rs, rt, end);
rs_set_fill(rs, rt, fill);
zfs_btree_add_idx(&rt->rt_root, rs, &where);
}
if (gap != 0) {
ASSERT3U(rs_get_fill(rs, rt), <=, rs_get_end(rs, rt) -
rs_get_start(rs, rt));
} else {
ASSERT3U(rs_get_fill(rs, rt), ==, rs_get_end(rs, rt) -
rs_get_start(rs, rt));
}
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
range_tree_stat_incr(rt, rs);
rt->rt_space += size + bridge_size;
}
void
range_tree_add(void *arg, uint64_t start, uint64_t size)
{
range_tree_add_impl(arg, start, size, size);
}
static void
range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
boolean_t do_fill)
{
zfs_btree_index_t where;
range_seg_t *rs;
range_seg_max_t rsearch, rs_tmp;
uint64_t end = start + size;
boolean_t left_over, right_over;
VERIFY3U(size, !=, 0);
VERIFY3U(size, <=, rt->rt_space);
if (rt->rt_type == RANGE_SEG64)
ASSERT3U(start + size, >, start);
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, end);
rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
/* Make sure we completely overlap with someone */
if (rs == NULL) {
zfs_panic_recover("zfs: removing nonexistent segment from "
"range tree (offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size);
return;
}
/*
* Range trees with gap support must only remove complete segments
* from the tree. This allows us to maintain accurate fill accounting
* and to ensure that bridged sections are not leaked. If we need to
* remove less than the full segment, we can only adjust the fill count.
*/
if (rt->rt_gap != 0) {
if (do_fill) {
if (rs_get_fill(rs, rt) == size) {
start = rs_get_start(rs, rt);
end = rs_get_end(rs, rt);
size = end - start;
} else {
range_tree_adjust_fill(rt, rs, -size);
return;
}
} else if (rs_get_start(rs, rt) != start ||
rs_get_end(rs, rt) != end) {
zfs_panic_recover("zfs: freeing partial segment of "
"gap tree (offset=%llx size=%llx) of "
"(offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size,
(longlong_t)rs_get_start(rs, rt),
(longlong_t)rs_get_end(rs, rt) - rs_get_start(rs,
rt));
return;
}
}
VERIFY3U(rs_get_start(rs, rt), <=, start);
VERIFY3U(rs_get_end(rs, rt), >=, end);
left_over = (rs_get_start(rs, rt) != start);
right_over = (rs_get_end(rs, rt) != end);
range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
if (left_over && right_over) {
range_seg_max_t newseg;
rs_set_start(&newseg, rt, end);
rs_set_end_raw(&newseg, rt, rs_get_end_raw(rs, rt));
rs_set_fill(&newseg, rt, rs_get_end(rs, rt) - end);
range_tree_stat_incr(rt, &newseg);
// This modifies the buffer already inside the range tree
rs_set_end(rs, rt, start);
rs_copy(rs, &rs_tmp, rt);
if (zfs_btree_next(&rt->rt_root, &where, &where) != NULL)
zfs_btree_add_idx(&rt->rt_root, &newseg, &where);
else
zfs_btree_add(&rt->rt_root, &newseg);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, &newseg, rt->rt_arg);
} else if (left_over) {
// This modifies the buffer already inside the range tree
rs_set_end(rs, rt, start);
rs_copy(rs, &rs_tmp, rt);
} else if (right_over) {
// This modifies the buffer already inside the range tree
rs_set_start(rs, rt, end);
rs_copy(rs, &rs_tmp, rt);
} else {
zfs_btree_remove_idx(&rt->rt_root, &where);
rs = NULL;
}
if (rs != NULL) {
/*
* The fill of the leftover segment will always be equal to
* the size, since we do not support removing partial segments
* of range trees with gaps.
*/
rs_set_fill_raw(rs, rt, rs_get_end_raw(rs, rt) -
rs_get_start_raw(rs, rt));
range_tree_stat_incr(rt, &rs_tmp);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, &rs_tmp, rt->rt_arg);
}
rt->rt_space -= size;
}
void
range_tree_remove(void *arg, uint64_t start, uint64_t size)
{
range_tree_remove_impl(arg, start, size, B_FALSE);
}
void
range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size)
{
range_tree_remove_impl(rt, start, size, B_TRUE);
}
void
range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
uint64_t newstart, uint64_t newsize)
{
int64_t delta = newsize - (rs_get_end(rs, rt) - rs_get_start(rs, rt));
range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
rs_set_start(rs, rt, newstart);
rs_set_end(rs, rt, newstart + newsize);
range_tree_stat_incr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
rt->rt_space += delta;
}
static range_seg_t *
range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
{
range_seg_max_t rsearch;
uint64_t end = start + size;
VERIFY(size != 0);
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, end);
return (zfs_btree_find(&rt->rt_root, &rsearch, NULL));
}
range_seg_t *
range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size)
{
if (rt->rt_type == RANGE_SEG64)
ASSERT3U(start + size, >, start);
range_seg_t *rs = range_tree_find_impl(rt, start, size);
if (rs != NULL && rs_get_start(rs, rt) <= start &&
rs_get_end(rs, rt) >= start + size) {
return (rs);
}
return (NULL);
}
void
range_tree_verify_not_present(range_tree_t *rt, uint64_t off, uint64_t size)
{
range_seg_t *rs = range_tree_find(rt, off, size);
if (rs != NULL)
panic("segment already in tree; rs=%p", (void *)rs);
}
boolean_t
range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size)
{
return (range_tree_find(rt, start, size) != NULL);
}
/*
* Returns the first subset of the given range which overlaps with the range
* tree. Returns true if there is a segment in the range, and false if there
* isn't.
*/
boolean_t
range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
uint64_t *ostart, uint64_t *osize)
{
if (rt->rt_type == RANGE_SEG64)
ASSERT3U(start + size, >, start);
range_seg_max_t rsearch;
rs_set_start(&rsearch, rt, start);
rs_set_end_raw(&rsearch, rt, rs_get_start_raw(&rsearch, rt) + 1);
zfs_btree_index_t where;
range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
if (rs != NULL) {
*ostart = start;
*osize = MIN(size, rs_get_end(rs, rt) - start);
return (B_TRUE);
}
rs = zfs_btree_next(&rt->rt_root, &where, &where);
if (rs == NULL || rs_get_start(rs, rt) > start + size)
return (B_FALSE);
*ostart = rs_get_start(rs, rt);
*osize = MIN(start + size, rs_get_end(rs, rt)) -
rs_get_start(rs, rt);
return (B_TRUE);
}
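/*
 * Illustrative sketch (not part of this change): range_tree_find_in() only
 * returns the first overlapping subset, so a hypothetical caller that wants
 * every overlap within [start, start + size) advances past each result and
 * calls it again:
 */
static void
range_tree_find_in_example(range_tree_t *rt, uint64_t start, uint64_t size,
    range_tree_func_t *func, void *arg)
{
	uint64_t ostart, osize;

	while (size > 0 &&
	    range_tree_find_in(rt, start, size, &ostart, &osize)) {
		func(arg, ostart, osize);
		/* Continue the search just past the overlap we found. */
		size -= (ostart + osize) - start;
		start = ostart + osize;
	}
}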
/*
 * Ensure that this range is not in the tree after the call, regardless of
 * whether any part of it is currently in the tree.
*/
void
range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size)
{
range_seg_t *rs;
if (size == 0)
return;
if (rt->rt_type == RANGE_SEG64)
ASSERT3U(start + size, >, start);
while ((rs = range_tree_find_impl(rt, start, size)) != NULL) {
uint64_t free_start = MAX(rs_get_start(rs, rt), start);
uint64_t free_end = MIN(rs_get_end(rs, rt), start + size);
range_tree_remove(rt, free_start, free_end - free_start);
}
}
void
range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
{
range_tree_t *rt;
ASSERT0(range_tree_space(*rtdst));
ASSERT0(zfs_btree_numnodes(&(*rtdst)->rt_root));
rt = *rtsrc;
*rtsrc = *rtdst;
*rtdst = rt;
}
void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
rt->rt_ops->rtop_vacate(rt, rt->rt_arg);
if (func != NULL) {
range_seg_t *rs;
zfs_btree_index_t *cookie = NULL;
while ((rs = zfs_btree_destroy_nodes(&rt->rt_root, &cookie)) !=
NULL) {
func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
rs_get_start(rs, rt));
}
} else {
zfs_btree_clear(&rt->rt_root);
}
memset(rt->rt_histogram, 0, sizeof (rt->rt_histogram));
rt->rt_space = 0;
}
void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where);
rs != NULL; rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
rs_get_start(rs, rt));
}
}
range_seg_t *
range_tree_first(range_tree_t *rt)
{
return (zfs_btree_first(&rt->rt_root, NULL));
}
uint64_t
range_tree_space(range_tree_t *rt)
{
return (rt->rt_space);
}
uint64_t
range_tree_numsegs(range_tree_t *rt)
{
return ((rt == NULL) ? 0 : zfs_btree_numnodes(&rt->rt_root));
}
boolean_t
range_tree_is_empty(range_tree_t *rt)
{
ASSERT(rt != NULL);
return (range_tree_space(rt) == 0);
}
/*
 * Remove from removefrom any portions of the given segment [start, end)
 * that overlap it, and add the non-overlapping portions to addto.
*/
void
range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
range_tree_t *removefrom, range_tree_t *addto)
{
zfs_btree_index_t where;
range_seg_max_t starting_rs;
rs_set_start(&starting_rs, removefrom, start);
rs_set_end_raw(&starting_rs, removefrom, rs_get_start_raw(&starting_rs,
removefrom) + 1);
range_seg_t *curr = zfs_btree_find(&removefrom->rt_root,
&starting_rs, &where);
if (curr == NULL)
curr = zfs_btree_next(&removefrom->rt_root, &where, &where);
range_seg_t *next;
for (; curr != NULL; curr = next) {
if (start == end)
return;
VERIFY3U(start, <, end);
/* there is no overlap */
if (end <= rs_get_start(curr, removefrom)) {
range_tree_add(addto, start, end - start);
return;
}
uint64_t overlap_start = MAX(rs_get_start(curr, removefrom),
start);
uint64_t overlap_end = MIN(rs_get_end(curr, removefrom),
end);
uint64_t overlap_size = overlap_end - overlap_start;
ASSERT3S(overlap_size, >, 0);
range_seg_max_t rs;
rs_copy(curr, &rs, removefrom);
range_tree_remove(removefrom, overlap_start, overlap_size);
if (start < overlap_start)
range_tree_add(addto, start, overlap_start - start);
start = overlap_end;
next = zfs_btree_find(&removefrom->rt_root, &rs, &where);
/*
* If we find something here, we only removed part of the
* curr segment. Either there's some left at the end
* because we've reached the end of the range we're removing,
* or there's some left at the start because we started
* partway through the range. Either way, we continue with
* the loop. If it's the former, we'll return at the start of
* the loop, and if it's the latter we'll see if there is more
* area to process.
*/
if (next != NULL) {
ASSERT(start == end || start == rs_get_end(&rs,
removefrom));
}
next = zfs_btree_next(&removefrom->rt_root, &where, &where);
}
VERIFY3P(curr, ==, NULL);
if (start != end) {
VERIFY3U(start, <, end);
range_tree_add(addto, start, end - start);
} else {
VERIFY3U(start, ==, end);
}
}
/*
* For each entry in rt, if it exists in removefrom, remove it
* from removefrom. Otherwise, add it to addto.
*/
void
range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
range_tree_t *addto)
{
zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
range_tree_remove_xor_add_segment(rs_get_start(rs, rt),
rs_get_end(rs, rt), removefrom, addto);
}
}
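/*
 * Illustrative sketch (not part of this change): a hypothetical worked
 * example of range_tree_remove_xor_add(), assuming three empty trees of the
 * same segment type created with start and shift of 0. With
 *   rt         = { [0, 10), [20, 30) }
 *   removefrom = { [5, 25) }
 * the call below leaves removefrom = { [10, 20) } and adds [0, 5) and
 * [25, 30) to addto, i.e. the parts of rt not covered by removefrom.
 */
static void
range_tree_remove_xor_add_example(range_tree_t *rt, range_tree_t *removefrom,
    range_tree_t *addto)
{
	range_tree_add(rt, 0, 10);
	range_tree_add(rt, 20, 10);
	range_tree_add(removefrom, 5, 20);

	range_tree_remove_xor_add(rt, removefrom, addto);

	ASSERT(range_tree_contains(removefrom, 10, 10));
	ASSERT(range_tree_contains(addto, 0, 5));
	ASSERT(range_tree_contains(addto, 25, 5));
}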
uint64_t
range_tree_min(range_tree_t *rt)
{
range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL);
return (rs != NULL ? rs_get_start(rs, rt) : 0);
}
uint64_t
range_tree_max(range_tree_t *rt)
{
range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL);
return (rs != NULL ? rs_get_end(rs, rt) : 0);
}
uint64_t
range_tree_span(range_tree_t *rt)
{
return (range_tree_max(rt) - range_tree_min(rt));
}
diff --git a/sys/contrib/openzfs/module/zfs/refcount.c b/sys/contrib/openzfs/module/zfs/refcount.c
index 62ec03e1035a..601d27f8c47a 100644
--- a/sys/contrib/openzfs/module/zfs/refcount.c
+++ b/sys/contrib/openzfs/module/zfs/refcount.c
@@ -1,335 +1,350 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>
#ifdef ZFS_DEBUG
/*
 * Reference count tracking is disabled by default. Its memory requirements
 * are reasonable; however, as implemented it consumes a significant amount
 * of CPU time. Until its performance is improved it should be manually
 * enabled.
*/
int reference_tracking_enable = B_FALSE;
static uint_t reference_history = 3; /* tunable */
static kmem_cache_t *reference_cache;
static kmem_cache_t *reference_history_cache;
void
zfs_refcount_init(void)
{
reference_cache = kmem_cache_create("reference_cache",
sizeof (reference_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
reference_history_cache = kmem_cache_create("reference_history_cache",
sizeof (uint64_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
zfs_refcount_fini(void)
{
kmem_cache_destroy(reference_cache);
kmem_cache_destroy(reference_history_cache);
}
void
zfs_refcount_create(zfs_refcount_t *rc)
{
mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL);
list_create(&rc->rc_list, sizeof (reference_t),
offsetof(reference_t, ref_link));
list_create(&rc->rc_removed, sizeof (reference_t),
offsetof(reference_t, ref_link));
rc->rc_count = 0;
rc->rc_removed_count = 0;
rc->rc_tracked = reference_tracking_enable;
}
void
zfs_refcount_create_tracked(zfs_refcount_t *rc)
{
zfs_refcount_create(rc);
rc->rc_tracked = B_TRUE;
}
void
zfs_refcount_create_untracked(zfs_refcount_t *rc)
{
zfs_refcount_create(rc);
rc->rc_tracked = B_FALSE;
}
void
zfs_refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
{
reference_t *ref;
ASSERT3U(rc->rc_count, ==, number);
- while ((ref = list_head(&rc->rc_list))) {
- list_remove(&rc->rc_list, ref);
+ while ((ref = list_remove_head(&rc->rc_list)))
kmem_cache_free(reference_cache, ref);
- }
list_destroy(&rc->rc_list);
- while ((ref = list_head(&rc->rc_removed))) {
- list_remove(&rc->rc_removed, ref);
+ while ((ref = list_remove_head(&rc->rc_removed))) {
kmem_cache_free(reference_history_cache, ref->ref_removed);
kmem_cache_free(reference_cache, ref);
}
list_destroy(&rc->rc_removed);
mutex_destroy(&rc->rc_mtx);
}
void
zfs_refcount_destroy(zfs_refcount_t *rc)
{
zfs_refcount_destroy_many(rc, 0);
}
int
zfs_refcount_is_zero(zfs_refcount_t *rc)
{
return (zfs_refcount_count(rc) == 0);
}
int64_t
zfs_refcount_count(zfs_refcount_t *rc)
{
return (atomic_load_64(&rc->rc_count));
}
int64_t
zfs_refcount_add_many(zfs_refcount_t *rc, uint64_t number, const void *holder)
{
reference_t *ref = NULL;
int64_t count;
if (!rc->rc_tracked) {
count = atomic_add_64_nv(&(rc)->rc_count, number);
ASSERT3U(count, >=, number);
return (count);
}
ref = kmem_cache_alloc(reference_cache, KM_SLEEP);
ref->ref_holder = holder;
ref->ref_number = number;
mutex_enter(&rc->rc_mtx);
list_insert_head(&rc->rc_list, ref);
rc->rc_count += number;
count = rc->rc_count;
mutex_exit(&rc->rc_mtx);
return (count);
}
int64_t
zfs_refcount_add(zfs_refcount_t *rc, const void *holder)
{
return (zfs_refcount_add_many(rc, 1, holder));
}
+void
+zfs_refcount_add_few(zfs_refcount_t *rc, uint64_t number, const void *holder)
+{
+ if (!rc->rc_tracked)
+ (void) zfs_refcount_add_many(rc, number, holder);
+ else for (; number > 0; number--)
+ (void) zfs_refcount_add(rc, holder);
+}
+
int64_t
zfs_refcount_remove_many(zfs_refcount_t *rc, uint64_t number,
const void *holder)
{
reference_t *ref;
int64_t count;
if (!rc->rc_tracked) {
count = atomic_add_64_nv(&(rc)->rc_count, -number);
ASSERT3S(count, >=, 0);
return (count);
}
mutex_enter(&rc->rc_mtx);
ASSERT3U(rc->rc_count, >=, number);
for (ref = list_head(&rc->rc_list); ref;
ref = list_next(&rc->rc_list, ref)) {
if (ref->ref_holder == holder && ref->ref_number == number) {
list_remove(&rc->rc_list, ref);
if (reference_history > 0) {
ref->ref_removed =
kmem_cache_alloc(reference_history_cache,
KM_SLEEP);
list_insert_head(&rc->rc_removed, ref);
rc->rc_removed_count++;
if (rc->rc_removed_count > reference_history) {
ref = list_tail(&rc->rc_removed);
list_remove(&rc->rc_removed, ref);
kmem_cache_free(reference_history_cache,
ref->ref_removed);
kmem_cache_free(reference_cache, ref);
rc->rc_removed_count--;
}
} else {
kmem_cache_free(reference_cache, ref);
}
rc->rc_count -= number;
count = rc->rc_count;
mutex_exit(&rc->rc_mtx);
return (count);
}
}
panic("No such hold %p on refcount %llx", holder,
(u_longlong_t)(uintptr_t)rc);
return (-1);
}
int64_t
zfs_refcount_remove(zfs_refcount_t *rc, const void *holder)
{
return (zfs_refcount_remove_many(rc, 1, holder));
}
+void
+zfs_refcount_remove_few(zfs_refcount_t *rc, uint64_t number, const void *holder)
+{
+ if (!rc->rc_tracked)
+ (void) zfs_refcount_remove_many(rc, number, holder);
+ else for (; number > 0; number--)
+ (void) zfs_refcount_remove(rc, holder);
+}
+
void
zfs_refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
{
int64_t count, removed_count;
list_t list, removed;
list_create(&list, sizeof (reference_t),
offsetof(reference_t, ref_link));
list_create(&removed, sizeof (reference_t),
offsetof(reference_t, ref_link));
mutex_enter(&src->rc_mtx);
count = src->rc_count;
removed_count = src->rc_removed_count;
src->rc_count = 0;
src->rc_removed_count = 0;
list_move_tail(&list, &src->rc_list);
list_move_tail(&removed, &src->rc_removed);
mutex_exit(&src->rc_mtx);
mutex_enter(&dst->rc_mtx);
dst->rc_count += count;
dst->rc_removed_count += removed_count;
list_move_tail(&dst->rc_list, &list);
list_move_tail(&dst->rc_removed, &removed);
mutex_exit(&dst->rc_mtx);
list_destroy(&list);
list_destroy(&removed);
}
void
zfs_refcount_transfer_ownership_many(zfs_refcount_t *rc, uint64_t number,
const void *current_holder, const void *new_holder)
{
reference_t *ref;
boolean_t found = B_FALSE;
if (!rc->rc_tracked)
return;
mutex_enter(&rc->rc_mtx);
for (ref = list_head(&rc->rc_list); ref;
ref = list_next(&rc->rc_list, ref)) {
if (ref->ref_holder == current_holder &&
ref->ref_number == number) {
ref->ref_holder = new_holder;
found = B_TRUE;
break;
}
}
ASSERT(found);
mutex_exit(&rc->rc_mtx);
}
void
zfs_refcount_transfer_ownership(zfs_refcount_t *rc, const void *current_holder,
const void *new_holder)
{
return (zfs_refcount_transfer_ownership_many(rc, 1, current_holder,
new_holder));
}
/*
* If tracking is enabled, return true if a reference exists that matches
* the "holder" tag. If tracking is disabled, then return true if a reference
* might be held.
*/
boolean_t
zfs_refcount_held(zfs_refcount_t *rc, const void *holder)
{
reference_t *ref;
if (!rc->rc_tracked)
return (zfs_refcount_count(rc) > 0);
mutex_enter(&rc->rc_mtx);
for (ref = list_head(&rc->rc_list); ref;
ref = list_next(&rc->rc_list, ref)) {
if (ref->ref_holder == holder) {
mutex_exit(&rc->rc_mtx);
return (B_TRUE);
}
}
mutex_exit(&rc->rc_mtx);
return (B_FALSE);
}
/*
* If tracking is enabled, return true if a reference does not exist that
* matches the "holder" tag. If tracking is disabled, always return true
* since the reference might not be held.
*/
boolean_t
zfs_refcount_not_held(zfs_refcount_t *rc, const void *holder)
{
reference_t *ref;
if (!rc->rc_tracked)
return (B_TRUE);
mutex_enter(&rc->rc_mtx);
for (ref = list_head(&rc->rc_list); ref;
ref = list_next(&rc->rc_list, ref)) {
if (ref->ref_holder == holder) {
mutex_exit(&rc->rc_mtx);
return (B_FALSE);
}
}
mutex_exit(&rc->rc_mtx);
return (B_TRUE);
}
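/*
 * Illustrative sketch (not part of this change): the typical hold/release
 * pattern. Each hold is tagged with an opaque "holder" pointer and the same
 * tag must be passed when the hold is released; with tracking enabled the
 * holder can also be queried. The helper below is hypothetical.
 */
static void
zfs_refcount_usage_example(void)
{
	zfs_refcount_t rc;
	void *holder = &rc;	/* any stable pointer works as a tag */

	zfs_refcount_create_tracked(&rc);

	(void) zfs_refcount_add(&rc, holder);
	ASSERT(zfs_refcount_held(&rc, holder));

	(void) zfs_refcount_remove(&rc, holder);
	ASSERT(zfs_refcount_not_held(&rc, holder));
	ASSERT(zfs_refcount_is_zero(&rc));

	zfs_refcount_destroy(&rc);
}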
EXPORT_SYMBOL(zfs_refcount_create);
EXPORT_SYMBOL(zfs_refcount_destroy);
EXPORT_SYMBOL(zfs_refcount_is_zero);
EXPORT_SYMBOL(zfs_refcount_count);
EXPORT_SYMBOL(zfs_refcount_add);
EXPORT_SYMBOL(zfs_refcount_remove);
EXPORT_SYMBOL(zfs_refcount_held);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, , reference_tracking_enable, INT, ZMOD_RW,
"Track reference holders to refcount_t objects");
ZFS_MODULE_PARAM(zfs, , reference_history, UINT, ZMOD_RW,
"Maximum reference holders being tracked");
/* END CSTYLED */
#endif /* ZFS_DEBUG */
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index 1fc2c5e8c55d..88ee4ea9f458 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -1,10178 +1,10181 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2018 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
+ * Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
*/
/*
* SPA: Storage Pool Allocator
*
* This file contains all the routines used when modifying on-disk SPA state.
* This includes opening, importing, destroying, exporting a pool, and syncing a
* pool.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_draid.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>
#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include <sys/vmsystm.h>
#endif /* _KERNEL */
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* The interval, in seconds, at which failed configuration cache file writes
* should be retried.
*/
int zfs_ccw_retry_interval = 300;
typedef enum zti_modes {
ZTI_MODE_FIXED, /* value is # of threads (min 1) */
ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
ZTI_MODE_SCALE, /* Taskqs scale with CPUs. */
ZTI_MODE_NULL, /* don't create a taskq */
ZTI_NMODES
} zti_modes_t;
#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_SCALE { ZTI_MODE_SCALE, 0, 1 }
#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
#define ZTI_N(n) ZTI_P(n, 1)
#define ZTI_ONE ZTI_N(1)
typedef struct zio_taskq_info {
zti_modes_t zti_mode;
uint_t zti_value;
uint_t zti_count;
} zio_taskq_info_t;
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
"iss", "iss_h", "int", "int_h"
};
/*
* This table defines the taskq settings for each ZFS I/O type. When
* initializing a pool, we use this table to create an appropriately sized
* taskq. Some operations are low volume and therefore have a small, static
* number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
* macros. Other operations process a large amount of data; the ZTI_BATCH
* macro causes us to create a taskq oriented for throughput. Some operations
* are so high frequency and short-lived that the taskq itself can become a
* point of lock contention. The ZTI_P(#, #) macro indicates that we need an
* additional degree of parallelism specified by the number of threads per-
* taskq and the number of taskqs; when dispatching an event in this case, the
* particular taskq is chosen at random. ZTI_SCALE is similar to ZTI_BATCH,
* but with number of taskqs also scaling with number of CPUs.
*
* The different taskq priorities are to handle the different contexts (issue
* and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
* need to be handled with minimum delay.
*/
static const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
{ ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */
{ ZTI_BATCH, ZTI_N(5), ZTI_SCALE, ZTI_N(5) }, /* WRITE */
{ ZTI_SCALE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
{ ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */
};
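/*
 * Illustrative sketch (not part of this change): how a row of the table
 * above reads. The hypothetical row below would give an I/O type eight
 * fixed issue threads in one taskq, no high-priority issue taskq,
 * CPU-scaled interrupt taskqs, and no high-priority interrupt taskq - the
 * same shape as the READ row.
 */
static const zio_taskq_info_t zio_taskq_example_row[ZIO_TASKQ_TYPES] = {
	/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
	ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL,
};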
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type,
const char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
static uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */
static uint_t zio_taskq_batch_tpq; /* threads per taskq */
static const boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
static const uint_t zio_taskq_basedc = 80; /* base duty cycle */
static const boolean_t spa_create_process = B_TRUE; /* no process => no sysdc */
/*
* Report any spa_load_verify errors found, but do not fail spa_load.
* This is used by zdb to analyze non-idle pools.
*/
boolean_t spa_load_verify_dryrun = B_FALSE;
/*
* Allow read spacemaps in case of readonly import (spa_mode == SPA_MODE_READ).
* This is used by zdb for spacemaps verification.
*/
boolean_t spa_mode_readable_spacemaps = B_FALSE;
/*
* This (illegal) pool name is used when temporarily importing a spa_t in order
* to get the vdev stats associated with the imported devices.
*/
#define TRYIMPORT_NAME "$import"
/*
* For debugging purposes: print out vdev tree during pool import.
*/
static int spa_load_print_vdev_tree = B_FALSE;
/*
* A non-zero value for zfs_max_missing_tvds means that we allow importing
* pools with missing top-level vdevs. This is strictly intended for advanced
* pool recovery cases since missing data is almost inevitable. Pools with
* missing devices can only be imported read-only for safety reasons, and their
* fail-mode will be automatically set to "continue".
*
* With 1 missing vdev we should be able to import the pool and mount all
* datasets. User data that was not modified after the missing device has been
* added should be recoverable. This means that snapshots created prior to the
* addition of that device should be completely intact.
*
* With 2 missing vdevs, some datasets may fail to mount since there are
* dataset statistics that are stored as regular metadata. Some data might be
* recoverable if those vdevs were added recently.
*
* With 3 or more missing vdevs, the pool is severely damaged and MOS entries
* may be missing entirely. Chances of data recovery are very low. Note that
* there are also risks of performing an inadvertent rewind as we might be
* missing all the vdevs with the latest uberblocks.
*/
uint64_t zfs_max_missing_tvds = 0;
/*
* The parameters below are similar to zfs_max_missing_tvds but are only
* intended for a preliminary open of the pool with an untrusted config which
 * might be incomplete or outdated.
 *
 * We are more tolerant of pools opened from a cachefile since we could have
 * an outdated cachefile where a device removal was not registered.
* We could have set the limit arbitrarily high but in the case where devices
* are really missing we would want to return the proper error codes; we chose
* SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
* and we get a chance to retrieve the trusted config.
*/
uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
/*
* In the case where config was assembled by scanning device paths (/dev/dsks
* by default) we are less tolerant since all the existing devices should have
* been detected and we want spa_load to return the right error codes.
*/
uint64_t zfs_max_missing_tvds_scan = 0;
/*
* Debugging aid that pauses spa_sync() towards the end.
*/
static const boolean_t zfs_pause_spa_sync = B_FALSE;
/*
* Variables to indicate the livelist condense zthr func should wait at certain
* points for the livelist to be removed - used to test condense/destroy races
*/
static int zfs_livelist_condense_zthr_pause = 0;
static int zfs_livelist_condense_sync_pause = 0;
/*
* Variables to track whether or not condense cancellation has been
* triggered in testing.
*/
static int zfs_livelist_condense_sync_cancel = 0;
static int zfs_livelist_condense_zthr_cancel = 0;
/*
* Variable to track whether or not extra ALLOC blkptrs were added to a
* livelist entry while it was being condensed (caused by the way we track
* remapped blkptrs in dbuf_remap_impl)
*/
static int zfs_livelist_condense_new_alloc = 0;
/*
* ==========================================================================
* SPA properties routines
* ==========================================================================
*/
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval,
uint64_t intval, zprop_source_t src)
{
const char *propname = zpool_prop_to_name(prop);
nvlist_t *propval;
propval = fnvlist_alloc();
fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
if (strval != NULL)
fnvlist_add_string(propval, ZPROP_VALUE, strval);
else
fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
fnvlist_add_nvlist(nvl, propname, propval);
nvlist_free(propval);
}
/*
* Add a user property (source=src, propname=propval) to an nvlist.
*/
static void
spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
zprop_source_t src)
{
nvlist_t *propval;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
nvlist_free(propval);
}
/*
* Get property values from the spa configuration.
*/
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
uint64_t size, alloc, cap, version;
const zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
metaslab_class_t *mc = spa_normal_class(spa);
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
if (rvd != NULL) {
alloc = metaslab_class_get_alloc(mc);
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
size = metaslab_class_get_space(mc);
size += metaslab_class_get_space(spa_special_class(spa));
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
spa->spa_checkpoint_info.sci_dspace, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == SPA_MODE_READ), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONEUSED, NULL,
brt_get_used(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONESAVED, NULL,
brt_get_saved(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_BCLONERATIO, NULL,
brt_get_ratio(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
NULL, spa_load_guid(spa), src);
}
if (pool != NULL) {
/*
 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS; when
 * opening pools from before this version, freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_compatibility != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
}
/*
* Get zpool property values.
*/
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
dsl_pool_t *dp;
int err;
err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
if (err)
return (err);
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
mutex_enter(&spa->spa_props_lock);
/*
* Get properties from the spa config.
*/
spa_prop_get_config(spa, nvp);
	/* If no pool property object, there are no more props to get. */
if (mos == NULL || spa->spa_pool_props_object == 0)
goto out;
/*
* Get properties from the MOS pool property object.
*/
for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t intval = 0;
char *strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
zpool_prop_t prop;
if ((prop = zpool_name_to_prop(za.za_name)) ==
ZPOOL_PROP_INVAL && !zfs_prop_user(za.za_name))
continue;
switch (za.za_integer_length) {
case 8:
/* integer property */
if (za.za_first_integer !=
zpool_prop_default_numeric(prop))
src = ZPROP_SRC_LOCAL;
if (prop == ZPOOL_PROP_BOOTFS) {
dsl_dataset_t *ds = NULL;
err = dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &ds);
if (err != 0)
break;
strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
} else {
strval = NULL;
intval = za.za_first_integer;
}
spa_prop_add_list(*nvp, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
err = zap_lookup(mos, spa->spa_pool_props_object,
za.za_name, 1, za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
if (prop != ZPOOL_PROP_INVAL) {
spa_prop_add_list(*nvp, prop, strval, 0, src);
} else {
src = ZPROP_SRC_LOCAL;
spa_prop_add_user(*nvp, za.za_name, strval,
src);
}
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
out:
mutex_exit(&spa->spa_props_lock);
dsl_pool_config_exit(dp, FTAG);
if (err && err != ENOENT) {
nvlist_free(*nvp);
*nvp = NULL;
return (err);
}
return (0);
}
/*
* Validate the given pool properties nvlist and modify the list
* for the property values to be set.
*/
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
uint64_t objnum = 0;
boolean_t has_feature = B_FALSE;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
uint64_t intval;
const char *strval, *slash, *check, *fname;
const char *propname = nvpair_name(elem);
zpool_prop_t prop = zpool_name_to_prop(propname);
switch (prop) {
case ZPOOL_PROP_INVAL:
/*
* Sanitize the input.
*/
if (zfs_prop_user(propname)) {
if (strlen(propname) >= ZAP_MAXNAMELEN) {
error = SET_ERROR(ENAMETOOLONG);
break;
}
if (strlen(fnvpair_value_string(elem)) >=
ZAP_MAXVALUELEN) {
error = SET_ERROR(E2BIG);
break;
}
} else if (zpool_prop_feature(propname)) {
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = SET_ERROR(EINVAL);
break;
}
has_feature = B_TRUE;
} else {
error = SET_ERROR(EINVAL);
break;
}
break;
case ZPOOL_PROP_VERSION:
error = nvpair_value_uint64(elem, &intval);
if (!error &&
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DELEGATION:
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
case ZPOOL_PROP_AUTOEXPAND:
case ZPOOL_PROP_AUTOTRIM:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_MULTIHOST:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
if (!error) {
uint32_t hostid = zone_get_hostid(NULL);
if (hostid)
spa->spa_hostid = hostid;
else
error = SET_ERROR(ENOTSUP);
}
break;
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
* or the pool is still being created (version == 0),
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = SET_ERROR(ENOTSUP);
break;
}
/*
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = SET_ERROR(ENOTSUP);
break;
}
reset_bootfs = 1;
error = nvpair_value_string(elem, &strval);
if (!error) {
objset_t *os;
if (strval == NULL || strval[0] == '\0') {
objnum = zpool_prop_default_numeric(
ZPOOL_PROP_BOOTFS);
break;
}
error = dmu_objset_hold(strval, FTAG, &os);
if (error != 0)
break;
/* Must be ZPL. */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
dmu_objset_rele(os, FTAG);
}
break;
case ZPOOL_PROP_FAILUREMODE:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > ZIO_FAILURE_MODE_PANIC)
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
* the pool has completely failed. This allows
* the user to change the in-core failmode property
* without syncing it out to disk (I/Os might
* currently be blocked). We do this by returning
* EIO to the caller (spa_prop_set) to trick it
* into thinking we encountered a property validation
* error.
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = SET_ERROR(EIO);
}
break;
case ZPOOL_PROP_CACHEFILE:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
error = SET_ERROR(EINVAL);
break;
}
slash = strrchr(strval, '/');
ASSERT(slash != NULL);
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
error = SET_ERROR(EINVAL);
break;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = SET_ERROR(E2BIG);
break;
default:
break;
}
if (error)
break;
}
(void) nvlist_remove_all(props,
zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
if (!error && reset_bootfs) {
error = nvlist_remove(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
if (!error) {
error = nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
}
}
return (error);
}
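/*
 * Example of the cachefile rules enforced above (illustrative values):
 * the empty string, "none" and "/etc/zfs/zpool.cache" are accepted,
 * while a relative path such as "zpool.cache" or an absolute path
 * ending in "/", "/." or "/.." is rejected with EINVAL.
 */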
void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
const char *cachefile;
spa_config_dirent_t *dp;
if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
&cachefile) != 0)
return;
dp = kmem_alloc(sizeof (spa_config_dirent_t),
KM_SLEEP);
if (cachefile[0] == '\0')
dp->scd_path = spa_strdup(spa_config_path);
else if (strcmp(cachefile, "none") == 0)
dp->scd_path = NULL;
else
dp->scd_path = spa_strdup(cachefile);
list_insert_head(&spa->spa_config_list, dp);
if (need_sync)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
int error;
nvpair_t *elem = NULL;
boolean_t need_sync = B_FALSE;
if ((error = spa_prop_validate(spa, nvp)) != 0)
return (error);
while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
if (prop == ZPOOL_PROP_CACHEFILE ||
prop == ZPOOL_PROP_ALTROOT ||
prop == ZPOOL_PROP_READONLY)
continue;
if (prop == ZPOOL_PROP_INVAL &&
zfs_prop_user(nvpair_name(elem))) {
need_sync = B_TRUE;
break;
}
if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
uint64_t ver = 0;
if (prop == ZPOOL_PROP_VERSION) {
VERIFY(nvpair_value_uint64(elem, &ver) == 0);
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
need_sync = B_TRUE;
}
/* Save time if the version is already set. */
if (ver == spa_version(spa))
continue;
/*
* In addition to the pool directory object, we might
* create the pool properties object, the features for
* read object, the features for write object, or the
* feature descriptions object.
*/
error = dsl_sync_task(spa->spa_name, NULL,
spa_sync_version, &ver,
6, ZFS_SPACE_CHECK_RESERVED);
if (error)
return (error);
continue;
}
need_sync = B_TRUE;
break;
}
if (need_sync) {
return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
nvp, 6, ZFS_SPACE_CHECK_RESERVED));
}
return (0);
}
/*
* If the bootfs property value is dsobj, clear it.
*/
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
VERIFY(zap_remove(spa->spa_meta_objset,
spa->spa_pool_props_object,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
spa->spa_bootfs = 0;
}
}
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid __maybe_unused = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t vdev_state;
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
int error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (SET_ERROR(error));
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_state = rvd->vdev_state;
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
return (0);
}
static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
uint64_t oldguid;
vdev_t *rvd = spa->spa_root_vdev;
oldguid = spa_guid(spa);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
rvd->vdev_guid = *newguid;
rvd->vdev_guid_sum += (*newguid - oldguid);
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_STATE, FTAG);
spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
(u_longlong_t)oldguid, (u_longlong_t)*newguid);
}
/*
* Change the GUID for the pool. This is done so that we can later
* re-import a pool built from a clone of our own vdevs. We will modify
* the root vdev's guid, our own pool guid, and then mark all of our
* vdevs dirty. Note that we must make sure that all our vdevs are
* online when we do this, or else any vdevs that weren't present
* would be orphaned from our pool. We are also going to issue a
* sysevent to update any watchers.
*/
int
spa_change_guid(spa_t *spa)
{
int error;
uint64_t guid;
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
guid = spa_generate_guid(NULL);
error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
if (error == 0) {
/*
* Clear the kobj flag from all the vdevs to allow
* vdev_cache_process_kobj_evt() to post events to all the
* vdevs since GUID is updated.
*/
vdev_clear_kobj_evt(spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
vdev_clear_kobj_evt(spa->spa_l2cache.sav_vdevs[i]);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
}
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
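/*
 * Note: spa_change_guid() uses the two-phase dsl_sync_task() pattern
 * seen throughout this file: spa_change_guid_check() validates the
 * request (no checkpoint, healthy root vdev) and
 * spa_change_guid_sync() applies the new GUID in syncing context.
 * It is typically reached from userland via "zpool reguid"
 * (assumption; the ioctl path is not shown in this file).
 */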
/*
* ==========================================================================
* SPA state manipulation (open/create/destroy/import/export)
* ==========================================================================
*/
static int
spa_error_entry_compare(const void *a, const void *b)
{
const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
int ret;
ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
sizeof (zbookmark_phys_t));
return (TREE_ISIGN(ret));
}
/*
* Utility function which retrieves copies of the current logs and
* re-initializes them in the process.
*/
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
memcpy(last, &spa->spa_errlist_last, sizeof (avl_tree_t));
memcpy(scrub, &spa->spa_errlist_scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
}
static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
enum zti_modes mode = ztip->zti_mode;
uint_t value = ztip->zti_value;
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t cpus, flags = TASKQ_DYNAMIC;
boolean_t batch = B_FALSE;
switch (mode) {
case ZTI_MODE_FIXED:
ASSERT3U(value, >, 0);
break;
case ZTI_MODE_BATCH:
batch = B_TRUE;
flags |= TASKQ_THREADS_CPU_PCT;
value = MIN(zio_taskq_batch_pct, 100);
break;
case ZTI_MODE_SCALE:
flags |= TASKQ_THREADS_CPU_PCT;
/*
* We want more taskqs to reduce lock contention, but we want
* fewer for better request ordering and CPU utilization.
*/
cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
if (zio_taskq_batch_tpq > 0) {
count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
zio_taskq_batch_tpq);
} else {
/*
* Prefer 6 threads per taskq, but no more taskqs
* than threads in them on large systems. For 80%:
*
* taskq taskq total
* cpus taskqs percent threads threads
* ------- ------- ------- ------- -------
* 1 1 80% 1 1
* 2 1 80% 1 1
* 4 1 80% 3 3
* 8 2 40% 3 6
* 16 3 27% 4 12
* 32 5 16% 5 25
* 64 7 11% 7 49
* 128 10 8% 10 100
* 256 14 6% 15 210
*/
count = 1 + cpus / 6;
while (count * count > cpus)
count--;
}
/* Keep each taskq within 100% CPU so we do not trigger an assertion. */
count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
value = (zio_taskq_batch_pct + count / 2) / count;
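/*
 * Worked example for the 80% case from the table above, assuming
 * zio_taskq_batch_tpq is left at 0: with 32 CPUs, cpus =
 * 32 * 80 / 100 = 25, count = 1 + 25 / 6 = 5 (and 5 * 5 <= 25),
 * so value = (80 + 2) / 5 = 16, i.e. five taskqs at 16% each,
 * matching the 32-CPU row.
 */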
break;
case ZTI_MODE_NULL:
tqs->stqs_count = 0;
tqs->stqs_taskq = NULL;
return;
default:
panic("unrecognized mode for %s_%s taskq (%u:%u) in "
"spa_activate()",
zio_type_name[t], zio_taskq_types[q], mode, value);
break;
}
ASSERT3U(count, >, 0);
tqs->stqs_count = count;
tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
for (uint_t i = 0; i < count; i++) {
taskq_t *tq;
char name[32];
if (count > 1)
(void) snprintf(name, sizeof (name), "%s_%s_%u",
zio_type_name[t], zio_taskq_types[q], i);
else
(void) snprintf(name, sizeof (name), "%s_%s",
zio_type_name[t], zio_taskq_types[q]);
if (zio_taskq_sysdc && spa->spa_proc != &p0) {
if (batch)
flags |= TASKQ_DC_BATCH;
(void) zio_taskq_basedc;
tq = taskq_create_sysdc(name, value, 50, INT_MAX,
spa->spa_proc, zio_taskq_basedc, flags);
} else {
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
* intensive. Run it at slightly less important
* priority than the other taskqs.
*
* Under Linux and FreeBSD this means incrementing
* the priority value as opposed to platforms like
* illumos where it should be decremented.
*
* On FreeBSD, if priorities divided by four (RQ_PPQ)
* are equal then a difference between them is
* insignificant.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
#if defined(__linux__)
pri++;
#elif defined(__FreeBSD__)
pri += 4;
#else
#error "unknown OS"
#endif
}
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
}
tqs->stqs_taskq[i] = tq;
}
}
static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0);
return;
}
for (uint_t i = 0; i < tqs->stqs_count; i++) {
ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
taskq_destroy(tqs->stqs_taskq[i]);
}
kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
tqs->stqs_taskq = NULL;
}
/*
* Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
* Note that a type may have multiple discrete taskqs to avoid lock contention
* on the taskq itself. In that case we choose which taskq at random by using
* the low bits of gethrtime().
*/
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
taskq_dispatch_ent(tq, func, arg, flags, ent);
}
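/*
 * Hypothetical dispatch sketch (illustrative only; my_done_func and
 * my_arg are placeholders):
 *
 *	taskq_ent_t ent;
 *	taskq_init_ent(&ent);
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_READ, ZIO_TASKQ_INTERRUPT,
 *	    my_done_func, my_arg, 0, &ent);
 *
 * The caller supplies the pre-allocated taskq_ent_t, so the dispatch
 * path itself avoids a memory allocation.
 */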
/*
* Same as spa_taskq_dispatch_ent() but block on the task until completion.
*/
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
taskqid_t id;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
id = taskq_dispatch(tq, func, arg, flags);
if (id)
taskq_wait_id(tq, id);
}
static void
spa_create_zio_taskqs(spa_t *spa)
{
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_init(spa, t, q);
}
}
}
/*
* Disabled until spa_thread() can be adapted for Linux.
*/
#undef HAVE_SPA_THREAD
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
psetid_t zio_taskq_psrset_bind = PS_NONE;
callb_cpr_t cprinfo;
spa_t *spa = arg;
user_t *pu = PTOU(curproc);
CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
spa->spa_name);
ASSERT(curproc != &p0);
(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
"zpool-%s", spa->spa_name);
(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
/* bind this thread to the requested psrset */
if (zio_taskq_psrset_bind != PS_NONE) {
pool_lock();
mutex_enter(&cpu_lock);
mutex_enter(&pidlock);
mutex_enter(&curproc->p_lock);
if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
0, NULL, NULL) == 0) {
curthread->t_bind_pset = zio_taskq_psrset_bind;
} else {
cmn_err(CE_WARN,
"Couldn't bind process for zfs pool \"%s\" to "
"pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
}
mutex_exit(&curproc->p_lock);
mutex_exit(&pidlock);
mutex_exit(&cpu_lock);
pool_unlock();
}
if (zio_taskq_sysdc) {
sysdc_thread_enter(curthread, 100, 0);
}
spa->spa_proc = curproc;
spa->spa_did = curthread->t_did;
spa_create_zio_taskqs(spa);
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
spa->spa_proc_state = SPA_PROC_ACTIVE;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
while (spa->spa_proc_state == SPA_PROC_ACTIVE)
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
spa->spa_proc_state = SPA_PROC_GONE;
spa->spa_proc = &p0;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
mutex_enter(&curproc->p_lock);
lwp_exit();
}
#endif
/*
* Activate an uninitialized pool.
*/
static void
spa_activate(spa_t *spa, spa_mode_t mode)
{
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_mode = mode;
spa->spa_read_spacemaps = spa_mode_readable_spacemaps;
spa->spa_normal_class = metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_log_class = metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_embedded_log_class =
metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_special_class = metaslab_class_create(spa, &zfs_metaslab_ops);
spa->spa_dedup_class = metaslab_class_create(spa, &zfs_metaslab_ops);
/* Try to create a covering process */
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
ASSERT(spa->spa_proc == &p0);
spa->spa_did = 0;
(void) spa_create_process;
#ifdef HAVE_SPA_THREAD
/* Only create a process if we're going to be around a while. */
if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
NULL, 0) == 0) {
spa->spa_proc_state = SPA_PROC_CREATED;
while (spa->spa_proc_state == SPA_PROC_CREATED) {
cv_wait(&spa->spa_proc_cv,
&spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
ASSERT(spa->spa_proc != &p0);
ASSERT(spa->spa_did != 0);
} else {
#ifdef _KERNEL
cmn_err(CE_WARN,
"Couldn't create process for zfs pool \"%s\"\n",
spa->spa_name);
#endif
}
}
#endif /* HAVE_SPA_THREAD */
mutex_exit(&spa->spa_proc_lock);
/* If we didn't create a process, we need to create our taskqs. */
if (spa->spa_proc == &p0) {
spa_create_zio_taskqs(spa);
}
for (size_t i = 0; i < TXG_SIZE; i++) {
spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
}
list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_config_dirty_node));
list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
offsetof(objset_t, os_evicting_node));
list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_state_dirty_node));
txg_list_create(&spa->spa_vdev_txg_list, spa,
offsetof(struct vdev, vdev_txg_node));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_healed,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
spa_activate_os(spa);
spa_keystore_init(&spa->spa_keystore);
/*
* This taskq is used to perform zvol-minor-related tasks
* asynchronously. This has several advantages, including easy
* resolution of various deadlocks.
*
* The taskq must be single threaded to ensure tasks are always
* processed in the order in which they were dispatched.
*
* A taskq per pool allows one to keep the pools independent.
* This way if one pool is suspended, it will not impact another.
*
* The preferred location to dispatch a zvol minor task is a sync
* task. In this context, there is easy access to the spa_t and minimal
* error handling is required because the sync task must succeed.
*/
spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1, INT_MAX, 0);
/*
* Taskq dedicated to prefetcher threads: this is used to prevent the
* pool traverse code from monopolizing the global (and limited)
* system_taskq by inappropriately scheduling long running tasks on it.
*/
spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
/*
* The taskq to upgrade datasets in this pool. Currently used by
* feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
}
/*
* Opposite of spa_activate().
*/
static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
if (spa->spa_zvol_taskq) {
taskq_destroy(spa->spa_zvol_taskq);
spa->spa_zvol_taskq = NULL;
}
if (spa->spa_prefetch_taskq) {
taskq_destroy(spa->spa_prefetch_taskq);
spa->spa_prefetch_taskq = NULL;
}
if (spa->spa_upgrade_taskq) {
taskq_destroy(spa->spa_upgrade_taskq);
spa->spa_upgrade_taskq = NULL;
}
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
list_destroy(&spa->spa_evicting_os_list);
list_destroy(&spa->spa_state_dirty_list);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_fini(spa, t, q);
}
}
for (size_t i = 0; i < TXG_SIZE; i++) {
ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
VERIFY0(zio_wait(spa->spa_txg_zio[i]));
spa->spa_txg_zio[i] = NULL;
}
metaslab_class_destroy(spa->spa_normal_class);
spa->spa_normal_class = NULL;
metaslab_class_destroy(spa->spa_log_class);
spa->spa_log_class = NULL;
metaslab_class_destroy(spa->spa_embedded_log_class);
spa->spa_embedded_log_class = NULL;
metaslab_class_destroy(spa->spa_special_class);
spa->spa_special_class = NULL;
metaslab_class_destroy(spa->spa_dedup_class);
spa->spa_dedup_class = NULL;
/*
* If this was part of an import or the open otherwise failed, we may
* still have errors left in the queues. Empty them just in case.
*/
spa_errlog_drain(spa);
avl_destroy(&spa->spa_errlist_scrub);
avl_destroy(&spa->spa_errlist_last);
avl_destroy(&spa->spa_errlist_healed);
spa_keystore_fini(&spa->spa_keystore);
spa->spa_state = POOL_STATE_UNINITIALIZED;
mutex_enter(&spa->spa_proc_lock);
if (spa->spa_proc_state != SPA_PROC_NONE) {
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
spa->spa_proc_state = SPA_PROC_DEACTIVATE;
cv_broadcast(&spa->spa_proc_cv);
while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
ASSERT(spa->spa_proc != &p0);
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
spa->spa_proc_state = SPA_PROC_NONE;
}
ASSERT(spa->spa_proc == &p0);
mutex_exit(&spa->spa_proc_lock);
/*
* We want to make sure spa_thread() has actually exited the ZFS
* module, so that the module can't be unloaded out from underneath
* it.
*/
if (spa->spa_did != 0) {
thread_join(spa->spa_did);
spa->spa_did = 0;
}
spa_deactivate_os(spa);
}
/*
* Verify a pool configuration, and construct the vdev tree appropriately. This
* will create all the necessary vdevs in the appropriate layout, with each vdev
* in the CLOSED state. This will prep the pool before open/creation/import.
* All vdev validation is done by the vdev_alloc() routine.
*/
int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
uint_t id, int atype)
{
nvlist_t **child;
uint_t children;
int error;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (error);
if ((*vdp)->vdev_ops->vdev_op_leaf)
return (0);
error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (error == ENOENT)
return (0);
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (SET_ERROR(EINVAL));
}
for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
vdev_free(*vdp);
*vdp = NULL;
return (error);
}
}
ASSERT(*vdp != NULL);
return (0);
}
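/*
 * Illustrative shape of the nvlist consumed by spa_config_parse()
 * (simplified; device names are placeholders):
 *
 *	type=root
 *	    children[0]: type=mirror
 *	        children[0]: type=disk path=/dev/da0
 *	        children[1]: type=disk path=/dev/da1
 *
 * Each nested ZPOOL_CONFIG_CHILDREN array results in a recursive
 * spa_config_parse() call with the newly allocated vdev as the parent.
 */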
static boolean_t
spa_should_flush_logs_on_unload(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return (B_FALSE);
if (!spa_writeable(spa))
return (B_FALSE);
if (!spa->spa_sync_on)
return (B_FALSE);
if (spa_state(spa) != POOL_STATE_EXPORTED)
return (B_FALSE);
if (zfs_keep_log_spacemaps_at_export)
return (B_FALSE);
return (B_TRUE);
}
/*
* Opens a transaction that will set the flag that will instruct
* spa_sync to attempt to flush all the metaslabs for that txg.
*/
static void
spa_unload_log_sm_flush_all(spa_t *spa)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
}
static void
spa_unload_log_sm_metadata(spa_t *spa)
{
void *cookie = NULL;
spa_log_sm_t *sls;
+ log_summary_entry_t *e;
+
while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
&cookie)) != NULL) {
VERIFY0(sls->sls_mscount);
kmem_free(sls, sizeof (spa_log_sm_t));
}
- for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
- e != NULL; e = list_head(&spa->spa_log_summary)) {
+ while ((e = list_remove_head(&spa->spa_log_summary)) != NULL) {
VERIFY0(e->lse_mscount);
- list_remove(&spa->spa_log_summary, e);
kmem_free(e, sizeof (log_summary_entry_t));
}
spa->spa_unflushed_stats.sus_nblocks = 0;
spa->spa_unflushed_stats.sus_memused = 0;
spa->spa_unflushed_stats.sus_blocklimit = 0;
}
static void
spa_destroy_aux_threads(spa_t *spa)
{
if (spa->spa_condense_zthr != NULL) {
zthr_destroy(spa->spa_condense_zthr);
spa->spa_condense_zthr = NULL;
}
if (spa->spa_checkpoint_discard_zthr != NULL) {
zthr_destroy(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr = NULL;
}
if (spa->spa_livelist_delete_zthr != NULL) {
zthr_destroy(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr = NULL;
}
if (spa->spa_livelist_condense_zthr != NULL) {
zthr_destroy(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr = NULL;
}
}
/*
* Opposite of spa_load().
*/
static void
spa_unload(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
spa_import_progress_remove(spa_guid(spa));
spa_load_note(spa, "UNLOADING");
spa_wake_waiters(spa);
/*
* If we have set the spa_final_txg, we have already performed the
* tasks below in spa_export_common(). We should not redo it here since
* we delay the final TXGs beyond what spa_final_txg is set at.
*/
if (spa->spa_final_txg == UINT64_MAX) {
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
* time flushing as many metaslabs as we can in an attempt to
* destroy log space maps and save import time.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
/*
* Stop async tasks.
*/
spa_async_suspend(spa);
if (spa->spa_root_vdev) {
vdev_t *root_vdev = spa->spa_root_vdev;
vdev_initialize_stop_all(root_vdev,
VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
}
}
/*
* Stop syncing.
*/
if (spa->spa_sync_on) {
txg_sync_stop(spa->spa_dsl_pool);
spa->spa_sync_on = B_FALSE;
}
/*
* This ensures that there is no async metaslab prefetching
* while we attempt to unload the spa.
*/
if (spa->spa_root_vdev != NULL) {
for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
if (vc->vdev_mg != NULL)
taskq_wait(vc->vdev_mg->mg_taskq);
}
}
if (spa->spa_mmp.mmp_thread)
mmp_thread_stop(spa);
/*
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
for (int i = 0; i < max_ncpus; i++)
(void) zio_wait(spa->spa_async_zio_root[i]);
kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
}
if (spa->spa_vdev_removal != NULL) {
spa_vdev_removal_destroy(spa->spa_vdev_removal);
spa->spa_vdev_removal = NULL;
}
spa_destroy_aux_threads(spa);
spa_condense_fini(spa);
bpobj_close(&spa->spa_deferred_bpobj);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
/*
* Close all vdevs.
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
ASSERT(spa->spa_root_vdev == NULL);
/*
* Close the dsl pool.
*/
if (spa->spa_dsl_pool) {
dsl_pool_close(spa->spa_dsl_pool);
spa->spa_dsl_pool = NULL;
spa->spa_meta_objset = NULL;
}
ddt_unload(spa);
brt_unload(spa);
spa_unload_log_sm_metadata(spa);
/*
* Drop and purge level 2 cache
*/
spa_l2cache_drop(spa);
if (spa->spa_spares.sav_vdevs) {
for (int i = 0; i < spa->spa_spares.sav_count; i++)
vdev_free(spa->spa_spares.sav_vdevs[i]);
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
spa->spa_spares.sav_vdevs = NULL;
}
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
}
spa->spa_spares.sav_count = 0;
if (spa->spa_l2cache.sav_vdevs) {
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
vdev_free(spa->spa_l2cache.sav_vdevs[i]);
}
kmem_free(spa->spa_l2cache.sav_vdevs,
spa->spa_l2cache.sav_count * sizeof (void *));
spa->spa_l2cache.sav_vdevs = NULL;
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
}
spa->spa_l2cache.sav_count = 0;
spa->spa_async_suspended = 0;
spa->spa_indirect_vdevs_loaded = B_FALSE;
if (spa->spa_comment != NULL) {
spa_strfree(spa->spa_comment);
spa->spa_comment = NULL;
}
if (spa->spa_compatibility != NULL) {
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = NULL;
}
spa_config_exit(spa, SCL_ALL, spa);
}
/*
* Load (or re-load) the current list of vdevs describing the active spares for
* this pool. When this is called, we have some form of basic information in
* 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
*/
void
spa_load_spares(spa_t *spa)
{
nvlist_t **spares;
uint_t nspares;
int i;
vdev_t *vd, *tvd;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As spare vdevs are shared among open pools, we skip loading
* them when we load the checkpointed state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* First, close and free any existing spare vdevs.
*/
if (spa->spa_spares.sav_vdevs) {
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
/* Undo the call to spa_activate() below */
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL && tvd->vdev_isspare)
spa_spare_remove(tvd);
vdev_close(vd);
vdev_free(vd);
}
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
}
if (spa->spa_spares.sav_config == NULL)
nspares = 0;
else
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares));
spa->spa_spares.sav_count = (int)nspares;
spa->spa_spares.sav_vdevs = NULL;
if (nspares == 0)
return;
/*
* Construct the array of vdevs, opening them to get status in the
* process. For each spare, there are potentially two different vdev_t
* structures associated with it: one in the list of spares (used only
* for basic validation purposes) and one in the active vdev
* configuration (if it's spared in). During this phase we open and
* validate each vdev on the spare list. If the vdev also exists in the
* active configuration, then we also mark this vdev as an active spare.
*/
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
VDEV_ALLOC_SPARE) == 0);
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL) {
if (!tvd->vdev_isspare)
spa_spare_add(tvd);
/*
* We only mark the spare active if we were successfully
* able to load the vdev. Otherwise, importing a pool
* with a bad active spare would result in strange
* behavior, because multiple pools would think the spare
* is actively in use.
*
* There is a vulnerability here to an equally bizarre
* circumstance, where a dead active spare is later
* brought back to life (onlined or otherwise). Given
* the rarity of this scenario, and the extra complexity
* it adds, we ignore the possibility.
*/
if (!vdev_is_dead(tvd))
spa_spare_activate(tvd);
}
vd->vdev_top = vd;
vd->vdev_aux = &spa->spa_spares;
if (vdev_open(vd) != 0)
continue;
if (vdev_validate_aux(vd) == 0)
spa_spare_add(vd);
}
/*
* Recompute the stashed list of spares, with status information
* this time.
*/
fnvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES);
spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++)
spares[i] = vdev_config_generate(spa,
spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
spa->spa_spares.sav_count);
for (i = 0; i < spa->spa_spares.sav_count; i++)
nvlist_free(spares[i]);
kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
/*
* Load (or re-load) the current list of vdevs describing the active l2cache for
* this pool. When this is called, we have some form of basic information in
* 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
* Devices which are already active have their details maintained, and are
* not re-opened.
*/
void
spa_load_l2cache(spa_t *spa)
{
nvlist_t **l2cache = NULL;
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid;
vdev_t *vd, **oldvdevs, **newvdevs;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As L2 caches are part of the ARC which is shared among open
* pools, we skip loading them when we load the checkpointed
* state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
oldvdevs = sav->sav_vdevs;
oldnvdevs = sav->sav_count;
sav->sav_vdevs = NULL;
sav->sav_count = 0;
if (sav->sav_config == NULL) {
nl2cache = 0;
newvdevs = NULL;
goto out;
}
VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
/*
* Process new nvlist of vdevs.
*/
for (i = 0; i < nl2cache; i++) {
guid = fnvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID);
newvdevs[i] = NULL;
for (j = 0; j < oldnvdevs; j++) {
vd = oldvdevs[j];
if (vd != NULL && guid == vd->vdev_guid) {
/*
* Retain previous vdev for add/remove ops.
*/
newvdevs[i] = vd;
oldvdevs[j] = NULL;
break;
}
}
if (newvdevs[i] == NULL) {
/*
* Create new vdev
*/
VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
VDEV_ALLOC_L2CACHE) == 0);
ASSERT(vd != NULL);
newvdevs[i] = vd;
/*
* Commit this vdev as an l2cache device,
* even if it fails to open.
*/
spa_l2cache_add(vd);
vd->vdev_top = vd;
vd->vdev_aux = sav;
spa_l2cache_activate(vd);
if (vdev_open(vd) != 0)
continue;
(void) vdev_validate_aux(vd);
if (!vdev_is_dead(vd))
l2arc_add_vdev(spa, vd);
/*
* Upon cache device addition to a pool or pool
* creation with a cache device, or if the header
* of the device is invalid, we issue an async
* TRIM command for the whole device, which will
* execute if l2arc_trim_ahead > 0.
*/
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
}
sav->sav_vdevs = newvdevs;
sav->sav_count = (int)nl2cache;
/*
* Recompute the stashed list of l2cache devices, with status
* information this time.
*/
fnvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE);
if (sav->sav_count > 0)
l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
fnvlist_add_nvlist_array(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
(const nvlist_t * const *)l2cache, sav->sav_count);
out:
/*
* Purge vdevs that were dropped
*/
if (oldvdevs) {
for (i = 0; i < oldnvdevs; i++) {
uint64_t pool;
vd = oldvdevs[i];
if (vd != NULL) {
ASSERT(vd->vdev_isl2cache);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
vdev_clear_stats(vd);
vdev_free(vd);
}
}
kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
}
for (i = 0; i < sav->sav_count; i++)
nvlist_free(l2cache[i]);
if (sav->sav_count)
kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
dmu_buf_t *db;
char *packed = NULL;
size_t nvsize = 0;
int error;
*value = NULL;
error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
if (error)
return (error);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
packed = vmem_alloc(nvsize, KM_SLEEP);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
vmem_free(packed, nvsize);
return (error);
}
/*
* Concrete top-level vdevs that are not missing and are not logs. At every
* spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
*/
static uint64_t
spa_healthy_core_tvds(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t tvds = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog)
continue;
if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
tvds++;
}
return (tvds);
}
/*
* Checks to see if the given vdev could not be opened, in which case we post a
* sysevent to notify the autoreplace code that the device has been removed.
*/
static void
spa_check_removed(vdev_t *vd)
{
for (uint64_t c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
vdev_is_concrete(vd)) {
zfs_post_autoreplace(vd->vdev_spa, vd);
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
}
}
static int
spa_check_for_missing_logs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're doing a normal import, then build up any additional
* diagnostic information about missing log devices.
* We'll pass this up to the user for further processing.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
nvlist_t **child, *nv;
uint64_t idx = 0;
child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
nv = fnvlist_alloc();
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
/*
* We consider a device missing only if it failed
* to open (i.e. offline or faulted devices are not
* considered missing).
*/
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
child[idx++] = vdev_config_generate(spa, tvd,
B_FALSE, VDEV_CONFIG_MISSING);
}
}
if (idx > 0) {
fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
(const nvlist_t * const *)child, idx);
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_MISSING_DEVICES, nv);
for (uint64_t i = 0; i < idx; i++)
nvlist_free(child[i]);
}
nvlist_free(nv);
kmem_free(child, rvd->vdev_children * sizeof (char **));
if (idx > 0) {
spa_load_failed(spa, "some log devices are missing");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
} else {
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
spa_set_log_state(spa, SPA_LOG_CLEAR);
spa_load_note(spa, "some log devices are "
"missing, ZIL is dropped.");
vdev_dbgmsg_print_tree(rvd, 2);
break;
}
}
}
return (0);
}
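/*
 * Note: ZFS_IMPORT_MISSING_LOG turns missing log devices from a hard
 * ENXIO failure (with ZPOOL_CONFIG_MISSING_DEVICES details) into a
 * warning plus SPA_LOG_CLEAR, i.e. the ZIL is dropped. It is typically
 * requested with "zpool import -m" (assumption; the flag is set by the
 * caller, not in this file).
 */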
/*
* Check for missing log devices
*/
static boolean_t
spa_check_logs(spa_t *spa)
{
boolean_t rv = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
switch (spa->spa_log_state) {
default:
break;
case SPA_LOG_MISSING:
/* need to recheck in case slog has been restored */
case SPA_LOG_UNKNOWN:
rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
if (rv)
spa_set_log_state(spa, SPA_LOG_MISSING);
break;
}
return (rv);
}
/*
* Passivate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static boolean_t
spa_passivate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
boolean_t slog_found = B_FALSE;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
}
return (slog_found);
}
/*
* Activate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static void
spa_activate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_activate(tvd->vdev_mg);
}
}
}
int
spa_reset_logs(spa_t *spa)
{
int error;
error = dmu_objset_find(spa_name(spa), zil_reset,
NULL, DS_FIND_CHILDREN);
if (error == 0) {
/*
* We successfully offlined the log device, sync out the
* current txg so that the "stubby" block can be removed
* by zil_sync().
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
}
return (error);
}
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
for (int i = 0; i < sav->sav_count; i++)
spa_check_removed(sav->sav_vdevs[i]);
}
void
spa_claim_notify(zio_t *zio)
{
spa_t *spa = zio->io_spa;
if (zio->io_error)
return;
mutex_enter(&spa->spa_props_lock); /* any mutex will do */
if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
spa->spa_claim_max_txg = zio->io_bp->blk_birth;
mutex_exit(&spa->spa_props_lock);
}
typedef struct spa_load_error {
boolean_t sle_verify_data;
uint64_t sle_meta_count;
uint64_t sle_data_count;
} spa_load_error_t;
static void
spa_load_verify_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
spa_load_error_t *sle = zio->io_private;
dmu_object_type_t type = BP_GET_TYPE(bp);
int error = zio->io_error;
spa_t *spa = zio->io_spa;
abd_free(zio->io_abd);
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
atomic_inc_64(&sle->sle_meta_count);
else
atomic_inc_64(&sle->sle_data_count);
}
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
/*
* Maximum number of inflight bytes is the log2 fraction of the arc size.
* By default, we set it to 1/16th of the arc.
*/
static uint_t spa_load_verify_shift = 4;
static int spa_load_verify_metadata = B_TRUE;
static int spa_load_verify_data = B_TRUE;
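/*
 * Example: with the default spa_load_verify_shift of 4, the in-flight
 * limit computed below is arc_target_bytes() >> 4, i.e. 1/16th of the
 * ARC target, so an 8 GiB ARC target allows roughly 512 MiB of
 * outstanding verification I/O.
 */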
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zio_t *rio = arg;
spa_load_error_t *sle = rio->io_private;
(void) zilog, (void) dnp;
/*
* Note: normally this routine will not be called if
* spa_load_verify_metadata is not set. However, it may be useful
* to manually set the flag after the traversal has begun.
*/
if (!spa_load_verify_metadata)
return (0);
/*
* Sanity check the block pointer in order to detect obvious damage
* before using the contents in subsequent checks or in zio_read().
* When damaged consider it to be a metadata error since we cannot
* trust the BP_GET_TYPE and BP_GET_LEVEL values.
*/
if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
atomic_inc_64(&sle->sle_meta_count);
return (0);
}
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (0);
if (!BP_IS_METADATA(bp) &&
(!spa_load_verify_data || !sle->sle_verify_data))
return (0);
uint64_t maxinflight_bytes =
arc_target_bytes() >> spa_load_verify_shift;
size_t size = BP_GET_PSIZE(bp);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes >= maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
return (0);
}
static int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) dp, (void) arg;
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
spa_load_verify(spa_t *spa)
{
zio_t *rio;
spa_load_error_t sle = { 0 };
zpool_load_policy_t policy;
boolean_t verify_ok = B_FALSE;
int error = 0;
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_rewind & ZPOOL_NEVER_REWIND ||
policy.zlp_maxmeta == UINT64_MAX)
return (0);
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
error = dmu_objset_find_dp(spa->spa_dsl_pool,
spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
DS_FIND_CHILDREN);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
if (error != 0)
return (error);
/*
* Verify data only if we are rewinding or error limit was set.
* Otherwise nothing except dbgmsg cares about it, so don't waste the time.
*/
sle.sle_verify_data = (policy.zlp_rewind & ZPOOL_REWIND_MASK) ||
(policy.zlp_maxdata < UINT64_MAX);
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
if (spa->spa_extreme_rewind) {
spa_load_note(spa, "performing a complete scan of the "
"pool since extreme rewind is on. This may take "
"a very long time.\n (spa_load_verify_data=%u, "
"spa_load_verify_metadata=%u)",
spa_load_verify_data, spa_load_verify_metadata);
}
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
}
(void) zio_wait(rio);
ASSERT0(spa->spa_load_verify_bytes);
spa->spa_load_meta_errors = sle.sle_meta_count;
spa->spa_load_data_errors = sle.sle_data_count;
if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
spa_load_note(spa, "spa_load_verify found %llu metadata errors "
"and %llu data errors", (u_longlong_t)sle.sle_meta_count,
(u_longlong_t)sle.sle_data_count);
}
if (spa_load_verify_dryrun ||
(!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
sle.sle_data_count <= policy.zlp_maxdata)) {
int64_t loss = 0;
verify_ok = B_TRUE;
spa->spa_load_txg = spa->spa_uberblock.ub_txg;
spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_LOAD_TIME,
spa->spa_load_txg_ts);
fnvlist_add_int64(spa->spa_load_info, ZPOOL_CONFIG_REWIND_TIME,
loss);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_META_ERRORS, sle.sle_meta_count);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count);
} else {
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
}
if (spa_load_verify_dryrun)
return (0);
if (error) {
if (error != ENXIO && error != EIO)
error = SET_ERROR(EIO);
return (error);
}
return (verify_ok ? 0 : EIO);
}
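/*
 * Note on the load policy consumed above (assumption; the policy is
 * built by the import code, not here): "zpool import -F" requests a
 * rewind, which is what enables data verification, and "zpool import
 * -X" additionally sets spa_extreme_rewind, triggering the
 * complete-scan note logged above.
 */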
/*
* Find a value in the pool props object.
*/
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}
/*
* Find a value in the pool directory object.
*/
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val);
if (error != 0 && (error != ENOENT || log_enoent)) {
spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
"[error=%d]", name, error);
}
return (error);
}
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
return (SET_ERROR(err));
}
boolean_t
spa_livelist_delete_check(spa_t *spa)
{
return (spa->spa_livelists_to_delete != 0);
}
static boolean_t
spa_livelist_delete_cb_check(void *arg, zthr_t *z)
{
(void) z;
spa_t *spa = arg;
return (spa_livelist_delete_check(spa));
}
static int
delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = arg;
zio_free(spa, tx->tx_txg, bp);
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
return (0);
}
static int
dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
{
int err;
zap_cursor_t zc;
zap_attribute_t za;
zap_cursor_init(&zc, os, zap_obj);
err = zap_cursor_retrieve(&zc, &za);
zap_cursor_fini(&zc);
if (err == 0)
*llp = za.za_first_integer;
return (err);
}
/*
* Components of livelist deletion that must be performed in syncing
* context: freeing block pointers and updating the pool-wide data
* structures to indicate how much work is left to do
*/
typedef struct sublist_delete_arg {
spa_t *spa;
dsl_deadlist_t *ll;
uint64_t key;
bplist_t *to_free;
} sublist_delete_arg_t;
static void
sublist_delete_sync(void *arg, dmu_tx_t *tx)
{
sublist_delete_arg_t *sda = arg;
spa_t *spa = sda->spa;
dsl_deadlist_t *ll = sda->ll;
uint64_t key = sda->key;
bplist_t *to_free = sda->to_free;
bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
dsl_deadlist_remove_entry(ll, key, tx);
}
typedef struct livelist_delete_arg {
spa_t *spa;
uint64_t ll_obj;
uint64_t zap_obj;
} livelist_delete_arg_t;
static void
livelist_delete_sync(void *arg, dmu_tx_t *tx)
{
livelist_delete_arg_t *lda = arg;
spa_t *spa = lda->spa;
uint64_t ll_obj = lda->ll_obj;
uint64_t zap_obj = lda->zap_obj;
objset_t *mos = spa->spa_meta_objset;
uint64_t count;
/* free the livelist and decrement the feature count */
VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
dsl_deadlist_free(mos, ll_obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
VERIFY0(zap_count(mos, zap_obj, &count));
if (count == 0) {
/* no more livelists to delete */
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, tx));
VERIFY0(zap_destroy(mos, zap_obj, tx));
spa->spa_livelists_to_delete = 0;
spa_notify_waiters(spa);
}
}
/*
* Load in the value for the livelist to be removed and open it. Then,
* load its first sublist and determine which block pointers should actually
* be freed. Then, call a synctask which performs the actual frees and updates
* the pool-wide livelist data.
*/
static void
spa_livelist_delete_cb(void *arg, zthr_t *z)
{
spa_t *spa = arg;
uint64_t ll_obj = 0, count;
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj = spa->spa_livelists_to_delete;
/*
* Determine the next livelist to delete. This function should only
* be called if there is at least one deleted clone.
*/
VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
VERIFY0(zap_count(mos, ll_obj, &count));
if (count > 0) {
dsl_deadlist_t *ll;
dsl_deadlist_entry_t *dle;
bplist_t to_free;
ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
dsl_deadlist_open(ll, mos, ll_obj);
dle = dsl_deadlist_first(ll);
ASSERT3P(dle, !=, NULL);
bplist_create(&to_free);
int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
z, NULL);
if (err == 0) {
sublist_delete_arg_t sync_arg = {
.spa = spa,
.ll = ll,
.key = dle->dle_mintxg,
.to_free = &to_free
};
zfs_dbgmsg("deleting sublist (id %llu) from"
" livelist %llu, %lld remaining",
(u_longlong_t)dle->dle_bpobj.bpo_object,
(u_longlong_t)ll_obj, (longlong_t)count - 1);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
sublist_delete_sync, &sync_arg, 0,
ZFS_SPACE_CHECK_DESTROY));
} else {
VERIFY3U(err, ==, EINTR);
}
bplist_clear(&to_free);
bplist_destroy(&to_free);
dsl_deadlist_close(ll);
kmem_free(ll, sizeof (dsl_deadlist_t));
} else {
livelist_delete_arg_t sync_arg = {
.spa = spa,
.ll_obj = ll_obj,
.zap_obj = zap_obj
};
zfs_dbgmsg("deletion of livelist %llu completed",
(u_longlong_t)ll_obj);
VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
&sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
}
}
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
minclsyspri);
}
typedef struct livelist_new_arg {
bplist_t *allocs;
bplist_t *frees;
} livelist_new_arg_t;
static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(tx == NULL);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
} else {
bplist_append(lna->allocs, bp);
zfs_livelist_condense_new_alloc++;
}
return (0);
}
typedef struct livelist_condense_arg {
spa_t *spa;
bplist_t to_keep;
uint64_t first_size;
uint64_t next_size;
} livelist_condense_arg_t;
static void
spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
{
livelist_condense_arg_t *lca = arg;
spa_t *spa = lca->spa;
bplist_t new_frees;
dsl_dataset_t *ds = spa->spa_to_condense.ds;
/* Have we been cancelled? */
if (spa->spa_to_condense.cancelled) {
zfs_livelist_condense_sync_cancel++;
goto out;
}
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
/*
* It's possible that the livelist was changed while the zthr was
* running. Therefore, we need to check for new blkptrs in the two
* entries being condensed and continue to track them in the livelist.
* Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
* it's possible that the newly added blkptrs are FREEs or ALLOCs so
* we need to sort them into two different bplists.
*/
uint64_t first_obj = first->dle_bpobj.bpo_object;
uint64_t next_obj = next->dle_bpobj.bpo_object;
uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
bplist_create(&new_frees);
livelist_new_arg_t new_bps = {
.allocs = &lca->to_keep,
.frees = &new_frees,
};
if (cur_first_size > lca->first_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->first_size));
}
if (cur_next_size > lca->next_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->next_size));
}
dsl_deadlist_clear_entry(first, ll, tx);
ASSERT(bpobj_is_empty(&first->dle_bpobj));
dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
bplist_destroy(&new_frees);
char dsname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
"(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
"(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
(u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
(u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
(u_longlong_t)cur_next_size,
(u_longlong_t)first->dle_bpobj.bpo_object,
(u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
out:
dmu_buf_rele(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
spa->spa_to_condense.syncing = B_FALSE;
}
static void
spa_livelist_condense_cb(void *arg, zthr_t *t)
{
while (zfs_livelist_condense_zthr_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
spa_t *spa = arg;
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
uint64_t first_size, next_size;
livelist_condense_arg_t *lca =
kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
bplist_create(&lca->to_keep);
/*
* Process the livelists (matching FREEs and ALLOCs) in open context
* so we have minimal work in syncing context to condense.
*
* We save bpobj sizes (first_size and next_size) to use later in
* syncing context to determine if entries were added to these sublists
* while in open context. This is possible because the clone is still
* active and open for normal writes and we want to make sure the new,
* unprocessed blockpointers are inserted into the livelist normally.
*
* Note that dsl_process_sub_livelist() both records the number of
* blockpointers and iterates over them while the bpobj's lock is held,
* so the sizes returned to us are consistent with what was actually
* processed.
*/
int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
&first_size);
if (err == 0)
err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
t, &next_size);
if (err == 0) {
while (zfs_livelist_condense_sync_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_mark_netfree(tx);
dmu_tx_hold_space(tx, 1);
err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
if (err == 0) {
/*
* Prevent the condense zthr restarting before
* the synctask completes.
*/
spa->spa_to_condense.syncing = B_TRUE;
lca->spa = spa;
lca->first_size = first_size;
lca->next_size = next_size;
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_livelist_condense_sync, lca, tx);
dmu_tx_commit(tx);
return;
}
}
/*
* Condensing cannot continue: either it was externally stopped or
* we were unable to assign to a tx because the pool has run out of
* space. In the second case, we'll just end up trying to condense
* again in a later txg.
*/
ASSERT(err != 0);
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
if (err == EINTR)
zfs_livelist_condense_zthr_cancel++;
}
/*
* Check that there is something to condense but that a condense is not
* already in progress and that condensing has not been cancelled.
*/
static boolean_t
spa_livelist_condense_cb_check(void *arg, zthr_t *z)
{
(void) z;
spa_t *spa = arg;
if ((spa->spa_to_condense.ds != NULL) &&
(spa->spa_to_condense.syncing == B_FALSE) &&
(spa->spa_to_condense.cancelled == B_FALSE)) {
return (B_TRUE);
}
return (B_FALSE);
}
static void
spa_start_livelist_condensing_thread(spa_t *spa)
{
spa->spa_to_condense.ds = NULL;
spa->spa_to_condense.first = NULL;
spa->spa_to_condense.next = NULL;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
spa_livelist_condense_cb, spa, minclsyspri);
}
static void
spa_spawn_aux_threads(spa_t *spa)
{
ASSERT(spa_writeable(spa));
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_start_indirect_condensing_thread(spa);
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
spa_checkpoint_discard_thread, spa, minclsyspri);
}
/*
* Fix up config after a partly-completed split. This is done with the
* ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
* pool have that entry in their config, but only the splitting one contains
* a list of all the guids of the vdevs that are being split off.
*
* This function determines what to do with that list: either rejoin
* all the disks to the pool, or complete the splitting process. To attempt
* the rejoin, each disk that is offlined is marked online again, and
* we do a reopen() call. If the vdev label for every disk that was
* marked online indicates it was successfully split off
* (VDEV_AUX_SPLIT_POOL), then we call vdev_split() on each disk and
* complete the split.
*
* Otherwise we leave the config alone, with all the vdevs in place in
* the original pool.
*/
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
uint_t extracted;
uint64_t *glist;
uint_t i, gcount;
nvlist_t *nvl;
vdev_t **vd;
boolean_t attempt_reopen;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
return;
/* check that the config is complete */
if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
&glist, &gcount) != 0)
return;
vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
/* attempt to online all the vdevs & validate */
attempt_reopen = B_TRUE;
for (i = 0; i < gcount; i++) {
if (glist[i] == 0) /* vdev is hole */
continue;
vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
if (vd[i] == NULL) {
/*
* Don't bother attempting to reopen the disks;
* just do the split.
*/
attempt_reopen = B_FALSE;
} else {
/* attempt to re-online it */
vd[i]->vdev_offline = B_FALSE;
}
}
if (attempt_reopen) {
vdev_reopen(spa->spa_root_vdev);
/* check each device to see what state it's in */
for (extracted = 0, i = 0; i < gcount; i++) {
if (vd[i] != NULL &&
vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
break;
++extracted;
}
}
/*
* If every disk has been moved to the new pool, or if we never
* even attempted to look at them, then we split them off for
* good.
*/
if (!attempt_reopen || gcount == extracted) {
for (i = 0; i < gcount; i++)
if (vd[i] != NULL)
vdev_split(vd[i]);
vdev_reopen(spa->spa_root_vdev);
}
kmem_free(vd, gcount * sizeof (vdev_t *));
}
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
{
const char *ereport = FM_EREPORT_ZFS_POOL;
int error;
spa->spa_load_state = state;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
gethrestime(&spa->spa_loaded_ts);
error = spa_load_impl(spa, type, &ereport);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
spa->spa_loaded_ts.tv_nsec = 0;
}
if (error != EBADF) {
(void) zfs_ereport_post(ereport, spa,
NULL, NULL, NULL, 0);
}
}
spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
spa->spa_ena = 0;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
return (error);
}
#ifdef ZFS_DEBUG
/*
* Count the number of per-vdev ZAPs associated with all of the vdevs in the
* vdev tree rooted in the given vd, and ensure that each ZAP is present in the
* spa's per-vdev ZAP list.
*/
static uint64_t
vdev_count_verify_zaps(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
uint64_t total = 0;
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2) &&
vd->vdev_root_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_root_zap));
}
if (vd->vdev_top_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_top_zap));
}
if (vd->vdev_leaf_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
total += vdev_count_verify_zaps(vd->vdev_child[i]);
}
return (total);
}
#else
#define vdev_count_verify_zaps(vd) ((void) sizeof (vd), 0)
#endif
/*
* Determine whether the activity check is required.
*/
static boolean_t
spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
nvlist_t *config)
{
uint64_t state = 0;
uint64_t hostid = 0;
uint64_t tryconfig_txg = 0;
uint64_t tryconfig_timestamp = 0;
uint16_t tryconfig_mmp_seq = 0;
nvlist_t *nvinfo;
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
&tryconfig_txg);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
&tryconfig_timestamp);
(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
&tryconfig_mmp_seq);
}
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
/*
* Disable the MMP activity check. This is used by zdb, which
* is intended to be used on potentially active pools.
*/
if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
return (B_FALSE);
/*
* Skip the activity check when the MMP feature is disabled.
*/
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
return (B_FALSE);
/*
* If the tryconfig_ values are nonzero, they are the results of an
* earlier tryimport. If they all match the uberblock we just found,
* then the pool has not changed and we return false so we do not test
* a second time.
*/
if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
tryconfig_mmp_seq && tryconfig_mmp_seq ==
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
return (B_FALSE);
/*
* Allow the activity check to be skipped when importing the pool
* on the same host which last imported it. Since the hostid from the
* configuration may be stale, use the one read from the label.
*/
if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
if (hostid == spa_get_hostid(spa))
return (B_FALSE);
/*
* Skip the activity test when the pool was cleanly exported.
*/
if (state != POOL_STATE_ACTIVE)
return (B_FALSE);
return (B_TRUE);
}
/*
* The number of nanoseconds the activity check must watch for changes on disk.
*/
static uint64_t
spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
{
uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
uint64_t multihost_interval = MSEC2NSEC(
MMP_INTERVAL_OK(zfs_multihost_interval));
uint64_t import_delay = MAX(NANOSEC, import_intervals *
multihost_interval);
/*
* Local tunables determine a minimum duration except for the case
* where we know when the remote host will suspend the pool if MMP
* writes do not land.
*
* See Big Theory comment at the top of mmp.c for the reasoning behind
* these cases and times.
*/
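/*
 * As a rough illustration (assuming the common default tunables of a
 * 1000 ms zfs_multihost_interval and 20 import intervals, which may
 * differ on a tuned system): the baseline import_delay computed above
 * is MAX(1 s, 20 * 1 s) = 20 s, before any of the per-uberblock
 * adjustments below are applied.
 */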
ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) > 0) {
/* MMP on remote host will suspend pool after failed writes */
import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
MMP_IMPORT_SAFETY_FACTOR / 100;
zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
"mmp_fails=%llu ub_mmp mmp_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_FAIL_INT(ub),
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)import_intervals);
} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) == 0) {
/* MMP on remote host will never suspend pool */
import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
"mmp_interval=%llu ub_mmp_delay=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals);
} else if (MMP_VALID(ub)) {
/*
* zfs-0.7 compatibility case
*/
import_delay = MAX(import_delay, (multihost_interval +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
"import_intervals=%llu leaves=%u",
(u_longlong_t)import_delay,
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals,
vdev_count_leaves(spa));
} else {
/* Using local tunings is the only reasonable option */
zfs_dbgmsg("pool last imported on non-MMP aware "
"host using import_delay=%llu multihost_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)multihost_interval,
(u_longlong_t)import_intervals);
}
return (import_delay);
}
/*
* Perform the import activity check. If the user canceled the import or
* we detected activity, then fail.
*/
static int
spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
{
uint64_t txg = ub->ub_txg;
uint64_t timestamp = ub->ub_timestamp;
uint64_t mmp_config = ub->ub_mmp_config;
uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
uint64_t import_delay;
hrtime_t import_expire;
nvlist_t *mmp_label = NULL;
vdev_t *rvd = spa->spa_root_vdev;
kcondvar_t cv;
kmutex_t mtx;
int error = 0;
cv_init(&cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_enter(&mtx);
/*
* If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
* during the earlier tryimport. If the txg recorded there is 0 then
* the pool is known to be active on another host.
*
* Otherwise, the pool might be in use on another host. Check for
* changes in the uberblocks on disk if necessary.
*/
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
vdev_uberblock_load(rvd, ub, &mmp_label);
error = SET_ERROR(EREMOTEIO);
goto out;
}
}
import_delay = spa_activity_check_duration(spa, ub);
/* Add a small random factor in case of simultaneous imports (0-25%) */
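/*
 * For example (hypothetical numbers): with an import_delay of 20 s,
 * random_in_range(250) returns a value in [0, 250), so the line below
 * adds anywhere from 0 to just under 5 s (i.e. just under 25%).
 */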
import_delay += import_delay * random_in_range(250) / 1000;
import_expire = gethrtime() + import_delay;
while (gethrtime() < import_expire) {
(void) spa_import_progress_set_mmp_check(spa_guid(spa),
NSEC2SEC(import_expire - gethrtime()));
vdev_uberblock_load(rvd, ub, &mmp_label);
if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
zfs_dbgmsg("multihost activity detected "
"txg %llu ub_txg %llu "
"timestamp %llu ub_timestamp %llu "
"mmp_config %#llx ub_mmp_config %#llx",
(u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
(u_longlong_t)timestamp,
(u_longlong_t)ub->ub_timestamp,
(u_longlong_t)mmp_config,
(u_longlong_t)ub->ub_mmp_config);
error = SET_ERROR(EREMOTEIO);
break;
}
if (mmp_label) {
nvlist_free(mmp_label);
mmp_label = NULL;
}
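/*
 * cv_timedwait_sig() returns -1 when the one-second timeout expires
 * without interruption; any other return value here means the wait
 * was interrupted (e.g. by a signal), so we abort the import with
 * EINTR.
 */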
error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
if (error != -1) {
error = SET_ERROR(EINTR);
break;
}
error = 0;
}
out:
mutex_exit(&mtx);
mutex_destroy(&mtx);
cv_destroy(&cv);
/*
* If the pool is determined to be active, store the status in the
* spa->spa_load_info nvlist. If the remote hostname or hostid are
* available from the configuration read from disk, store them as well.
* This allows 'zpool import' to generate a more useful message.
*
* ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
* ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
const char *hostname = "<unknown>";
uint64_t hostid = 0;
if (mmp_label) {
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
hostname = fnvlist_lookup_string(mmp_label,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
}
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
hostid = fnvlist_lookup_uint64(mmp_label,
ZPOOL_CONFIG_HOSTID);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTID, hostid);
}
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, 0);
error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
}
if (mmp_label)
nvlist_free(mmp_label);
return (error);
}
static int
spa_verify_host(spa_t *spa, nvlist_t *mos_config)
{
uint64_t hostid;
const char *hostname;
uint64_t myhostid = 0;
if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
hostname = fnvlist_lookup_string(mos_config,
ZPOOL_CONFIG_HOSTNAME);
myhostid = zone_get_hostid(NULL);
if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%llx). "
"See: https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-EY",
spa_name(spa), hostname, (u_longlong_t)hostid);
spa_load_failed(spa, "hostid verification failed: pool "
"last accessed by host: %s (hostid: 0x%llx)",
hostname, (u_longlong_t)hostid);
return (SET_ERROR(EBADF));
}
}
return (0);
}
static int
spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
{
int error = 0;
nvlist_t *nvtree, *nvl, *config = spa->spa_config;
int parse;
vdev_t *rvd;
uint64_t pool_guid;
const char *comment;
const char *compatibility;
/*
* Versioning wasn't explicitly added to the label until later, so if
* it's not present, treat it as the initial version.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_POOL_GUID);
return (SET_ERROR(EINVAL));
}
/*
* If we are doing an import, ensure that the pool is not already
* imported by checking if its pool guid already exists in the
* spa namespace.
*
* The only case in which we allow an already imported pool to be
* imported again is when the pool is checkpointed and we want to
* look at its checkpointed state from userland tools like zdb.
*/
#ifdef _KERNEL
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
#else
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0) &&
!spa_importing_readonly_checkpoint(spa)) {
#endif
spa_load_failed(spa, "a pool with guid %llu is already open",
(u_longlong_t)pool_guid);
return (SET_ERROR(EEXIST));
}
spa->spa_config_guid = pool_guid;
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
ASSERT(spa->spa_compatibility == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&spa->spa_config_txg);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
spa->spa_config_splitting = fnvlist_dup(nvl);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_VDEV_TREE);
return (SET_ERROR(EINVAL));
}
/*
* Create "The Godfather" zio to hold all async IOs
*/
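/*
 * One root zio is kept per CPU (hence the max_ncpus-sized array
 * below), presumably so that concurrently issued async I/Os do not
 * all serialize on a single parent zio.
 */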
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Parse the configuration into a vdev tree. We explicitly set the
* value that will be returned by spa_version() since parsing the
* configuration requires knowing the version number.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "unable to parse config [error=%d]",
error);
return (error);
}
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
if (type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_guid(spa) == pool_guid);
}
return (0);
}
/*
* Recursively open all vdevs in the vdev tree. This function is called twice:
* first with the untrusted config, then with the trusted config.
*/
static int
spa_ld_open_vdevs(spa_t *spa)
{
int error = 0;
/*
* spa_missing_tvds_allowed defines how many top-level vdevs can be
* missing/unopenable for the root vdev to still be considered openable.
*/
if (spa->spa_trust_config) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
} else {
spa->spa_missing_tvds_allowed = 0;
}
spa->spa_missing_tvds_allowed =
MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "vdev tree has %lld missing top-level "
"vdevs.", (u_longlong_t)spa->spa_missing_tvds);
if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
/*
* Although theoretically we could allow users to open
* incomplete pools in RW mode, we'd need to add a lot
* of extra logic (e.g. adjust pool space to account
* for missing vdevs).
* This limitation also prevents users from accidentally
* opening the pool in RW mode during data recovery and
* damaging it further.
*/
spa_load_note(spa, "pools with missing top-level "
"vdevs can only be opened in read-only mode.");
error = SET_ERROR(ENXIO);
} else {
spa_load_note(spa, "current settings allow for maximum "
"%lld missing top-level vdevs at this stage.",
(u_longlong_t)spa->spa_missing_tvds_allowed);
}
}
if (error != 0) {
spa_load_failed(spa, "unable to open vdev tree [error=%d]",
error);
}
if (spa->spa_missing_tvds != 0 || error != 0)
vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
return (error);
}
/*
* We need to validate the vdev labels against the configuration that
* we have in hand. This function is called twice: first with an untrusted
* config, then with a trusted config. The validation is more strict when the
* config is trusted.
*/
static int
spa_ld_validate_vdevs(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_validate(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
return (error);
}
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
spa_load_failed(spa, "cannot open vdev tree after invalidating "
"some vdevs");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
return (0);
}
static void
spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
{
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
spa->spa_first_txg = spa->spa_last_ubsync_txg ?
spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
spa->spa_claim_max_txg = spa->spa_first_txg;
spa->spa_prev_software_version = ub->ub_software_version;
}
static int
spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
{
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *label;
uberblock_t *ub = &spa->spa_uberblock;
boolean_t activity_check = B_FALSE;
/*
* If we are opening the checkpointed state of the pool by
* rewinding to it, at this point we will have written the
* checkpointed uberblock to the vdev labels, so searching
* the labels will find the right uberblock. However, if
* we are opening the checkpointed state read-only, we have
* not modified the labels. Therefore, we must ignore the
* labels and continue using the spa_uberblock that was set
* by spa_ld_checkpoint_rewind.
*
* Note that it would be fine to ignore the labels when
* rewinding (opening writeable) as well. However, if we
* crash just after writing the labels, we will end up
* searching the labels. Doing so in the common case means
* that this code path gets exercised normally, rather than
* just in the edge case.
*/
if (ub->ub_checkpoint_txg != 0 &&
spa_importing_readonly_checkpoint(spa)) {
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
/*
* Find the best uberblock.
*/
vdev_uberblock_load(rvd, ub, &label);
/*
* If we weren't able to find a single valid uberblock, return failure.
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
spa_load_failed(spa, "no valid uberblock found");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
if (spa->spa_load_max_txg != UINT64_MAX) {
(void) spa_import_progress_set_max_txg(spa_guid(spa),
(u_longlong_t)spa->spa_load_max_txg);
}
spa_load_note(spa, "using uberblock with txg=%llu",
(u_longlong_t)ub->ub_txg);
/*
* For pools which have the multihost property on, determine if the
* pool is truly inactive and can be safely imported. Prevent
* hosts which don't have a hostid set from importing the pool.
*/
activity_check = spa_activity_check_required(spa, ub, label,
spa->spa_config);
if (activity_check) {
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
spa_get_hostid(spa) == 0) {
nvlist_free(label);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
int error = spa_activity_check(spa, ub, spa->spa_config);
if (error) {
nvlist_free(label);
return (error);
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
fnvlist_add_uint16(spa->spa_load_info,
ZPOOL_CONFIG_MMP_SEQ,
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
}
/*
* If the pool has an unsupported version, we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
spa_load_failed(spa, "version %llu is not supported",
(u_longlong_t)ub->ub_version);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *features;
/*
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
if (label == NULL) {
spa_load_failed(spa, "label config unavailable");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) != 0) {
nvlist_free(label);
spa_load_failed(spa, "invalid label: '%s' missing",
ZPOOL_CONFIG_FEATURES_FOR_READ);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
/*
* Update our in-core representation with the definitive values
* from the label.
*/
nvlist_free(spa->spa_label_features);
spa->spa_label_features = fnvlist_dup(features);
}
nvlist_free(label);
/*
* Look through entries in the label nvlist's features_for_read. If
* there is a feature listed there which we don't understand, then we
* cannot open the pool.
*/
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *unsup_feat;
unsup_feat = fnvlist_alloc();
for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
NULL); nvp != NULL;
nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
if (!zfeature_is_supported(nvpair_name(nvp))) {
fnvlist_add_string(unsup_feat,
nvpair_name(nvp), "");
}
}
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
nvlist_free(unsup_feat);
spa_load_failed(spa, "some features are unsupported");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
nvlist_free(unsup_feat);
}
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_try_repair(spa, spa->spa_config);
spa_config_exit(spa, SCL_ALL, FTAG);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
}
/*
* Initialize internal SPA structures.
*/
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
static int
spa_ld_open_rootbp(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
if (error != 0) {
spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
return (0);
}
static int
spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t reloading)
{
vdev_t *mrvd, *rvd = spa->spa_root_vdev;
nvlist_t *nv, *mos_config, *policy;
int error = 0, copy_error;
uint64_t healthy_tvds, healthy_tvds_mos;
uint64_t mos_config_txg;
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
!= 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* If we're assembling a pool from a split, the config provided is
* already trusted so there is nothing to do.
*/
if (type == SPA_IMPORT_ASSEMBLE)
return (0);
healthy_tvds = spa_healthy_core_tvds(spa);
if (load_nvlist(spa, spa->spa_config_object, &mos_config)
!= 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* If we are doing an open, the pool owner wasn't verified yet, so do
* the verification here.
*/
if (spa->spa_load_state == SPA_LOAD_OPEN) {
error = spa_verify_host(spa, mos_config);
if (error != 0) {
nvlist_free(mos_config);
return (error);
}
}
nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Build a new vdev tree from the trusted config
*/
error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
if (error != 0) {
nvlist_free(mos_config);
spa_config_exit(spa, SCL_ALL, FTAG);
spa_load_failed(spa, "spa_config_parse failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Vdev paths in the MOS may be obsolete. If the untrusted config was
* obtained by scanning /dev/dsk, then it will have the right vdev
* paths. We update the trusted MOS config with this information.
* We first try to copy the paths with vdev_copy_path_strict, which
* succeeds only when both configs have exactly the same vdev tree.
* If that fails, we fall back to a more flexible, best-effort copy
* method.
*/
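/*
 * For example (hypothetical device names): a leaf that was at
 * /dev/da3 when the MOS config was last synced but shows up as
 * /dev/da5 in the scanned config would otherwise keep its stale
 * path in the trusted config.
 */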
copy_error = vdev_copy_path_strict(rvd, mrvd);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "provided vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
spa_load_note(spa, "MOS vdev tree:");
vdev_dbgmsg_print_tree(mrvd, 2);
}
if (copy_error != 0) {
spa_load_note(spa, "vdev_copy_path_strict failed, falling "
"back to vdev_copy_path_relaxed");
vdev_copy_path_relaxed(rvd, mrvd);
}
vdev_close(rvd);
vdev_free(rvd);
spa->spa_root_vdev = mrvd;
rvd = mrvd;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* We will use spa_config if we decide to reload the spa or if spa_load
* fails and we rewind. We must thus regenerate the config using the
* MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
* pass settings on how to load the pool and is not stored in the MOS.
* We copy it over to our new, trusted config.
*/
mos_config_txg = fnvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_POOL_TXG);
nvlist_free(mos_config);
mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
&policy) == 0)
fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
spa_config_set(spa, mos_config);
spa->spa_config_source = SPA_CONFIG_SRC_MOS;
/*
* Now that we have the config from the MOS, we should be more strict
* in checking blkptrs and can make assumptions about the consistency
* of the vdev tree. spa_trust_config must be set to true before opening
* vdevs in order for them to be writeable.
*/
spa->spa_trust_config = B_TRUE;
/*
* Open and validate the new vdev tree
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "final vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
}
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
!spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
/*
* Sanity check to make sure that we are indeed loading the
* latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
* in the config provided and they happened to be the only ones
* to have the latest uberblock, we could involuntarily perform
* an extreme rewind.
*/
healthy_tvds_mos = spa_healthy_core_tvds(spa);
if (healthy_tvds_mos - healthy_tvds >=
SPA_SYNC_MIN_VDEVS) {
spa_load_note(spa, "config provided misses too many "
"top-level vdevs compared to MOS (%lld vs %lld). ",
(u_longlong_t)healthy_tvds,
(u_longlong_t)healthy_tvds_mos);
spa_load_note(spa, "vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
if (reloading) {
spa_load_failed(spa, "config was already "
"provided from MOS. Aborting.");
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_load_note(spa, "spa must be reloaded using MOS "
"config");
return (SET_ERROR(EAGAIN));
}
}
error = spa_check_for_missing_logs(spa);
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
"guid sum (%llu != %llu)",
(u_longlong_t)spa->spa_uberblock.ub_guid_sum,
(u_longlong_t)rvd->vdev_guid_sum);
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
ENXIO));
}
return (0);
}
static int
spa_ld_open_indirect_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* Everything that we read before spa_remove_init() must be stored
* on concrete vdevs. Therefore we do this as early as possible.
*/
error = spa_remove_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_remove_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Retrieve information needed to condense indirect vdev mappings.
*/
error = spa_condense_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_condense_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
return (0);
}
static int
spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
if (spa_version(spa) >= SPA_VERSION_FEATURES) {
boolean_t missing_feat_read = B_FALSE;
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
&spa->spa_feat_for_write_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
&spa->spa_feat_desc_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
enabled_feat = fnvlist_alloc();
unsup_feat = fnvlist_alloc();
if (!spa_features_check(spa, B_FALSE,
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
if (spa_writeable(spa) ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
*missing_feat_writep = B_TRUE;
}
}
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
}
fnvlist_free(enabled_feat);
fnvlist_free(unsup_feat);
if (!missing_feat_read) {
fnvlist_add_boolean(spa->spa_load_info,
ZPOOL_CONFIG_CAN_RDONLY);
}
/*
* If the state is SPA_LOAD_TRYIMPORT, our objective is
* twofold: to determine whether the pool is available for
* import in read-write mode and (if it is not) whether the
* pool is available for import in read-only mode. If the pool
* is available for import in read-write mode, it is displayed
* as available in userland; if it is not available for import
* in read-only mode, it is displayed as unavailable in
* userland. If the pool is available for import in read-only
* mode but not read-write mode, it is displayed as unavailable
* in userland with a special note that the pool is actually
* available for open in read-only mode.
*
* As a result, if the state is SPA_LOAD_TRYIMPORT and we are
* missing a feature for write, we must first determine whether
* the pool can be opened read-only before returning to
* userland in order to know whether to display the
* abovementioned note.
*/
if (missing_feat_read || (*missing_feat_writep &&
spa_writeable(spa))) {
spa_load_failed(spa, "pool uses unsupported features");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Load refcounts for ZFS features from disk into an in-memory
* cache during SPA initialization.
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
uint64_t refcount;
error = feature_get_refcount_from_disk(spa,
&spa_feature_table[i], &refcount);
if (error == 0) {
spa->spa_feat_refcount_cache[i] = refcount;
} else if (error == ENOTSUP) {
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
spa_load_failed(spa, "error getting refcount "
"for feature %s [error=%d]",
spa_feature_table[i].fi_guid, error);
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
}
}
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
&spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Encryption was added before bookmark_v2, even though bookmark_v2
* is now a dependency. If this pool has encryption enabled without
* bookmark_v2, trigger an errata message.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
return (0);
}
static int
spa_ld_load_special_directories(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
if (error != 0) {
spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
static int
spa_ld_get_props(spa_t *spa)
{
int error = 0;
uint64_t obj;
vdev_t *rvd = spa->spa_root_vdev;
/* Grab the checksum salt from the MOS. */
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes);
if (error == ENOENT) {
/* Generate a new salt for subsequent use */
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
spa_load_failed(spa, "unable to retrieve checksum salt from "
"MOS [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
if (error != 0) {
spa_load_failed(spa, "error opening deferred-frees bpobj "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
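/*
 * Roughly speaking, "deflation" normalizes raid-z space accounting so
 * that allocated sizes are reported net of parity overhead; see the
 * spa_update_dspace() call later in the load path.
 */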
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
&spa->spa_creation_version, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
&spa->spa_errlog_scrub, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the livelist deletion field. If a livelist is queued for
* deletion, indicate that in the spa.
*/
error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
&spa->spa_livelists_to_delete, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the history object. If we have an older pool, this
* will not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the per-vdev ZAP map. If we have an older pool, this will not
* be present; in this case, defer its creation to a later time to
* avoid dirtying the MOS this early / out of sync context. See
* spa_sync_config_object.
*/
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
&spa->spa_all_vdev_zaps, B_FALSE);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
} else if (error != 0) {
nvlist_free(mos_config);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
/*
* An older version of ZFS overwrote the sentinel value, so
* we have orphaned per-vdev ZAPs in the MOS. Defer their
* destruction to later; see spa_sync_config_object.
*/
spa->spa_avz_action = AVZ_ACTION_DESTROY;
/*
* We're assuming that no vdevs have had their ZAPs created
* before this. Better be sure of it.
*/
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
}
nvlist_free(mos_config);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
B_FALSE);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0) {
uint64_t autoreplace = 0;
spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
spa->spa_autoreplace = (autoreplace != 0);
}
/*
* If we are importing a pool with missing top-level vdevs,
* we enforce that the pool doesn't panic or get suspended on
* error since the likelihood of missing data is extremely high.
*/
if (spa->spa_missing_tvds > 0 &&
spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_load_note(spa, "forcing failmode to 'continue' "
"as some top level vdevs are missing");
spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
}
return (0);
}
static int
spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're assembling the pool from the split-off vdevs of
* an existing pool, we don't want to attach the spares & cache
* devices.
*/
/*
* Load any hot spares for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
&spa->spa_spares.sav_config) != 0) {
spa_load_failed(spa, "error loading spares nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
&spa->spa_l2cache.sav_object, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
&spa->spa_l2cache.sav_config) != 0) {
spa_load_failed(spa, "error loading l2cache nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_l2cache.sav_sync = B_TRUE;
}
return (0);
}
static int
spa_ld_load_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If the 'multihost' property is set, then never allow a pool to
* be imported when the system hostid is zero. The exception to
* this rule is zdb, which is always allowed to access pools.
*/
if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
(spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
/*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
if (spa->spa_load_state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
}
/*
* Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
*/
error = vdev_load(rvd);
if (error != 0) {
spa_load_failed(spa, "vdev_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
error = spa_ld_log_spacemaps(spa);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_spacemaps failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Propagate the leaf DTLs we just loaded all the way up the vdev tree.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
spa_config_exit(spa, SCL_ALL, FTAG);
return (0);
}
static int
spa_ld_load_dedup_tables(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = ddt_load(spa);
if (error != 0) {
spa_load_failed(spa, "ddt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
static int
spa_ld_load_brt(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = brt_load(spa);
if (error != 0) {
spa_load_failed(spa, "brt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
static int
spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport)
{
vdev_t *rvd = spa->spa_root_vdev;
if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
boolean_t missing = spa_check_logs(spa);
if (missing) {
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "spa_check_logs failed "
"so dropping the logs");
} else {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
spa_load_failed(spa, "spa_check_logs failed");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
ENXIO));
}
}
}
return (0);
}
static int
spa_ld_verify_pool_data(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We've successfully opened the pool; verify that we're ready
* to start pushing transactions.
*/
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
error = spa_load_verify(spa);
if (error != 0) {
spa_load_failed(spa, "spa_load_verify failed "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
}
return (0);
}
static void
spa_ld_claim_log_blocks(spa_t *spa)
{
dmu_tx_t *tx;
dsl_pool_t *dp = spa_get_dsl(spa);
/*
* Claim log blocks that haven't been committed yet.
* This must all happen in a single txg.
* Note: spa_claim_max_txg is updated by spa_claim_notify(),
* invoked from zil_claim_log_block()'s i/o done callback.
* The price of rollback is that we abandon the log.
*/
spa->spa_claiming = B_TRUE;
tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
spa->spa_claiming = B_FALSE;
spa_set_log_state(spa, SPA_LOG_GOOD);
}
static void
spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
boolean_t update_config_cache)
{
vdev_t *rvd = spa->spa_root_vdev;
int need_update = B_FALSE;
/*
* If the config cache is stale, or we have uninitialized
* metaslabs (see spa_vdev_add()), then update the config.
*
* If this is a verbatim import, trust the current
* in-core spa_config and update the disk labels.
*/
if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
/*
* Update the config cache asynchronously in case we're the
* root pool, in which case the config cache isn't writable yet.
*/
if (need_update)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
static void
spa_ld_prepare_for_reload(spa_t *spa)
{
spa_mode_t mode = spa->spa_mode;
int async_suspended = spa->spa_async_suspended;
spa_unload(spa);
spa_deactivate(spa);
spa_activate(spa, mode);
/*
* We save the value of spa_async_suspended as it gets reset to 0 by
* spa_unload(). We want to restore it to its original value before
* returning, as we might be calling spa_async_resume() later.
*/
spa->spa_async_suspended = async_suspended;
}
static int
spa_ld_read_checkpoint_txg(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT0(spa->spa_checkpoint_txg);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
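/*
 * The checkpointed uberblock is stored in the MOS directory object as
 * an array of uint64_t words, which is why the lookup below passes an
 * integer size of sizeof (uint64_t) and a count of
 * sizeof (uberblock_t) / sizeof (uint64_t).
 */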
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT)
return (0);
if (error != 0)
return (error);
ASSERT3U(checkpoint.ub_txg, !=, 0);
ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
ASSERT3U(checkpoint.ub_timestamp, !=, 0);
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
return (0);
}
static int
spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
/*
* Never trust the config that is provided unless we are assembling
* a pool following a split.
* This means don't trust blkptrs and the vdev tree in general. This
* also effectively puts the spa in read-only mode since
* spa_writeable() checks for spa_trust_config to be true.
* We will later load a trusted config from the MOS.
*/
if (type != SPA_IMPORT_ASSEMBLE)
spa->spa_trust_config = B_FALSE;
/*
* Parse the config provided to create a vdev tree.
*/
error = spa_ld_parse_config(spa, type);
if (error != 0)
return (error);
spa_import_progress_add(spa);
/*
* Now that we have the vdev tree, try to open each vdev. This involves
* opening the underlying physical device, retrieving its geometry and
* probing the vdev with a dummy I/O. The state of each vdev will be set
* based on the success of those operations. After this we'll be ready
* to read from the vdevs.
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
/*
* Read the label of each vdev and make sure that the GUIDs stored
* there match the GUIDs in the config provided.
* If we're assembling a new pool that's been split off from an
* existing pool, the labels haven't yet been updated so we skip
* validation for now.
*/
if (type != SPA_IMPORT_ASSEMBLE) {
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
}
/*
* Read all vdev labels to find the best uberblock (i.e. latest,
* unless spa_load_max_txg is set) and store it in spa_uberblock. We
* get the list of features required to read blkptrs in the MOS from
* the vdev label with the best uberblock and verify that our version
* of zfs supports them all.
*/
error = spa_ld_select_uberblock(spa, type);
if (error != 0)
return (error);
/*
* Pass that uberblock to the dsl_pool layer which will open the root
* blkptr. This blkptr points to the latest version of the MOS and will
* allow us to read its contents.
*/
error = spa_ld_open_rootbp(spa);
if (error != 0)
return (error);
return (0);
}
static int
spa_ld_checkpoint_rewind(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error != 0) {
spa_load_failed(spa, "unable to retrieve checkpointed "
"uberblock from the MOS config [error=%d]", error);
if (error == ENOENT)
error = ZFS_ERR_NO_CHECKPOINT;
return (error);
}
ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
/*
* We need to update the txg and timestamp of the checkpointed
* uberblock to be higher than the latest one. This ensures that
* the checkpointed uberblock is selected if we were to close and
* reopen the pool right after we've written it in the vdev labels.
* (also see block comment in vdev_uberblock_compare)
*/
checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
checkpoint.ub_timestamp = gethrestime_sec();
/*
* Set current uberblock to be the checkpointed uberblock.
*/
spa->spa_uberblock = checkpoint;
/*
* If we are doing a normal rewind, then the pool is open for
* writing and we sync the "updated" checkpointed uberblock to
* disk. Once this is done, we've basically rewound the whole
* pool and there is no way back.
*
* There are cases when we don't want to attempt to sync the
* checkpointed uberblock to disk because we are opening a
* pool as read-only. Specifically, verifying the checkpointed
* state with zdb, and importing the checkpointed state to get
* a "preview" of its content.
*/
if (spa_writeable(spa)) {
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
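/*
 * Pick up to SPA_SYNC_MIN_VDEVS concrete, non-log top-level vdevs
 * that have a metaslab array to receive the label writes, starting
 * at a random child so the choice is spread across the pool.
 */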
for (int c = 0; c < children; c++) {
vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "failed to write checkpointed "
"uberblock to the vdev labels [error=%d]", error);
return (error);
}
}
return (0);
}
static int
spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t *update_config_cache)
{
int error;
/*
* Parse the config for pool, open and validate vdevs,
* select an uberblock, and use that uberblock to open
* the MOS.
*/
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
/*
* Retrieve the trusted config stored in the MOS and use it to create
* a new, exact version of the vdev tree, then reopen all vdevs.
*/
error = spa_ld_trusted_config(spa, type, B_FALSE);
if (error == EAGAIN) {
if (update_config_cache != NULL)
*update_config_cache = B_TRUE;
/*
* Redo the loading process with the trusted config if it is
* too different from the untrusted config.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "RELOADING");
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
error = spa_ld_trusted_config(spa, type, B_TRUE);
if (error != 0)
return (error);
} else if (error != 0) {
return (error);
}
return (0);
}
/*
* Load an existing storage pool, using the config provided. This config
* describes which vdevs are part of the pool and is later validated against
* partial configs present in each vdev's label and an entire copy of the
* config stored in the MOS.
*/
static int
spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
{
int error = 0;
boolean_t missing_feat_write = B_FALSE;
boolean_t checkpoint_rewind =
(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
boolean_t update_config_cache = B_FALSE;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
spa_load_note(spa, "LOADING");
error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
if (error != 0)
return (error);
/*
* If we are rewinding to the checkpoint then we need to repeat
* everything we've done so far in this function but this time
* selecting the checkpointed uberblock and using that to open
* the MOS.
*/
if (checkpoint_rewind) {
/*
* If we are rewinding to the checkpoint update config cache
* anyway.
*/
update_config_cache = B_TRUE;
/*
* Extract the checkpointed uberblock from the current MOS
* and use this as the pool's uberblock from now on. If the
* pool is imported as writeable we also write the checkpoint
* uberblock to the labels, making the rewind permanent.
*/
error = spa_ld_checkpoint_rewind(spa);
if (error != 0)
return (error);
/*
* Redo the loading process again with the
* checkpointed uberblock.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "LOADING checkpointed uberblock");
error = spa_ld_mos_with_trusted_config(spa, type, NULL);
if (error != 0)
return (error);
}
/*
* Retrieve the checkpoint txg if the pool has a checkpoint.
*/
error = spa_ld_read_checkpoint_txg(spa);
if (error != 0)
return (error);
/*
* Retrieve the mapping of indirect vdevs. Those vdevs were removed
* from the pool and their contents were re-mapped to other vdevs. Note
* that everything that we read before this step must have been
* rewritten on concrete vdevs after the last device removal was
* initiated. Otherwise we could be reading from indirect vdevs before
* we have loaded their mappings.
*/
error = spa_ld_open_indirect_vdev_metadata(spa);
if (error != 0)
return (error);
/*
* Retrieve the full list of active features from the MOS and check if
* they are all supported.
*/
error = spa_ld_check_features(spa, &missing_feat_write);
if (error != 0)
return (error);
/*
* Load several special directories from the MOS needed by the dsl_pool
* layer.
*/
error = spa_ld_load_special_directories(spa);
if (error != 0)
return (error);
/*
* Retrieve pool properties from the MOS.
*/
error = spa_ld_get_props(spa);
if (error != 0)
return (error);
/*
* Retrieve the list of auxiliary devices - cache devices and spares -
* and open them.
*/
error = spa_ld_open_aux_vdevs(spa, type);
if (error != 0)
return (error);
/*
* Load the metadata for all vdevs. Also check if unopenable devices
* should be autoreplaced.
*/
error = spa_ld_load_vdev_metadata(spa);
if (error != 0)
return (error);
error = spa_ld_load_dedup_tables(spa);
if (error != 0)
return (error);
error = spa_ld_load_brt(spa);
if (error != 0)
return (error);
/*
* Verify the logs now to make sure we don't have any unexpected errors
* when we claim log blocks later.
*/
error = spa_ld_verify_logs(spa, type, ereport);
if (error != 0)
return (error);
if (missing_feat_write) {
ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
/*
* At this point, we know that we can open the pool in
* read-only mode but not read-write mode. We now have enough
* information and can return to userland.
*/
return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Traverse the last txgs to make sure the pool was left off in a safe
* state. When performing an extreme rewind, we verify the whole pool,
* which can take a very long time.
*/
error = spa_ld_verify_pool_data(spa);
if (error != 0)
return (error);
/*
* Calculate the deflated space for the pool. This must be done before
* we write anything to the pool because we'd need to update the space
* accounting using the deflated sizes.
*/
spa_update_dspace(spa);
/*
* We have now retrieved all the information we needed to open the
* pool. If we are importing the pool in read-write mode, a few
* additional steps must be performed to finish the import.
*/
if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
spa->spa_load_max_txg == UINT64_MAX)) {
uint64_t config_cache_txg = spa->spa_config_txg;
ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
/*
* In case of a checkpoint rewind, log the original txg
* of the checkpointed uberblock.
*/
if (checkpoint_rewind) {
spa_history_log_internal(spa, "checkpoint rewind",
NULL, "rewound state to txg=%llu",
(u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
}
/*
* Traverse the ZIL and claim all blocks.
*/
spa_ld_claim_log_blocks(spa);
/*
* Kick-off the syncing thread.
*/
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
* claimed log block birth time so that claimed log blocks
* don't appear to be from the future. spa_claim_max_txg
* will have been set for us by ZIL traversal operations
* performed above.
*/
txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
/*
* Check if we need to request an update of the config. On the
* next sync, we would update the config stored in vdev labels
* and the cachefile (by default /etc/zfs/zpool.cache).
*/
spa_ld_check_for_config_update(spa, config_cache_txg,
update_config_cache);
/*
* Check if a rebuild was in progress and if so resume it.
* Then check all DTLs to see if anything needs resilvering.
* The resilver will be deferred if a rebuild was started.
*/
if (vdev_rebuild_active(spa->spa_root_vdev)) {
vdev_rebuild_restart(spa);
} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/*
* Log the fact that we booted up (so that we can detect if
* we rebooted in the middle of an operation).
*/
spa_history_log_version(spa, "open", NULL);
spa_restart_removal(spa);
spa_spawn_aux_threads(spa);
/*
* Delete any inconsistent datasets.
*
* Note:
* Since we may be issuing deletes for clones here,
* we make sure to do so after we've spawned all the
* auxiliary threads above (of which the livelist
* deletion zthr is one).
*/
(void) dmu_objset_find(spa_name(spa),
dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
/*
* Clean up any stale temporary dataset userrefs.
*/
dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
vdev_trim_restart(spa->spa_root_vdev);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_import_progress_remove(spa_guid(spa));
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_load_note(spa, "LOADED");
return (0);
}
static int
spa_load_retry(spa_t *spa, spa_load_state_t state)
{
spa_mode_t mode = spa->spa_mode;
spa_unload(spa);
spa_deactivate(spa);
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
spa_activate(spa, mode);
spa_async_suspend(spa);
spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
(u_longlong_t)spa->spa_load_max_txg);
return (spa_load(spa, state, SPA_IMPORT_EXISTING));
}
/*
* If spa_load() fails this function will try loading prior txg's. If
* 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
* will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
* function will not rewind the pool and will return the same error as
* spa_load().
*/
static int
spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
int rewind_flags)
{
nvlist_t *loadinfo = NULL;
nvlist_t *config = NULL;
int load_error, rewind_error;
uint64_t safe_rewind_txg;
uint64_t min_txg;
if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
spa->spa_load_max_txg = spa->spa_load_txg;
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
spa->spa_load_max_txg = max_request;
if (max_request != UINT64_MAX)
spa->spa_extreme_rewind = B_TRUE;
}
load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
if (load_error == 0)
return (0);
if (load_error == ZFS_ERR_NO_CHECKPOINT) {
/*
* When attempting checkpoint-rewind on a pool with no
* checkpoint, we should not attempt to load uberblocks
* from previous txgs when spa_load fails.
*/
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (spa->spa_root_vdev != NULL)
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
if (rewind_flags & ZPOOL_NEVER_REWIND) {
nvlist_free(config);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (state == SPA_LOAD_RECOVER) {
/* Price of rolling back is discarding txgs, including log */
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
/*
* If we aren't rolling back save the load info from our first
* import attempt so that we can restore it after attempting
* to rewind.
*/
loadinfo = spa->spa_load_info;
spa->spa_load_info = fnvlist_alloc();
}
spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
TXG_INITIAL : safe_rewind_txg;
/*
* Continue as long as we're finding errors, we're still within
* the acceptable rewind range, and we're still finding uberblocks
*/
while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
if (spa->spa_load_max_txg < safe_rewind_txg)
spa->spa_extreme_rewind = B_TRUE;
rewind_error = spa_load_retry(spa, state);
}
spa->spa_extreme_rewind = B_FALSE;
spa->spa_load_max_txg = UINT64_MAX;
if (config && (rewind_error || state != SPA_LOAD_RECOVER))
spa_config_set(spa, config);
else
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
ASSERT3P(loadinfo, ==, NULL);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
/* Store the rewind info as part of the initial load info */
fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
spa->spa_load_info);
/* Restore the initial load info */
fnvlist_free(spa->spa_load_info);
spa->spa_load_info = loadinfo;
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
}
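/*
 * Worked example, for illustration only (assuming TXG_DEFER_SIZE is 2):
 * if the last synced uberblock is txg 1000, then safe_rewind_txg is 998.
 * Without ZPOOL_EXTREME_REWIND the retry loop above never rewinds below
 * txg 998; with it, the loop may walk all the way back to TXG_INITIAL,
 * and spa_extreme_rewind is set once spa_load_max_txg drops below the
 * safe boundary.
 */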
/*
* Pool Open/Import
*
* The import case is identical to an open except that the configuration is sent
* down from userland, instead of grabbed from the configuration cache. For the
* case of an open, the pool configuration will exist in the
* POOL_STATE_UNINITIALIZED state.
*
* The stats information (gen/count/ustats) is used to gather vdev statistics at
* the same time as opening the pool, without having to keep the spa_t around in
* some ambiguous state.
*/
static int
spa_open_common(const char *pool, spa_t **spapp, const void *tag,
nvlist_t *nvpolicy, nvlist_t **config)
{
spa_t *spa;
spa_load_state_t state = SPA_LOAD_OPEN;
int error;
int locked = B_FALSE;
int firstopen = B_FALSE;
*spapp = NULL;
/*
* As disgusting as this is, we need to support recursive calls to this
* function because dsl_dir_open() is called during spa_load(), and ends
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
zpool_load_policy_t policy;
firstopen = B_TRUE;
zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
&policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa_activate(spa, spa_mode_global);
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
zfs_dbgmsg("spa_open_common: opening %s", pool);
error = spa_load_best(spa, state, policy.zlp_txg,
policy.zlp_rewind);
if (error == EBADF) {
/*
* If vdev_validate() returns failure (indicated by
* EBADF), it means that one of the vdevs indicates
* that the pool has been exported or destroyed. If
* this is the case, the config cache is out of sync and
* we should remove the pool from the namespace.
*/
spa_unload(spa);
spa_deactivate(spa);
spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (error) {
/*
* We can't open the pool, but we still have useful
* information: the state of each vdev after the
* attempted vdev_open(). Return this to the user.
*/
if (config != NULL && spa->spa_config) {
*config = fnvlist_dup(spa->spa_config);
fnvlist_add_nvlist(*config,
ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
}
spa_unload(spa);
spa_deactivate(spa);
spa->spa_last_open_failed = error;
if (locked)
mutex_exit(&spa_namespace_lock);
*spapp = NULL;
return (error);
}
}
spa_open_ref(spa, tag);
if (config != NULL)
*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
/*
* If we've recovered the pool, pass back any information we
* gathered while doing the load.
*/
if (state == SPA_LOAD_RECOVER && config != NULL) {
fnvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
}
if (locked) {
spa->spa_last_open_failed = 0;
spa->spa_last_ubsync_txg = 0;
spa->spa_load_txg = 0;
mutex_exit(&spa_namespace_lock);
}
if (firstopen)
zvol_create_minors_recursive(spa_name(spa));
*spapp = spa;
return (0);
}
int
spa_open_rewind(const char *name, spa_t **spapp, const void *tag,
nvlist_t *policy, nvlist_t **config)
{
return (spa_open_common(name, spapp, tag, policy, config));
}
int
spa_open(const char *name, spa_t **spapp, const void *tag)
{
return (spa_open_common(name, spapp, tag, NULL, NULL));
}
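/*
 * Illustrative usage sketch (pool name is an example only): in-kernel
 * callers pair spa_open() with spa_close() using the same tag, e.g.
 *
 *	spa_t *spa;
 *	int error = spa_open("tank", &spa, FTAG);
 *	if (error == 0) {
 *		... use the pool, e.g. spa_version(spa) ...
 *		spa_close(spa, FTAG);
 *	}
 */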
/*
* Lookup the given spa_t, incrementing the inject count in the process,
* preventing it from being exported or destroyed.
*/
spa_t *
spa_inject_addref(char *name)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (NULL);
}
spa->spa_inject_ref++;
mutex_exit(&spa_namespace_lock);
return (spa);
}
void
spa_inject_delref(spa_t *spa)
{
mutex_enter(&spa_namespace_lock);
spa->spa_inject_ref--;
mutex_exit(&spa_namespace_lock);
}
/*
* Add spares device information to the nvlist.
*/
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
nvlist_t **spares;
uint_t i, nspares;
nvlist_t *nvroot;
uint64_t guid;
vdev_stat_t *vs;
uint_t vsc;
uint64_t pool;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_spares.sav_count == 0)
return;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares));
if (nspares != 0) {
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
(const nvlist_t * const *)spares, nspares);
VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares));
/*
* Go through and find any spares which have since been
* repurposed as an active spare. If this is the case, update
* their status appropriately.
*/
for (i = 0; i < nspares; i++) {
guid = fnvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID);
VERIFY0(nvlist_lookup_uint64_array(spares[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
if (spa_spare_exists(guid, &pool, NULL) &&
pool != 0ULL) {
vs->vs_state = VDEV_STATE_CANT_OPEN;
vs->vs_aux = VDEV_AUX_SPARED;
} else {
vs->vs_state =
spa->spa_spares.sav_vdevs[i]->vdev_state;
}
}
}
}
/*
* Add l2cache device information to the nvlist, including vdev stats.
*/
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
nvlist_t **l2cache;
uint_t i, j, nl2cache;
nvlist_t *nvroot;
uint64_t guid;
vdev_t *vd;
vdev_stat_t *vs;
uint_t vsc;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_l2cache.sav_count == 0)
return;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
VERIFY0(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache));
if (nl2cache != 0) {
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
(const nvlist_t * const *)l2cache, nl2cache);
VERIFY0(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache));
/*
* Update level 2 cache device stats.
*/
for (i = 0; i < nl2cache; i++) {
guid = fnvlist_lookup_uint64(l2cache[i],
ZPOOL_CONFIG_GUID);
vd = NULL;
for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
if (guid ==
spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
vd = spa->spa_l2cache.sav_vdevs[j];
break;
}
}
ASSERT(vd != NULL);
VERIFY0(nvlist_lookup_uint64_array(l2cache[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc));
vdev_get_stats(vd, vs);
vdev_config_generate_stats(vd, l2cache[i]);
}
}
}
static void
spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
zap_cursor_t zc;
zap_attribute_t za;
if (spa->spa_feat_for_read_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_read_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
if (spa->spa_feat_for_write_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_write_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
}
static void
spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
{
int i;
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t feature = spa_feature_table[i];
uint64_t refcount;
if (feature_get_refcount(spa, &feature, &refcount) != 0)
continue;
VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
}
}
/*
* Store a list of pool features and their reference counts in the
* config.
*
* The first time this is called on a spa, allocate a new nvlist, fetch
* the pool features and reference counts from disk, then save the list
* in the spa. In subsequent calls on the same spa use the saved nvlist
* and refresh its values from the cached reference counts. This
* ensures we don't block here on I/O on a suspended pool so 'zpool
* clear' can resume the pool.
*/
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
nvlist_t *features;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
mutex_enter(&spa->spa_feat_stats_lock);
features = spa->spa_feat_stats;
if (features != NULL) {
spa_feature_stats_from_cache(spa, features);
} else {
VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
spa->spa_feat_stats = features;
spa_feature_stats_from_disk(spa, features);
}
VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
features));
mutex_exit(&spa->spa_feat_stats_lock);
}
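/*
 * For illustration only: the ZPOOL_CONFIG_FEATURE_STATS entry added above
 * is an nvlist mapping feature guids to uint64 reference counts, e.g. a
 * pool might report "com.delphix:hole_birth" -> 1 for an active feature
 * and 0 for one that is merely enabled (the guid shown is just an example).
 */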
int
spa_get_stats(const char *name, nvlist_t **config,
char *altroot, size_t buflen)
{
int error;
spa_t *spa;
*config = NULL;
error = spa_open_common(name, &spa, FTAG, NULL, config);
if (spa != NULL) {
/*
* This still leaves a window of inconsistency where the spares
* or l2cache devices could change and the config would be
* self-inconsistent.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
if (*config != NULL) {
uint64_t loadtimes[2];
loadtimes[0] = spa->spa_loaded_ts.tv_sec;
loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
fnvlist_add_uint64_array(*config,
ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2);
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_ERRCOUNT,
spa_approx_errlog_size(spa));
if (spa_suspended(spa)) {
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode);
fnvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED_REASON,
spa->spa_suspended);
}
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
spa_add_feature_stats(spa, *config);
}
}
/*
* We want to get the alternate root even for faulted pools, so we cheat
* and call spa_lookup() directly.
*/
if (altroot) {
if (spa == NULL) {
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(name);
if (spa)
spa_altroot(spa, altroot, buflen);
else
altroot[0] = '\0';
spa = NULL;
mutex_exit(&spa_namespace_lock);
} else {
spa_altroot(spa, altroot, buflen);
}
}
if (spa != NULL) {
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_close(spa, FTAG);
}
return (error);
}
/*
* Validate that the auxiliary device array is well formed. We must have an
* array of nvlists, each of which describes a valid leaf vdev. If this is an
* import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
* specified, as long as they are well-formed.
*/
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
spa_aux_vdev_t *sav, const char *config, uint64_t version,
vdev_labeltype_t label)
{
nvlist_t **dev;
uint_t i, ndev;
vdev_t *vd;
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* It's acceptable to have no devs specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
return (0);
if (ndev == 0)
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
* checking.
*/
sav->sav_pending = dev;
sav->sav_npending = ndev;
for (i = 0; i < ndev; i++) {
if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
mode)) != 0)
goto out;
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = SET_ERROR(EINVAL);
goto out;
}
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
(error = vdev_label_init(vd, crtxg, label)) == 0) {
fnvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
vd->vdev_guid);
}
vdev_free(vd);
if (error &&
(mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
goto out;
else
error = 0;
}
out:
sav->sav_pending = NULL;
sav->sav_npending = 0;
return (error);
}
static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
VDEV_LABEL_SPARE)) != 0) {
return (error);
}
return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
VDEV_LABEL_L2CACHE));
}
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
const char *config)
{
int i;
if (sav->sav_config != NULL) {
nvlist_t **olddevs;
uint_t oldndevs;
nvlist_t **newdevs;
/*
* Generate new dev list by concatenating with the
* current dev list.
*/
VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config, config,
&olddevs, &oldndevs));
newdevs = kmem_alloc(sizeof (void *) *
(ndevs + oldndevs), KM_SLEEP);
for (i = 0; i < oldndevs; i++)
newdevs[i] = fnvlist_dup(olddevs[i]);
for (i = 0; i < ndevs; i++)
newdevs[i + oldndevs] = fnvlist_dup(devs[i]);
fnvlist_remove(sav->sav_config, config);
fnvlist_add_nvlist_array(sav->sav_config, config,
(const nvlist_t * const *)newdevs, ndevs + oldndevs);
for (i = 0; i < oldndevs + ndevs; i++)
nvlist_free(newdevs[i]);
kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
} else {
/*
* Generate a new dev list.
*/
sav->sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(sav->sav_config, config,
(const nvlist_t * const *)devs, ndevs);
}
}
/*
* Stop and drop level 2 ARC devices
*/
void
spa_l2cache_drop(spa_t *spa)
{
vdev_t *vd;
int i;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
for (i = 0; i < sav->sav_count; i++) {
uint64_t pool;
vd = sav->sav_vdevs[i];
ASSERT(vd != NULL);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
}
}
/*
* Verify encryption parameters for spa creation. If we are encrypting, we must
* have the encryption feature flag enabled.
*/
static int
spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
boolean_t has_encryption)
{
if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
!has_encryption)
return (SET_ERROR(ENOTSUP));
return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
}
/*
* Pool Creation
*/
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, dsl_crypto_params_t *dcp)
{
spa_t *spa;
const char *altroot = NULL;
vdev_t *rvd;
dsl_pool_t *dp;
dmu_tx_t *tx;
int error = 0;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
uint64_t version, obj, ndraid = 0;
boolean_t has_features;
boolean_t has_encryption;
boolean_t has_allocclass;
spa_feature_t feat;
const char *feat_name;
const char *poolname;
nvlist_t *nvl;
if (props == NULL ||
nvlist_lookup_string(props, "tname", &poolname) != 0)
poolname = (char *)pool;
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Allocate a new spa_t structure.
*/
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
spa = spa_add(poolname, nvl, altroot);
fnvlist_free(nvl);
spa_activate(spa, spa_mode_global);
if (props && (error = spa_prop_validate(spa, props))) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Temporary pool names should never be written to disk.
*/
if (poolname != pool)
spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
has_features = B_FALSE;
has_encryption = B_FALSE;
has_allocclass = B_FALSE;
for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
if (zpool_prop_feature(nvpair_name(elem))) {
has_features = B_TRUE;
feat_name = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(feat_name, &feat));
if (feat == SPA_FEATURE_ENCRYPTION)
has_encryption = B_TRUE;
if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
has_allocclass = B_TRUE;
}
}
/* verify encryption params, if they were provided */
if (dcp != NULL) {
error = spa_create_check_encryption_params(dcp, has_encryption);
if (error != 0) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
}
if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (ENOTSUP);
}
if (has_features || nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
version = SPA_VERSION;
}
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
spa->spa_first_txg = txg;
spa->spa_uberblock.ub_txg = txg - 1;
spa->spa_uberblock.ub_version = version;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_load_state = SPA_LOAD_CREATE;
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Create the root vdev.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
ASSERT(error != 0 || rvd != NULL);
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
/*
* Instantiate the metaslab groups (this will dirty the vdevs);
* we can no longer error exit past this point.
*/
for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_metaslab_set_size(vd);
vdev_expand(vd, txg);
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Get the list of spares, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
spa->spa_spares.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
nspares);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Get the list of level 2 cache devices, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
VERIFY0(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP));
fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
nl2cache);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
spa->spa_is_initializing = B_TRUE;
spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
spa->spa_is_initializing = B_FALSE;
/*
* Create DDTs (dedup tables).
*/
ddt_create(spa);
/*
* Create BRT table and BRT table object.
*/
brt_create(spa);
spa_update_dspace(spa);
tx = dmu_tx_create_assigned(dp, txg);
/*
* Create the pool's history object.
*/
if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
spa_history_create_obj(spa, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
spa_history_log_version(spa, "create", tx);
/*
* Create the pool config object.
*/
spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool config");
}
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
sizeof (uint64_t), 1, &version, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool version");
}
/* Newly created pools with the right version are always deflated. */
if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
spa->spa_deflate = TRUE;
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
cmn_err(CE_PANIC, "failed to add deflate");
}
}
/*
* Create the deferred-free bpobj. Turn off compression
* because sync-to-convergence takes longer if the blocksize
* keeps changing.
*/
obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
dmu_object_set_compress(spa->spa_meta_objset, obj,
ZIO_COMPRESS_OFF, tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
sizeof (uint64_t), 1, &obj, tx) != 0) {
cmn_err(CE_PANIC, "failed to add bpobj");
}
VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
spa->spa_meta_objset, obj));
/*
* Generate some random noise for salted checksums to operate on.
*/
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
/*
* Set pool properties.
*/
spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_sync_props(props, tx);
}
for (int i = 0; i < ndraid; i++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
dmu_tx_commit(tx);
spa->spa_sync_on = B_TRUE;
txg_sync_start(dp);
mmp_thread_start(spa);
txg_wait_synced(dp, txg);
spa_spawn_aux_threads(spa);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;
spa_import_os(spa);
mutex_exit(&spa_namespace_lock);
return (0);
}
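/*
 * Shape of 'nvroot', sketched for illustration only: it mirrors the label
 * vdev tree, i.e. a root vdev (ZPOOL_CONFIG_TYPE of VDEV_TYPE_ROOT) whose
 * ZPOOL_CONFIG_CHILDREN array describes the top-level vdevs, optionally
 * alongside the ZPOOL_CONFIG_SPARES and ZPOOL_CONFIG_L2CACHE arrays
 * consumed above.
 */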
/*
* Import a non-root pool into the system.
*/
int
spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
{
spa_t *spa;
const char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
zpool_load_policy_t policy;
spa_mode_t mode = spa_mode_global;
uint64_t readonly = B_FALSE;
int error;
nvlist_t *nvroot;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
/*
* If a pool with this name exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Create and initialize the spa structure.
*/
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly)
mode = SPA_MODE_READ;
spa = spa_add(pool, config, altroot);
spa->spa_import_flags = flags;
/*
* Verbatim import - Take a pool and insert it into the namespace
* as if it had been loaded at boot.
*/
if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
return (0);
}
spa_activate(spa, mode);
/*
* Don't start async tasks until we know everything is healthy.
*/
spa_async_suspend(spa);
zpool_get_load_policy(config, &policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
if (state != SPA_LOAD_RECOVER) {
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
zfs_dbgmsg("spa_import: importing %s", pool);
} else {
zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
"(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
}
error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
/*
* Propagate anything learned while loading the pool and pass it
* back to caller (i.e. rewind info, missing devices, etc).
*/
fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, spa->spa_load_info);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Toss any existing sparelist, as it doesn't have any validity
* anymore, and conflicts with spa_has_spare().
*/
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
spa_load_spares(spa);
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
spa_load_l2cache(spa);
}
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_exit(spa, SCL_ALL, FTAG);
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
if (error != 0 || (props && spa_writeable(spa) &&
(error = spa_prop_set(spa, props)))) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
spa_async_resume(spa);
/*
* Override any spares and level 2 cache devices as specified by
* the user, as these may have correct device names/devids, etc.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
if (spa->spa_spares.sav_config)
fnvlist_remove(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES);
else
spa->spa_spares.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, (const nvlist_t * const *)spares,
nspares);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
if (spa->spa_l2cache.sav_config)
fnvlist_remove(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE);
else
spa->spa_l2cache.sav_config = fnvlist_alloc();
fnvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, (const nvlist_t * const *)l2cache,
nl2cache);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* Check for any removed devices.
*/
if (spa->spa_autoreplace) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
if (spa_writeable(spa)) {
/*
* Update the config cache to include the newly-imported pool.
*/
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
}
/*
* It's possible that the pool was expanded while it was exported.
* We kick off an async task to handle this for us.
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
spa_history_log_version(spa, "import", NULL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
zvol_create_minors_recursive(pool);
spa_import_os(spa);
return (0);
}
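/*
 * Illustrative sketch (flag usage only): a verbatim import such as
 *
 *	error = spa_import(name, config, NULL, ZFS_IMPORT_VERBATIM);
 *
 * skips spa_load() entirely; it simply inserts the supplied config into
 * the namespace and writes the cachefile, matching the "as if it had been
 * loaded at boot" behavior described above.
 */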
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
nvlist_t *config = NULL;
const char *poolname, *cachefile;
spa_t *spa;
uint64_t state;
int error;
zpool_load_policy_t policy;
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
return (NULL);
/*
* Create and initialize the spa structure.
*/
mutex_enter(&spa_namespace_lock);
spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
spa_activate(spa, SPA_MODE_READ);
/*
* Rewind pool if a max txg was provided.
*/
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_txg != UINT64_MAX) {
spa->spa_load_max_txg = policy.zlp_txg;
spa->spa_extreme_rewind = B_TRUE;
zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
poolname, (longlong_t)policy.zlp_txg);
} else {
zfs_dbgmsg("spa_tryimport: importing %s", poolname);
}
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
== 0) {
zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
} else {
spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
}
/*
* spa_import() relies on a pool config fetched by spa_tryimport()
* for spare/cache devices. Import flags are not passed to
* spa_tryimport(), which makes it return early when a log device is
* missing, before the cache and spare devices have been retrieved.
* Passing ZFS_IMPORT_MISSING_LOG to spa_tryimport() makes it fetch
* the correct configuration regardless of the missing log device.
*/
spa->spa_import_flags |= ZFS_IMPORT_MISSING_LOG;
error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
/*
* If 'tryconfig' was at least parsable, return the current config.
*/
if (spa->spa_root_vdev != NULL) {
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, poolname);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state);
fnvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
spa->spa_uberblock.ub_timestamp);
fnvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info);
fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
spa->spa_errata);
/*
* If the bootfs property exists on this pool then we
* copy it out so that external consumers can tell which
* pools are bootable.
*/
if ((!error || error == EEXIST) && spa->spa_bootfs) {
char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* We have to play games with the name since the
* pool was opened as TRYIMPORT_NAME.
*/
if (dsl_dsobj_to_dsname(spa_name(spa),
spa->spa_bootfs, tmpname) == 0) {
char *cp;
char *dsname;
dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
cp = strchr(tmpname, '/');
if (cp == NULL) {
(void) strlcpy(dsname, tmpname,
MAXPATHLEN);
} else {
(void) snprintf(dsname, MAXPATHLEN,
"%s/%s", poolname, ++cp);
}
fnvlist_add_string(config, ZPOOL_CONFIG_BOOTFS,
dsname);
kmem_free(dsname, MAXPATHLEN);
}
kmem_free(tmpname, MAXPATHLEN);
}
/*
* Add the list of hot spares and level 2 cache devices.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_add_spares(spa, config);
spa_add_l2cache(spa, config);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (config);
}
/*
* Pool export/destroy
*
* The act of destroying or exporting a pool is very simple. We make sure there
* is no more pending I/O and any references to the pool are gone. Then, we
* update the pool state and sync all the labels to disk, removing the
* configuration from the cache afterwards. If the 'hardforce' flag is set, then
* we don't sync the labels or remove the configuration cache.
*/
static int
spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
boolean_t force, boolean_t hardforce)
{
int error;
spa_t *spa;
if (oldconfig)
*oldconfig = NULL;
if (!(spa_mode_global & SPA_MODE_WRITE))
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_is_exporting) {
/* the pool is being exported by another thread */
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
}
spa->spa_is_exporting = B_TRUE;
/*
* Put a hold on the pool, drop the namespace lock, stop async tasks,
* reacquire the namespace lock, and see if we can export.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
if (spa->spa_zvol_taskq) {
zvol_remove_minors(spa, spa_name(spa), B_TRUE);
taskq_wait(spa->spa_zvol_taskq);
}
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
goto export_spa;
/*
* The pool will be in core if it's openable, in which case we can
* modify its state. Objsets may be open only because they're dirty,
* so we have to force it to sync before checking spa_refcnt.
*/
if (spa->spa_sync_on) {
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_evicting_os_wait(spa);
}
/*
* A pool cannot be exported or destroyed if there are active
* references. If we are resetting a pool, allow references by
* fault injection handlers.
*/
if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
error = SET_ERROR(EBUSY);
goto fail;
}
if (spa->spa_sync_on) {
vdev_t *rvd = spa->spa_root_vdev;
/*
* A pool cannot be exported if it has an active shared spare.
* This is to prevent other pools stealing the active spare
* from an exported pool. At the user's discretion, such a
* pool can still be forcibly exported.
*/
if (!force && new_state == POOL_STATE_EXPORTED &&
spa_has_active_shared_spare(spa)) {
error = SET_ERROR(EXDEV);
goto fail;
}
/*
* We're about to export or destroy this pool. Make sure
* we stop all initialization and trim activity here before
* we set the spa_final_txg. This will ensure that all
* dirty data resulting from the initialization is
* committed to disk before we unload the pool.
*/
vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
/*
* We want this to be reflected on every label,
* so mark them all dirty. spa_unload() will do the
* final sync that pushes these changes out.
*/
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_state = new_state;
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
* time flushing as many metaslabs as we can in an attempt to
* destroy log space maps and save import time. This has to be
* done before we set the spa_final_txg, otherwise
* spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs.
* spa_should_flush_logs_on_unload() should be called after
* spa_state has been set to the new_state.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_final_txg = spa_last_synced_txg(spa) +
TXG_DEFER_SIZE + 1;
spa_config_exit(spa, SCL_ALL, FTAG);
}
}
export_spa:
spa_export_os(spa);
if (new_state == POOL_STATE_DESTROYED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
else if (new_state == POOL_STATE_EXPORTED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
if (oldconfig && spa->spa_config)
*oldconfig = fnvlist_dup(spa->spa_config);
if (new_state != POOL_STATE_UNINITIALIZED) {
if (!hardforce)
spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
spa_remove(spa);
} else {
/*
* If spa_remove() is not called for this spa_t and
* there is any possibility that it can be reused,
* we make sure to reset the exporting flag.
*/
spa->spa_is_exporting = B_FALSE;
}
mutex_exit(&spa_namespace_lock);
return (0);
fail:
spa->spa_is_exporting = B_FALSE;
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Destroy a storage pool.
*/
int
spa_destroy(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
B_FALSE, B_FALSE));
}
/*
* Export a storage pool.
*/
int
spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce)
{
return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
force, hardforce));
}
/*
* Similar to spa_export(), this unloads the spa_t without actually removing it
* from the namespace in any way.
*/
int
spa_reset(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
B_FALSE, B_FALSE));
}
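/*
 * For illustration: all three wrappers above funnel into
 * spa_export_common(). spa_destroy() and spa_export() pass
 * POOL_STATE_DESTROYED/POOL_STATE_EXPORTED and end with spa_remove(),
 * while spa_reset() passes POOL_STATE_UNINITIALIZED so the pool is
 * unloaded but left in the namespace.
 */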
/*
* ==========================================================================
* Device manipulation
* ==========================================================================
*/
/*
* This is called as a synctask to increment the draid feature flag
*/
static void
spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int draid = (int)(uintptr_t)arg;
for (int c = 0; c < draid; c++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
}
/*
* Add a device to a storage pool.
*/
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
uint64_t txg, ndraid = 0;
int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
&nspares) != 0)
nspares = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
&nl2cache) != 0)
nl2cache = 0;
if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
if (vd->vdev_children != 0 &&
(error = vdev_create(vd, txg, B_FALSE)) != 0) {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* The virtual dRAID spares must be added after vdev tree is created
* and the vdev guids are generated. The guid of their associated
* dRAID is stored in the config and used when opening the spare.
*/
if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
rvd->vdev_children)) == 0) {
if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
nspares = 0;
} else {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* We must validate the spares and l2cache devices after checking the
* children. Otherwise, vdev_inuse() will blindly overwrite the spare.
*/
if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, vd, txg, error));
/*
* If we are in the middle of a device removal, we can only add
* devices which match the existing devices in the pool.
* If we are in the middle of a removal, or have some indirect
* vdevs, we cannot add raidz or dRAID top levels.
*/
if (spa->spa_vdev_removal != NULL ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
if (spa->spa_vdev_removal != NULL &&
tvd->vdev_ashift != spa->spa_max_ashift) {
return (spa_vdev_exit(spa, vd, txg, EINVAL));
}
/* Fail if top level vdev is raidz or a dRAID */
if (vdev_get_nparity(tvd) != 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
/*
* Need the top level mirror to be
* a mirror of leaf vdevs only
*/
if (tvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < tvd->vdev_children; cid++) {
vdev_t *cvd = tvd->vdev_child[cid];
if (!cvd->vdev_ops->vdev_op_leaf) {
return (spa_vdev_exit(spa, vd,
txg, EINVAL));
}
}
}
}
}
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
tvd->vdev_id = rvd->vdev_children;
vdev_add_child(rvd, tvd);
vdev_config_dirty(tvd);
}
if (nspares != 0) {
spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
ZPOOL_CONFIG_SPARES);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nl2cache != 0) {
spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
ZPOOL_CONFIG_L2CACHE);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* We can't increment a feature while holding spa_vdev so we
* have to do it in a synctask.
*/
if (ndraid != 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
(void *)(uintptr_t)ndraid, tx);
dmu_tx_commit(tx);
}
/*
* We have to be careful when adding new vdevs to an existing pool.
* If other threads start allocating from these vdevs before we
* sync the config cache, and we lose power, then upon reboot we may
* fail to open the pool because there are DVAs that the config cache
* can't translate. Therefore, we first add the vdevs without
* initializing metaslabs; sync the config cache (via spa_vdev_exit());
* and then let spa_config_update() initialize the new metaslabs.
*
* spa_load() checks for added-but-not-initialized vdevs, so that
* if we lose power at any point in this sequence, the remaining
* steps will be completed the next time we load the pool.
*/
(void) spa_vdev_exit(spa, vd, txg, 0);
mutex_enter(&spa_namespace_lock);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Attach a device to a mirror. The arguments are the path to any device
* in the mirror, and the nvroot for the new device. If the path specifies
* a device that is not mirrored, we automatically insert the mirror vdev.
*
* If 'replacing' is specified, the new device is intended to replace the
* existing device; in this case the two devices are made into their own
* mirror using the 'replacing' vdev, which is functionally identical to
* the mirror vdev (it actually reuses all the same ops) but has a few
* extra rules: you can't attach to it after it's been created, and upon
* completion of resilvering, the first disk (the one being replaced)
* is automatically detached.
*
* If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
* should be performed instead of traditional healing reconstruction. From
* an administrator's perspective these are both resilver operations.
*/
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
int rebuild)
{
uint64_t txg, dtl_max_txg;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare;
int error;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (rebuild) {
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
- if (dsl_scan_resilvering(spa_get_dsl(spa)))
+ if (dsl_scan_resilvering(spa_get_dsl(spa)) ||
+ dsl_scan_resilver_scheduled(spa_get_dsl(spa))) {
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_RESILVER_IN_PROGRESS));
+ }
} else {
if (vdev_rebuild_active(rvd))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_REBUILD_IN_PROGRESS));
}
if (spa->spa_vdev_removal != NULL)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
if (oldvd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!oldvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = oldvd->vdev_parent;
if (spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
VDEV_ALLOC_ATTACH) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
if (newrootvd->vdev_children != 1)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
newvd = newrootvd->vdev_child[0];
if (!newvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
return (spa_vdev_exit(spa, newrootvd, txg, error));
/*
* log, dedup and special vdevs should not be replaced by spares.
*/
if ((oldvd->vdev_top->vdev_alloc_bias != VDEV_BIAS_NONE ||
oldvd->vdev_top->vdev_islog) && newvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
/*
* A dRAID spare can only replace a child of its parent dRAID vdev.
*/
if (newvd->vdev_ops == &vdev_draid_spare_ops &&
oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (rebuild) {
/*
* For rebuilds, the top vdev must support reconstruction
* using only space maps. This means the only allowable
* vdevs types are the root vdev, a mirror, or dRAID.
*/
tvd = pvd;
if (pvd->vdev_top != NULL)
tvd = pvd->vdev_top;
if (tvd->vdev_ops != &vdev_mirror_ops &&
tvd->vdev_ops != &vdev_root_ops &&
tvd->vdev_ops != &vdev_draid_ops) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
}
if (!replacing) {
/*
* For attach, the only allowable parent is a mirror or the root
* vdev.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
pvops = &vdev_mirror_ops;
} else {
/*
* Active hot spares can only be replaced by inactive hot
* spares.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
oldvd->vdev_isspare &&
!spa_has_spare(spa, newvd->vdev_guid))
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If the source is a hot spare, and the parent isn't already a
* spare, then we want to create a new hot spare. Otherwise, we
* want to create a replacing vdev. The user is not allowed to
* attach to a spared vdev child unless the 'isspare' state is
* the same (spare replaces spare, non-spare replaces
* non-spare).
*/
if (pvd->vdev_ops == &vdev_replacing_ops &&
spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
} else if (pvd->vdev_ops == &vdev_spare_ops &&
newvd->vdev_isspare != oldvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (newvd->vdev_isspare)
pvops = &vdev_spare_ops;
else
pvops = &vdev_replacing_ops;
}
/*
* Make sure the new device is big enough.
*/
if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
/*
* The new device cannot have a higher alignment requirement
* than the top-level vdev.
*/
if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If this is an in-place replacement, update oldvd's path and devid
* to make it distinguishable from newvd, and unopenable from now on.
*/
if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
KM_SLEEP);
(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
"%s/%s", newvd->vdev_path, "old");
if (oldvd->vdev_devid != NULL) {
spa_strfree(oldvd->vdev_devid);
oldvd->vdev_devid = NULL;
}
}
/*
* If the parent is not a mirror, or if we're replacing, insert the new
* mirror/replacing/spare vdev above oldvd.
*/
if (pvd->vdev_ops != pvops)
pvd = vdev_add_parent(oldvd, pvops);
ASSERT(pvd->vdev_top->vdev_parent == rvd);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);
/*
* Extract the new device from its root and add it to pvd.
*/
vdev_remove_child(newrootvd, newvd);
newvd->vdev_id = pvd->vdev_children;
newvd->vdev_crtxg = oldvd->vdev_crtxg;
vdev_add_child(pvd, newvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(pvd);
tvd = newvd->vdev_top;
ASSERT(pvd->vdev_top == tvd);
ASSERT(tvd->vdev_parent == rvd);
vdev_config_dirty(tvd);
/*
* Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
* for any dmu_sync-ed blocks. It will propagate upward when
* spa_vdev_exit() calls vdev_dtl_reassess().
*/
dtl_max_txg = txg + TXG_CONCURRENT_STATES;
vdev_dtl_dirty(newvd, DTL_MISSING,
TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}
oldvdpath = spa_strdup(oldvd->vdev_path);
newvdpath = spa_strdup(newvd->vdev_path);
newvd_isspare = newvd->vdev_isspare;
/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);
/*
* Schedule the resilver or rebuild to restart in the future. We do
* this to ensure that dmu_sync-ed blocks have been stitched into the
* respective datasets.
*/
if (rebuild) {
newvd->vdev_rebuild_txg = txg;
vdev_rebuild(tvd);
} else {
newvd->vdev_resilver_txg = txg;
if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
vdev_defer_resilver(newvd);
} else {
dsl_scan_restart_resilver(spa->spa_dsl_pool,
dtl_max_txg);
}
}
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
/*
* Commit the config
*/
(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
spa_history_log_internal(spa, "vdev attach", NULL,
"%s vdev=%s %s vdev=%s",
replacing && newvd_isspare ? "spare in" :
replacing ? "replace" : "attach", newvdpath,
replacing ? "for" : "to", oldvdpath);
spa_strfree(oldvdpath);
spa_strfree(newvdpath);
return (0);
}
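/*
 * Illustrative example: attaching device B to a plain top-level disk A
 * turns that top level into mirror(A, B); with 'replacing' set it becomes
 * replacing(A, B) instead, and A is detached automatically once the
 * resilver (or rebuild) of B completes.
 */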
/*
* Detach a device from a mirror or replacing vdev.
*
* If 'replace_done' is specified, only detach if the parent
* is a replacing or a spare vdev.
*/
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
int error;
vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
ASSERT(spa_writeable(spa));
txg = spa_vdev_detach_enter(spa, guid);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
/*
* Besides being called directly from the userland through the
* ioctl interface, spa_vdev_detach() can potentially be called
* at the end of spa_vdev_resilver_done().
*
* In the regular case, when we have a checkpoint this shouldn't
* happen as we never empty the DTLs of a vdev during the scrub
* [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
* should never get here when we have a checkpoint.
*
* That said, even in a case when we checkpoint the pool exactly
* as spa_vdev_resilver_done() calls this function everything
* should be fine as the resilver will return right away.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (vd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = vd->vdev_parent;
/*
* If the parent/child relationship is not as expected, don't do it.
* Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
* vdev that's replacing B with C. The user's intent in replacing
* is to go from M(A,B) to M(A,C). If the user decides to cancel
* the replace by detaching C, the expected behavior is to end up
* M(A,B). But suppose that right after deciding to detach C,
* the replacement of B completes. We would have M(A,C), and then
* ask to detach C, which would leave us with just A -- not what
* the user wanted. To prevent this, we make sure that the
* parent/child relationship hasn't changed -- in this example,
* that C's parent is still the replacing vdev R.
*/
if (pvd->vdev_guid != pguid && pguid != 0)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
/*
* Only 'replacing' or 'spare' vdevs can be replaced.
*/
if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
spa_version(spa) >= SPA_VERSION_SPARES);
/*
* Only mirror, replacing, and spare vdevs support detach.
*/
if (pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
* If this device has the only valid copy of some data,
* we cannot safely detach it.
*/
if (vdev_dtl_required(vd))
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
ASSERT(pvd->vdev_children >= 2);
/*
* If we are detaching the second disk from a replacing vdev, then
* check to see if we changed the original vdev's path to have "/old"
* at the end in spa_vdev_attach(). If so, undo that change now.
*/
if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
vd->vdev_path != NULL) {
size_t len = strlen(vd->vdev_path);
for (int c = 0; c < pvd->vdev_children; c++) {
cvd = pvd->vdev_child[c];
if (cvd == vd || cvd->vdev_path == NULL)
continue;
if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
strcmp(cvd->vdev_path + len, "/old") == 0) {
spa_strfree(cvd->vdev_path);
cvd->vdev_path = spa_strdup(vd->vdev_path);
break;
}
}
}
/*
* If we are detaching the original disk from a normal spare, then it
* implies that the spare should become a real disk, and be removed
* from the active spare list for the pool. dRAID spares on the
* other hand are coupled to the pool and thus should never be removed
* from the spares list.
*/
if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
if (last_cvd->vdev_isspare &&
last_cvd->vdev_ops != &vdev_draid_spare_ops) {
unspare = B_TRUE;
}
}
/*
* Erase the disk labels so the disk can be used for other things.
* This must be done after all other error cases are handled,
* but before we disembowel vd (so we can still do I/O to it).
* But if we can't do it, don't treat the error as fatal --
* it may be that the unwritability of the disk is the reason
* it's being detached!
*/
(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Remove vd from its parent and compact the parent's children.
*/
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
/*
* Remember one of the remaining children so we can get tvd below.
*/
cvd = pvd->vdev_child[pvd->vdev_children - 1];
/*
* If we need to remove the remaining child from the list of hot spares,
* do it now, marking the vdev as no longer a spare in the process.
* We must do this before vdev_remove_parent(), because that can
* change the GUID if it creates a new toplevel GUID. For a similar
* reason, we must remove the spare now, in the same txg as the detach;
* otherwise someone could attach a new sibling, change the GUID, and
* the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
*/
if (unspare) {
ASSERT(cvd->vdev_isspare);
spa_spare_remove(cvd);
unspare_guid = cvd->vdev_guid;
(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
cvd->vdev_unspare = B_TRUE;
}
/*
* If the parent mirror/replacing vdev only has one child,
* the parent is no longer needed. Remove it from the tree.
*/
if (pvd->vdev_children == 1) {
if (pvd->vdev_ops == &vdev_spare_ops)
cvd->vdev_unspare = B_FALSE;
vdev_remove_parent(cvd);
}
/*
* We don't set tvd until now because the parent we just removed
* may have been the previous top-level vdev.
*/
tvd = cvd->vdev_top;
ASSERT(tvd->vdev_parent == rvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(cvd);
/*
* If the 'autoexpand' property is set on the pool then automatically
* try to expand the size of the pool. For example if the device we
* just detached was smaller than the others, it may be possible to
* add metaslabs (i.e. grow the pool). We need to reopen the vdev
* first so that we can obtain the updated sizes of the leaf vdevs.
*/
if (spa->spa_autoexpand) {
vdev_reopen(tvd);
vdev_expand(tvd, txg);
}
vdev_config_dirty(tvd);
/*
* Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
* vd->vdev_detached is set and free vd's DTL object in syncing context.
* But first make sure we're not on any *other* txg's DTL list, to
* prevent vd from being accessed after it's freed.
*/
vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
spa_notify_waiters(spa);
/* hang on to the spa before we release the lock */
spa_open_ref(spa, FTAG);
error = spa_vdev_exit(spa, vd, txg, 0);
spa_history_log_internal(spa, "detach", NULL,
"vdev=%s", vdpath);
spa_strfree(vdpath);
/*
* If this was the removal of the original device in a hot spare vdev,
* then we want to go through and remove the device from the hot spare
* list of every other pool.
*/
if (unspare) {
spa_t *altspa = NULL;
mutex_enter(&spa_namespace_lock);
while ((altspa = spa_next(altspa)) != NULL) {
if (altspa->spa_state != POOL_STATE_ACTIVE ||
altspa == spa)
continue;
spa_open_ref(altspa, FTAG);
mutex_exit(&spa_namespace_lock);
(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
mutex_enter(&spa_namespace_lock);
spa_close(altspa, FTAG);
}
mutex_exit(&spa_namespace_lock);
/* search the rest of the vdevs for spares to remove */
spa_vdev_resilver_done(spa);
}
/* all done with the spa; OK to release */
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
return (error);
}
static int
spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
}
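/*
 * Take the per-vdev initialize lock before dropping the config and
 * state locks; the namespace lock held by the caller prevents the
 * vdev from being detached or removed while we examine its state.
 */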
mutex_enter(&vd->vdev_initialize_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate an initialize action we check whether
* vdev_initialize_thread is NULL. We do this instead of using
* vdev_initialize_state since there might be a previous
* initialization process which has completed but whose thread
* has not yet exited.
*/
if (cmd_type == POOL_INITIALIZE_START &&
(vd->vdev_initialize_thread != NULL ||
vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
(vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_UNINIT &&
vd->vdev_initialize_thread != NULL) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
}
switch (cmd_type) {
case POOL_INITIALIZE_START:
vdev_initialize(vd);
break;
case POOL_INITIALIZE_CANCEL:
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
break;
case POOL_INITIALIZE_SUSPEND:
vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
break;
case POOL_INITIALIZE_UNINIT:
vdev_uninitialize(vd);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_initialize_lock);
return (0);
}
int
spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping initialization. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the initializing operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
&vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all initialize threads to stop. */
vdev_initialize_stop_wait(spa, &vd_list);
/* Sync out the initializing state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
static int
spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
} else if (!vd->vdev_has_trim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
} else if (secure && !vd->vdev_has_securetrim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
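/*
 * As above for initializing: take the per-vdev TRIM lock before
 * dropping the config and state locks.
 */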
mutex_enter(&vd->vdev_trim_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate a TRIM action we check whether the
* vdev_trim_thread is NULL. We do this instead of using the
* vdev_trim_state since there might be a previous TRIM process
* which has completed but whose thread has not yet exited.
*/
if (cmd_type == POOL_TRIM_START &&
(vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_TRIM_CANCEL &&
(vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_TRIM_SUSPEND &&
vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_TRIM_START:
vdev_trim(vd, rate, partial, secure);
break;
case POOL_TRIM_CANCEL:
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
break;
case POOL_TRIM_SUSPEND:
vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_trim_lock);
return (0);
}
/*
* Initiates a manual TRIM for the requested vdevs. This kicks off individual
* TRIM threads for each child vdev. These threads pass over all of the free
* space in the vdev's metaslabs and issue TRIM commands for that space.
*/
int
spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping TRIM. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the TRIM operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
rate, partial, secure, &vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all TRIM threads to stop. */
vdev_trim_stop_wait(spa, &vd_list);
/* Sync out the TRIM state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
/*
* Split a set of devices from their mirrors, and create a new pool from them.
*/
int
spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp)
{
int error = 0;
uint64_t txg, *glist;
spa_t *newspa;
uint_t c, children, lastlog;
nvlist_t **child, *nvl, *tmp;
dmu_tx_t *tx;
const char *altroot = NULL;
vdev_t *rvd, **vml = NULL; /* vdev modify list */
boolean_t activate_slog;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* clear the log and flush everything up to now */
activate_slog = spa_passivate_log(spa);
(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
error = spa_reset_logs(spa);
txg = spa_vdev_config_enter(spa);
if (activate_slog)
spa_activate_log(spa);
if (error != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
/* check new spa name before going any further */
if (spa_lookup(newname) != NULL)
return (spa_vdev_exit(spa, NULL, txg, EEXIST));
/*
* scan through all the children to ensure they're all mirrors
*/
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* first, check to ensure we've got the right child count */
rvd = spa->spa_root_vdev;
lastlog = 0;
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
/* don't count the holes & logs as children */
if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
!vdev_is_concrete(vd))) {
if (lastlog == 0)
lastlog = c;
continue;
}
lastlog = 0;
}
if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* next, ensure no spare or cache devices are part of the split */
if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
/* then, loop over each vdev and validate it */
for (c = 0; c < children; c++) {
uint64_t is_hole = 0;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole != 0) {
if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = SET_ERROR(EINVAL);
break;
}
}
/* deal with indirect vdevs */
if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
&vdev_indirect_ops)
continue;
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = SET_ERROR(ENODEV);
break;
}
/* make sure there's nothing stopping the split */
if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
vml[c]->vdev_islog ||
!vdev_is_concrete(vml[c]) ||
vml[c]->vdev_isspare ||
vml[c]->vdev_isl2cache ||
!vdev_writeable(vml[c]) ||
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c]) ||
vdev_resilver_needed(vml[c], NULL, NULL)) {
error = SET_ERROR(EBUSY);
break;
}
/* we need certain info from the top level */
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
vml[c]->vdev_top->vdev_ms_array);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
vml[c]->vdev_top->vdev_ms_shift);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
vml[c]->vdev_top->vdev_asize);
fnvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
vml[c]->vdev_top->vdev_ashift);
/* transfer per-vdev ZAPs */
ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_TOP_ZAP,
vml[c]->vdev_parent->vdev_top_zap));
}
if (error != 0) {
kmem_free(vml, children * sizeof (vdev_t *));
kmem_free(glist, children * sizeof (uint64_t));
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* stop writers from using the disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_TRUE;
}
vdev_reopen(spa->spa_root_vdev);
/*
* Temporarily record the splitting vdevs in the spa config. This
* will disappear once the config is regenerated.
*/
nvl = fnvlist_alloc();
fnvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, glist, children);
kmem_free(glist, children * sizeof (uint64_t));
mutex_enter(&spa->spa_props_lock);
fnvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, nvl);
mutex_exit(&spa->spa_props_lock);
spa->spa_config_splitting = nvl;
vdev_config_dirty(spa->spa_root_vdev);
/* configure and create the new pool */
fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE);
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
spa_generate_guid(NULL));
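/* note in the config that the new pool has per-vdev ZAPs */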
VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
/* add the new pool to the namespace */
newspa = spa_add(newname, config, altroot);
newspa->spa_avz_action = AVZ_ACTION_REBUILD;
newspa->spa_config_txg = spa->spa_config_txg;
spa_set_log_state(newspa, SPA_LOG_CLEAR);
/* release the spa config lock, retaining the namespace lock */
spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 1);
spa_activate(newspa, spa_mode_global);
spa_async_suspend(newspa);
/*
* Temporarily stop the initializing and TRIM activity. We set the
* state to ACTIVE so that we know to resume initializing or TRIM
* once the split has completed.
*/
list_t vd_initialize_list;
list_create(&vd_initialize_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
list_t vd_trim_list;
list_create(&vd_trim_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
mutex_enter(&vml[c]->vdev_initialize_lock);
vdev_initialize_stop(vml[c],
VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
mutex_exit(&vml[c]->vdev_initialize_lock);
mutex_enter(&vml[c]->vdev_trim_lock);
vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
mutex_exit(&vml[c]->vdev_trim_lock);
}
}
vdev_initialize_stop_wait(spa, &vd_initialize_list);
vdev_trim_stop_wait(spa, &vd_trim_list);
list_destroy(&vd_initialize_list);
list_destroy(&vd_trim_list);
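/*
 * Mark the new pool as being assembled from a split so spa_load()
 * handles its config accordingly.
 */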
newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
newspa->spa_is_splitting = B_TRUE;
/* create the new pool from the disks of the original pool */
error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
if (error)
goto out;
/* if that worked, generate a real config for the new pool */
if (newspa->spa_root_vdev != NULL) {
newspa->spa_config_splitting = fnvlist_alloc();
fnvlist_add_uint64(newspa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa));
spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
B_TRUE));
}
/* set the props */
if (props != NULL) {
spa_configfile_set(newspa, props, B_FALSE);
error = spa_prop_set(newspa, props);
if (error)
goto out;
}
/* flush everything */
txg = spa_vdev_config_enter(newspa);
vdev_config_dirty(newspa->spa_root_vdev);
(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 2);
spa_async_resume(newspa);
/* finally, update the original pool's config */
txg = spa_vdev_config_enter(spa);
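/*
 * Open a MOS transaction so the per-disk "detach" history records
 * below can be logged; it is committed once the vdevs are split off.
 */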
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0)
dmu_tx_abort(tx);
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
vdev_t *tvd = vml[c]->vdev_top;
/*
* Need to be sure the detachable VDEV is not
* on any *other* txg's DTL list to prevent it
* from being accessed after it's freed.
*/
for (int t = 0; t < TXG_SIZE; t++) {
(void) txg_list_remove_this(
&tvd->vdev_dtl_list, vml[c], t);
}
vdev_split(vml[c]);
if (error == 0)
spa_history_log_internal(spa, "detach", tx,
"vdev=%s", vml[c]->vdev_path);
vdev_free(vml[c]);
}
}
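/*
 * The original pool has lost the split-off vdevs (and their ZAPs),
 * so rebuild its all-vdev ZAP and write out an updated config; the
 * temporary split list is no longer needed.
 */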
spa->spa_avz_action = AVZ_ACTION_REBUILD;
vdev_config_dirty(spa->spa_root_vdev);
spa->spa_config_splitting = NULL;
nvlist_free(nvl);
if (error == 0)
dmu_tx_commit(tx);
(void) spa_vdev_exit(spa, NULL, txg, 0);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 3);
/* split is complete; log a history record */
spa_history_log_internal(newspa, "split", NULL,
"from pool %s", spa_name(spa));
newspa->spa_is_splitting = B_FALSE;
kmem_free(vml, children * sizeof (vdev_t *));
/* if we're not going to mount the filesystems in userland, export */
if (exp)
error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
B_FALSE, B_FALSE);
return (error);
out:
spa_unload(newspa);
spa_deactivate(newspa);
spa_remove(newspa);
txg = spa_vdev_config_enter(spa);
/* re-online all offlined disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_FALSE;
}
/* restart initializing or trimming disks as necessary */
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
vdev_reopen(spa->spa_root_vdev);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
(void) spa_vdev_exit(spa, NULL, txg, error);
kmem_free(vml, children * sizeof (vdev_t *));
return (error);
}
/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
* currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
}
/*
* Check for a completed replacement. We always consider the first
* vdev in the list to be the oldest vdev, and the last one to be
* the newest (see spa_vdev_attach() for how that works). In
* the case where the newest vdev is faulted, we will not automatically
* remove it after a resilver completes. This is OK as it will require
* user intervention to determine which disk the admin wishes to keep.
*/
if (vd->vdev_ops == &vdev_replacing_ops) {
ASSERT(vd->vdev_children > 1);
newvd = vd->vdev_child[vd->vdev_children - 1];
oldvd = vd->vdev_child[0];
if (vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
}
/*
* Check for a completed resilver with the 'unspare' flag set.
* Also potentially update faulted state.
*/
if (vd->vdev_ops == &vdev_spare_ops) {
vdev_t *first = vd->vdev_child[0];
vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
if (last->vdev_unspare) {
oldvd = first;
newvd = last;
} else if (first->vdev_unspare) {
oldvd = last;
newvd = first;
} else {
oldvd = NULL;
}
if (oldvd != NULL &&
vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
vdev_propagate_state(vd);
/*
* If there are more than two spares attached to a disk,
* and those spares are not required, then we want to
* attempt to free them up now so that they can be used
* by other pools. Once we're back down to a single
* disk+spare, we stop removing them.
*/
if (vd->vdev_children > 2) {
newvd = vd->vdev_child[1];
if (newvd->vdev_isspare && last->vdev_isspare &&
vdev_dtl_empty(last, DTL_MISSING) &&
vdev_dtl_empty(last, DTL_OUTAGE) &&
!vdev_dtl_required(newvd))
return (newvd);
}
}
return (NULL);
}
static void
spa_vdev_resilver_done(spa_t *spa)
{
vdev_t *vd, *pvd, *ppvd;
uint64_t guid, sguid, pguid, ppguid;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
pvd = vd->vdev_parent;
ppvd = pvd->vdev_parent;
guid = vd->vdev_guid;
pguid = pvd->vdev_guid;
ppguid = ppvd->vdev_guid;
sguid = 0;
/*
* If we have just finished replacing a hot spared device, then
* we need to detach the parent's first child (the original hot
* spare) as well.
*/
if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
ppvd->vdev_children == 2) {
ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
sguid = ppvd->vdev_child[1]->vdev_guid;
}
ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
return;
if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
return;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
}
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* If no detach was performed above, replace waiters will not have
* been notified, in which case we must do so now.
*/
spa_notify_waiters(spa);
}
/*
* Update the stored path or FRU for this vdev.
*/
static int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
boolean_t ispath)
{
vdev_t *vd;
boolean_t sync = B_FALSE;
ASSERT(spa_writeable(spa));
spa_vdev_state_enter(spa, SCL_ALL);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENOENT));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
if (ispath) {
if (strcmp(value, vd->vdev_path) != 0) {
spa_strfree(vd->vdev_path);
vd->vdev_path = spa_strdup(value);
sync = B_TRUE;
}
} else {
if (vd->vdev_fru == NULL) {
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
} else if (strcmp(value, vd->vdev_fru) != 0) {
spa_strfree(vd->vdev_fru);
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
}
}
return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}
int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}
/*
* ==========================================================================
* SPA Scanning
* ==========================================================================
*/
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
}
int
spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
if (func == POOL_SCAN_RESILVER &&
!spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
*/
if (func == POOL_SCAN_RESILVER &&
!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
return (0);
}
if (func == POOL_SCAN_ERRORSCRUB &&
!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG))
return (SET_ERROR(ENOTSUP));
return (dsl_scan(spa->spa_dsl_pool, func));
}
/*
* ==========================================================================
* SPA async task processing
* ==========================================================================
*/
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_delayed_close = B_FALSE;
vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
/*
* We want to clear the stats, but we don't want to do a full
* vdev_clear() as that will cause us to throw away
* degraded/faulted state as well as attempt to reopen the
* device, all of which is a waste.
*/
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vdev_state_dirty(vd->vdev_top);
/* Tell userspace that the vdev is gone. */
zfs_post_remove(spa, vd);
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_probe_wanted) {
vd->vdev_probe_wanted = B_FALSE;
vdev_reopen(vd); /* vdev_open() does the actual probe */
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_probe(spa, vd->vdev_child[c]);
}
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
if (!spa->spa_autoexpand)
return;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
spa_async_autoexpand(spa, cvd);
}
if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
return;
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
}
static __attribute__((noreturn)) void
spa_async_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
dsl_pool_t *dp = spa->spa_dsl_pool;
int tasks;
ASSERT(spa->spa_sync_on);
mutex_enter(&spa->spa_async_lock);
tasks = spa->spa_async_tasks;
spa->spa_async_tasks = 0;
mutex_exit(&spa->spa_async_lock);
/*
* See if the config needs to be updated.
*/
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
uint64_t old_space, new_space;
mutex_enter(&spa_namespace_lock);
old_space = metaslab_class_get_space(spa_normal_class(spa));
old_space += metaslab_class_get_space(spa_special_class(spa));
old_space += metaslab_class_get_space(spa_dedup_class(spa));
old_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
new_space = metaslab_class_get_space(spa_normal_class(spa));
new_space += metaslab_class_get_space(spa_special_class(spa));
new_space += metaslab_class_get_space(spa_dedup_class(spa));
new_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);
/*
* If the pool grew as a result of the config update,
* then log an internal history event.
*/
if (new_space != old_space) {
spa_history_log_internal(spa, "vdev online", NULL,
"pool '%s' size: %llu(+%llu)",
spa_name(spa), (u_longlong_t)new_space,
(u_longlong_t)(new_space - old_space));
}
}
/*
* See if any devices need to be marked REMOVED.
*/
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_remove(spa, spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_async_autoexpand(spa, spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/*
* See if any devices need to be probed.
*/
if (tasks & SPA_ASYNC_PROBE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_probe(spa, spa->spa_root_vdev);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
/*
* If any devices are done replacing, detach them.
*/
if (tasks & SPA_ASYNC_RESILVER_DONE ||
tasks & SPA_ASYNC_REBUILD_DONE ||
tasks & SPA_ASYNC_DETACH_SPARE) {
spa_vdev_resilver_done(spa);
}
/*
* Kick off a resilver.
*/
if (tasks & SPA_ASYNC_RESILVER &&
!vdev_rebuild_active(spa->spa_root_vdev) &&
(!dsl_scan_resilvering(dp) ||
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
dsl_scan_restart_resilver(dp, 0);
if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_TRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache whole device TRIM.
*/
if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_l2arc(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache rebuilding.
*/
if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
l2arc_spa_rebuild_start(spa);
spa_config_exit(spa, SCL_L2ARC, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Let the world know that we're done.
*/
mutex_enter(&spa->spa_async_lock);
spa->spa_async_thread = NULL;
cv_broadcast(&spa->spa_async_cv);
mutex_exit(&spa->spa_async_lock);
thread_exit();
}
void
spa_async_suspend(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
spa->spa_async_suspended++;
while (spa->spa_async_thread != NULL)
cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
mutex_exit(&spa->spa_async_lock);
spa_vdev_remove_suspend(spa);
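/*
 * Cancel the zthr-based background activities as well; they are
 * resumed again in spa_async_resume().
 */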
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_cancel(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_cancel(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_cancel(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_cancel(ll_condense_thread);
}
void
spa_async_resume(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
ASSERT(spa->spa_async_suspended != 0);
spa->spa_async_suspended--;
mutex_exit(&spa->spa_async_lock);
spa_restart_removal(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_resume(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_resume(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_resume(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_resume(ll_condense_thread);
}
static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
uint_t non_config_tasks;
uint_t config_task;
boolean_t config_task_suspended;
non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
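/*
 * Hold off retrying a failed config cachefile write until
 * zfs_ccw_retry_interval seconds have elapsed since the failure.
 */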
if (spa->spa_ccw_fail_time == 0) {
config_task_suspended = B_FALSE;
} else {
config_task_suspended =
(gethrtime() - spa->spa_ccw_fail_time) <
((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
}
return (non_config_tasks || (config_task && !config_task_suspended));
}
static void
spa_async_dispatch(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
if (spa_async_tasks_pending(spa) &&
!spa->spa_async_suspended &&
spa->spa_async_thread == NULL)
spa->spa_async_thread = thread_create(NULL, 0,
spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&spa->spa_async_lock);
}
void
spa_async_request(spa_t *spa, int task)
{
zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks |= task;
mutex_exit(&spa->spa_async_lock);
}
int
spa_async_tasks(spa_t *spa)
{
return (spa->spa_async_tasks);
}
/*
* ==========================================================================
* SPA syncing routines
* ==========================================================================
*/
static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
bpobj_t *bpo = arg;
bpobj_enqueue(bpo, bp, bp_freed, tx);
return (0);
}
int
bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
}
int
bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
}
static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zio_t *pio = arg;
zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
pio->io_flags));
return (0);
}
static int
bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (spa_free_sync_cb(arg, bp, tx));
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing frees.
*/
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
VERIFY(zio_wait(zio) == 0);
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing deferred frees.
*/
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
/*
* Note:
* If the log space map feature is active, we stop deferring
* frees to the next TXG, and therefore running this function
* is effectively a no-op, as spa_deferred_bpobj should
* not have any entries.
*
* That said we run this function anyway (instead of returning
* immediately) for the edge-case scenario where we just
* activated the log space map feature in this TXG but we have
* deferred frees from the previous TXG.
*/
zio_t *zio = zio_root(spa, NULL, NULL, 0);
VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
bpobj_spa_free_sync_cb, zio, tx), ==, 0);
VERIFY0(zio_wait(zio));
}
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
char *packed = NULL;
size_t bufsize;
size_t nvsize = 0;
dmu_buf_t *db;
VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
* information. This avoids the dmu_buf_will_dirty() path and
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
memset(packed + nvsize, 0, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
}
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
const char *config, const char *entry)
{
nvlist_t *nvroot;
nvlist_t **list;
int i;
if (!sav->sav_sync)
return;
/*
* Update the MOS nvlist describing the list of available devices.
* spa_validate_aux() will have already made sure this nvlist is
* valid and the vdevs are labeled appropriately.
*/
if (sav->sav_object == 0) {
sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
&sav->sav_object, tx) == 0);
}
nvroot = fnvlist_alloc();
if (sav->sav_count == 0) {
fnvlist_add_nvlist_array(nvroot, config,
(const nvlist_t * const *)NULL, 0);
} else {
list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);
fnvlist_add_nvlist_array(nvroot, config,
(const nvlist_t * const *)list, sav->sav_count);
for (i = 0; i < sav->sav_count; i++)
nvlist_free(list[i]);
kmem_free(list, sav->sav_count * sizeof (void *));
}
spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
nvlist_free(nvroot);
sav->sav_sync = B_FALSE;
}
/*
* Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
* The all-vdev ZAP must be empty.
*/
static void
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
if (vd->vdev_root_zap != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_AVZ_V2)) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_root_zap, tx));
}
if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_top_zap, tx));
}
if (vd->vdev_leaf_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_leaf_zap, tx));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_avz_build(vd->vdev_child[i], avz, tx);
}
}
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
nvlist_t *config;
/*
* If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
* its config may not be dirty but we still need to build per-vdev ZAPs.
* Similarly, if the pool is being assembled (e.g. after a split), we
* need to rebuild the AVZ although the config may not be dirty.
*/
if (list_is_empty(&spa->spa_config_dirty_list) &&
spa->spa_avz_action == AVZ_ACTION_NONE)
return;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
spa->spa_all_vdev_zaps != 0);
if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
/* Make and build the new AVZ */
uint64_t new_avz = zap_create(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
spa_avz_build(spa->spa_root_vdev, new_avz, tx);
/* Diff old AVZ with new one */
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t vdzap = za.za_first_integer;
if (zap_lookup_int(spa->spa_meta_objset, new_avz,
vdzap) == ENOENT) {
/*
* ZAP is listed in old AVZ but not in new one;
* destroy it
*/
VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
tx));
}
}
zap_cursor_fini(&zc);
/* Destroy the old AVZ */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
/* Replace the old AVZ in the dir obj with the new one */
VERIFY0(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
sizeof (new_avz), 1, &new_avz, tx));
spa->spa_all_vdev_zaps = new_avz;
} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
zap_cursor_t zc;
zap_attribute_t za;
/* Walk through the AVZ and destroy all listed ZAPs */
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t zap = za.za_first_integer;
VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
}
zap_cursor_fini(&zc);
/* Destroy and unlink the AVZ itself */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
VERIFY0(zap_remove(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
spa->spa_all_vdev_zaps = 0;
}
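/*
 * If there is no all-vdev ZAP at this point (e.g. it was just
 * destroyed above, or the pool predates per-vdev ZAPs), create a
 * fresh one linked from the pool directory object.
 */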
if (spa->spa_all_vdev_zaps == 0) {
spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_VDEV_ZAP_MAP, tx);
}
spa->spa_avz_action = AVZ_ACTION_NONE;
/* Create ZAPs for vdevs that don't have them. */
vdev_construct_zaps(spa->spa_root_vdev, tx);
config = spa_config_generate(spa, spa->spa_root_vdev,
dmu_tx_get_txg(tx), B_FALSE);
/*
* If we're upgrading the spa version then make sure that
* the config object gets updated with the correct version.
*/
if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa->spa_uberblock.ub_version);
spa_config_exit(spa, SCL_STATE, FTAG);
nvlist_free(spa->spa_config_syncing);
spa->spa_config_syncing = config;
spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
uint64_t *versionp = arg;
uint64_t version = *versionp;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
/*
* Setting the version is special cased when first creating the pool.
*/
ASSERT(tx->tx_txg != TXG_INITIAL);
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
ASSERT(version >= spa_version(spa));
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_history_log_internal(spa, "set", tx, "version=%lld",
(longlong_t)version);
}
/*
* Set zpool properties.
*/
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvp, elem))) {
uint64_t intval;
const char *strval, *fname;
zpool_prop_t prop;
const char *propname;
const char *elemname = nvpair_name(elem);
zprop_type_t proptype;
spa_feature_t fid;
switch (prop = zpool_name_to_prop(elemname)) {
case ZPOOL_PROP_VERSION:
intval = fnvpair_value_uint64(elem);
/*
* The version is synced separately before other
* properties and should be correct by now.
*/
ASSERT3U(spa_version(spa), >=, intval);
break;
case ZPOOL_PROP_ALTROOT:
/*
* 'altroot' is a non-persistent property. It should
* have been set temporarily at creation or import time.
*/
ASSERT(spa->spa_root != NULL);
break;
case ZPOOL_PROP_READONLY:
case ZPOOL_PROP_CACHEFILE:
/*
* 'readonly' and 'cachefile' are also non-persistent
* properties.
*/
break;
case ZPOOL_PROP_COMMENT:
strval = fnvpair_value_string(elem);
if (spa->spa_comment != NULL)
spa_strfree(spa->spa_comment);
spa->spa_comment = spa_strdup(strval);
/*
* We need to dirty the configuration on all the vdevs
* so that their labels get updated. We also need to
* update the cache file to keep it in sync with the
* MOS version. It's unnecessary to do this for pool
* creation since the vdev's configuration has already
* been dirtied.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", elemname, strval);
break;
case ZPOOL_PROP_COMPATIBILITY:
strval = fnvpair_value_string(elem);
if (spa->spa_compatibility != NULL)
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = spa_strdup(strval);
/*
* Dirty the configuration on vdevs as above.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
case ZPOOL_PROP_INVAL:
if (zpool_prop_feature(elemname)) {
fname = strchr(elemname, '@') + 1;
VERIFY0(zfeature_lookup_name(fname, &fid));
spa_feature_enable(spa, fid, tx);
spa_history_log_internal(spa, "set", tx,
"%s=enabled", elemname);
break;
} else if (!zfs_prop_user(elemname)) {
ASSERT(zpool_prop_feature(elemname));
break;
}
zfs_fallthrough;
default:
/*
* Set pool property values in the poolprops mos object.
*/
if (spa->spa_pool_props_object == 0) {
spa->spa_pool_props_object =
zap_create_link(mos, DMU_OT_POOL_PROPS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
tx);
}
/* normalize the property name */
if (prop == ZPOOL_PROP_INVAL) {
propname = elemname;
proptype = PROP_TYPE_STRING;
} else {
propname = zpool_prop_to_name(prop);
proptype = zpool_prop_get_type(prop);
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%s", elemname, strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(zpool_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
8, 1, &intval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%lld", elemname,
(longlong_t)intval);
switch (prop) {
case ZPOOL_PROP_DELEGATION:
spa->spa_delegation = intval;
break;
case ZPOOL_PROP_BOOTFS:
spa->spa_bootfs = intval;
break;
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
case ZPOOL_PROP_AUTOTRIM:
spa->spa_autotrim = intval;
spa_async_request(spa,
SPA_ASYNC_AUTOTRIM_RESTART);
break;
case ZPOOL_PROP_AUTOEXPAND:
spa->spa_autoexpand = intval;
if (tx->tx_txg != TXG_INITIAL)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
case ZPOOL_PROP_MULTIHOST:
spa->spa_multihost = intval;
break;
default:
break;
}
} else {
ASSERT(0); /* not allowed */
}
}
}
mutex_exit(&spa->spa_props_lock);
}
/*
* Perform one-time upgrade on-disk changes. spa_version() does not
* reflect the new version this txg, so there must be no changes this
* txg to anything that the upgrade code depends on after it executes.
* Therefore this must be called after dsl_pool_sync() does the sync
* tasks.
*/
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
dsl_pool_create_origin(dp, tx);
/* Keeping the origin open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
dsl_pool_upgrade_clones(dp, tx);
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
dsl_pool_upgrade_dir_clones(dp, tx);
/* Keeping the freedir open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
spa_feature_create_zap_objects(spa, tx);
}
/*
* The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
* when the ability to use lz4 compression for metadata was added.
* Old pools that have this feature enabled must be upgraded to have
* this feature active.
*/
if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
boolean_t lz4_en = spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS);
boolean_t lz4_ac = spa_feature_is_active(spa,
SPA_FEATURE_LZ4_COMPRESS);
if (lz4_en && !lz4_ac)
spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
}
/*
* If we haven't written the salt, do so now. Note that the
* feature may not be activated yet, but that's fine since
* the presence of this ZAP entry is backwards compatible.
*/
if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT) == ENOENT) {
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes, tx));
}
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(vim != NULL);
ASSERT(vib != NULL);
}
uint64_t obsolete_sm_object = 0;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
space_map_allocated(vd->vdev_obsolete_sm));
}
ASSERT(vd->vdev_obsolete_segments != NULL);
/*
* Since frees / remaps to an indirect vdev can only
* happen in syncing context, the obsolete segments
* tree must be empty when we start syncing.
*/
ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
}
/*
* Set the top-level vdev's max queue depth. Evaluate each top-level's
* async write queue depth in case it changed. The max queue depth will
* not change in the middle of syncing out this txg.
*/
static void
spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
zfs_vdev_queue_depth_pct / 100;
metaslab_class_t *normal = spa_normal_class(spa);
metaslab_class_t *special = spa_special_class(spa);
metaslab_class_t *dedup = spa_dedup_class(spa);
uint64_t slots_per_allocator = 0;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || !metaslab_group_initialized(mg))
continue;
metaslab_class_t *mc = mg->mg_class;
if (mc != normal && mc != special && mc != dedup)
continue;
/*
* It is safe to do a lock-free check here because only async
* allocations look at mg_max_alloc_queue_depth, and async
* allocations all happen from spa_sync().
*/
for (int i = 0; i < mg->mg_allocators; i++) {
ASSERT0(zfs_refcount_count(
&(mg->mg_allocator[i].mga_alloc_queue_depth)));
}
mg->mg_max_alloc_queue_depth = max_queue_depth;
for (int i = 0; i < mg->mg_allocators; i++) {
mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
zfs_vdev_def_queue_depth;
}
slots_per_allocator += zfs_vdev_def_queue_depth;
}
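/*
 * Distribute the aggregate slot count across each allocator of the
 * normal, special, and dedup classes.
 */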
for (int i = 0; i < spa->spa_alloc_count; i++) {
ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
mca_alloc_slots));
normal->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
special->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
dedup->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
}
normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
}
static void
spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_indirect_state_sync_verify(vd);
if (vdev_indirect_should_condense(vd)) {
spa_condense_indirect_start_sync(vd, tx);
break;
}
}
}
static void
spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
uint64_t txg = tx->tx_txg;
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
do {
int pass = ++spa->spa_sync_pass;
spa_sync_config_object(spa, tx);
spa_sync_aux_dev(spa, &spa->spa_spares, tx,
ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
spa_errlog_sync(spa, txg);
dsl_pool_sync(dp, txg);
if (pass < zfs_sync_pass_deferred_free ||
spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
/*
* If the log space map feature is active we don't
* care about deferred frees and the deferred bpobj
* as the log space map should effectively have the
* same results (i.e. appending only to one object).
*/
spa_sync_frees(spa, free_bpl, tx);
} else {
/*
* We can not defer frees in pass 1, because
* we sync the deferred frees later in pass 1.
*/
ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
&spa->spa_deferred_bpobj, tx);
}
brt_sync(spa, txg);
ddt_sync(spa, txg);
dsl_scan_sync(dp, tx);
dsl_errorscrub_sync(dp, tx);
svr_sync(spa, tx);
spa_sync_upgrades(spa, tx);
spa_flush_metaslabs(spa, tx);
vdev_t *vd = NULL;
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
!= NULL)
vdev_sync(vd, txg);
/*
* Note: We need to check if the MOS is dirty because we could
* have marked the MOS dirty without updating the uberblock
* (e.g. if we have sync tasks but no dirty user data). We need
* to check the uberblock's rootbp because it is updated if we
* have synced out dirty data (though in this case the MOS will
* most likely also be dirty due to second order effects, we
* don't want to rely on that here).
*/
if (pass == 1 &&
spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
!dmu_objset_is_dirty(mos, txg)) {
/*
* Nothing changed on the first pass, therefore this
* TXG is a no-op. Avoid syncing deferred frees, so
* that we can keep this TXG as a no-op.
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
break;
}
spa_sync_deferred_frees(spa, tx);
} while (dmu_objset_is_dirty(mos, txg));
}
/*
* Rewrite the vdev configuration (which includes the uberblock) to
* commit the transaction group.
*
* If there are no dirty vdevs, we sync the uberblock to a few random
* top-level vdevs that are known to be visible in the config cache
* (see spa_vdev_add() for a complete description). If there *are* dirty
* vdevs, sync the uberblock to all vdevs.
*/
static void
spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t txg = tx->tx_txg;
for (;;) {
int error = 0;
/*
* We hold SCL_STATE to prevent vdev open/close/etc.
* while we're attempting to write the vdev labels.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (list_is_empty(&spa->spa_config_dirty_list)) {
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd =
rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 ||
vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, txg);
} else {
error = vdev_config_sync(rvd->vdev_child,
rvd->vdev_children, txg);
}
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_STATE, FTAG);
if (error == 0)
break;
zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
}
/*
* Sync the specified transaction group. New blocks may be dirtied as
* part of the process, so we iterate until it converges.
*/
void
spa_sync(spa_t *spa, uint64_t txg)
{
vdev_t *vd = NULL;
VERIFY(spa_writeable(spa));
/*
* Wait for i/os issued in open context that need to complete
* before this txg syncs.
*/
(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/*
* Now that no more cloning can occur in this transaction group, and
* before any frees are issued, we can process the pending BRT
* updates.
*/
brt_pending_apply(spa, txg);
/*
* Lock out configuration changes.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa->spa_syncing_txg = txg;
spa->spa_sync_pass = 0;
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* If there are any pending vdev state changes, convert them
* into config changes that go out with this transaction group.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
/* Avoid holding the write lock unless actually necessary */
if (vd->vdev_aux == NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
continue;
}
/*
* We need the write lock here because, for aux vdevs,
* calling vdev_config_dirty() modifies sav_config.
* This is ugly and will become unnecessary when we
* eliminate the aux vdev wart by integrating all vdevs
* into the root vdev tree.
*/
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
}
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
spa_config_exit(spa, SCL_STATE, FTAG);
dsl_pool_t *dp = spa->spa_dsl_pool;
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
spa->spa_sync_starttime = gethrtime();
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
NSEC_TO_TICK(spa->spa_deadman_synctime));
/*
* If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
* set spa_deflate if we have no raid-z vdevs.
*/
if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
vdev_t *rvd = spa->spa_root_vdev;
int i;
for (i = 0; i < rvd->vdev_children; i++) {
vd = rvd->vdev_child[i];
if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
break;
}
if (i == rvd->vdev_children) {
spa->spa_deflate = TRUE;
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx));
}
}
spa_sync_adjust_vdev_max_queue_depth(spa);
spa_sync_condense_indirect(spa, tx);
spa_sync_iterate_to_convergence(spa, tx);
#ifdef ZFS_DEBUG
if (!list_is_empty(&spa->spa_config_dirty_list)) {
/*
* Make sure that the number of ZAPs for all the vdevs matches
* the number of ZAPs in the per-vdev ZAP list. This only gets
* called if the config is dirty; otherwise there may be
* outstanding AVZ operations that weren't completed in
* spa_sync_config_object.
*/
uint64_t all_vdev_zap_entry_count;
ASSERT0(zap_count(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
all_vdev_zap_entry_count);
}
#endif
if (spa->spa_vdev_removal != NULL) {
ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
}
spa_sync_rewrite_vdev_config(spa, tx);
dmu_tx_commit(tx);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = 0;
/*
* Clear the dirty config list.
*/
while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
vdev_config_clean(vd);
/*
* Now that the new config has synced transactionally,
* let it become visible to the config cache.
*/
if (spa->spa_config_syncing != NULL) {
spa_config_set(spa, spa->spa_config_syncing);
spa->spa_config_txg = txg;
spa->spa_config_syncing = NULL;
}
dsl_pool_sync_done(dp, txg);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* Update usable space statistics.
*/
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
!= NULL)
vdev_sync_done(vd, txg);
metaslab_class_evict_old(spa->spa_normal_class, txg);
metaslab_class_evict_old(spa->spa_log_class, txg);
spa_sync_close_syncing_log_sm(spa);
spa_update_dspace(spa);
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON)
vdev_autotrim_kick(spa);
/*
* It had better be the case that we didn't dirty anything
* since vdev_config_sync().
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
while (zfs_pause_spa_sync)
delay(1);
spa->spa_sync_pass = 0;
/*
* Update the last synced uberblock here. We want to do this at
* the end of spa_sync() so that consumers of spa_last_synced_txg()
* will be guaranteed that all the processing associated with
* that txg has been completed.
*/
spa->spa_ubsync = spa->spa_uberblock;
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_handle_ignored_writes(spa);
/*
* If any async tasks have been requested, kick them off.
*/
spa_async_dispatch(spa);
}
/*
* Sync all pools. We don't want to hold the namespace lock across these
* operations, so we take a reference on the spa_t and drop the lock during the
* sync.
*/
void
spa_sync_allpools(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
}
/*
* ==========================================================================
* Miscellaneous routines
* ==========================================================================
*/
/*
* Remove all pools in the system.
*/
void
spa_evict_all(void)
{
spa_t *spa;
/*
* Remove all cached state. All pools should be closed now,
* so every spa in the AVL tree should be unreferenced.
*/
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(NULL)) != NULL) {
/*
* Stop async tasks. The async thread may need to detach
* a device that's been replaced, which requires grabbing
* spa_namespace_lock, so we must drop it here.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
}
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
vdev_t *vd;
int i;
if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
return (vd);
if (aux) {
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd = spa->spa_l2cache.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
}
return (NULL);
}
void
spa_upgrade(spa_t *spa, uint64_t version)
{
ASSERT(spa_writeable(spa));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* This should only be called for a non-faulted pool, and since a
* future version would result in an unopenable pool, this shouldn't be
* possible.
*/
ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
}
static boolean_t
spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav)
{
(void) spa;
int i;
uint64_t vdev_guid;
for (i = 0; i < sav->sav_count; i++)
if (sav->sav_vdevs[i]->vdev_guid == guid)
return (B_TRUE);
for (i = 0; i < sav->sav_npending; i++) {
if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
&vdev_guid) == 0 && vdev_guid == guid)
return (B_TRUE);
}
return (B_FALSE);
}
boolean_t
spa_has_l2cache(spa_t *spa, uint64_t guid)
{
return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache));
}
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
return (spa_has_aux_vdev(spa, guid, &spa->spa_spares));
}
/*
* Check if a pool has an active shared spare device.
* Note: the reference count of an active spare is 2, once as a spare and once as a replacement
*/
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
int i, refcnt;
uint64_t pool;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++) {
if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
&refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
refcnt > 2)
return (B_TRUE);
}
return (B_FALSE);
}
uint64_t
spa_total_metaslabs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t m = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
m += vd->vdev_ms_count;
}
return (m);
}
/*
* Notify any waiting threads that some activity has switched from being in-
* progress to not-in-progress so that the thread can wake up and determine
* whether it is finished waiting.
*/
void
spa_notify_waiters(spa_t *spa)
{
/*
* Acquiring spa_activities_lock here prevents the cv_broadcast from
* happening between the waiting thread's check and cv_wait.
*/
mutex_enter(&spa->spa_activities_lock);
cv_broadcast(&spa->spa_activities_cv);
mutex_exit(&spa->spa_activities_lock);
}
/*
* Notify any waiting threads that the pool is exporting, and then block until
* they are finished using the spa_t.
*/
void
spa_wake_waiters(spa_t *spa)
{
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters_cancel = B_TRUE;
cv_broadcast(&spa->spa_activities_cv);
while (spa->spa_waiters != 0)
cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
spa->spa_waiters_cancel = B_FALSE;
mutex_exit(&spa->spa_activities_lock);
}
/* Whether the vdev or any of its descendants are being initialized/trimmed. */
static boolean_t
spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
&vd->vdev_initialize_lock : &vd->vdev_trim_lock;
mutex_exit(&spa->spa_activities_lock);
mutex_enter(lock);
mutex_enter(&spa->spa_activities_lock);
boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
(vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
(vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
mutex_exit(lock);
if (in_progress)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
activity))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* If use_guid is true, this checks whether the vdev specified by guid is
* being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
* is being initialized/trimmed. The caller must hold the config lock and
* spa_activities_lock.
*/
static int
spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
zpool_wait_activity_t activity, boolean_t *in_progress)
{
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
vdev_t *vd;
if (use_guid) {
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (EINVAL);
}
} else {
vd = spa->spa_root_vdev;
}
*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (0);
}
/*
* Locking for waiting threads
* ---------------------------
*
* Waiting threads need a way to check whether a given activity is in progress,
* and then, if it is, wait for it to complete. Each activity will have some
* in-memory representation of the relevant on-disk state which can be used to
* determine whether or not the activity is in progress. The in-memory state and
* the locking used to protect it will be different for each activity, and may
* not be suitable for use with a cvar (e.g., some state is protected by the
* config lock). To allow waiting threads to wait without any races, another
* lock, spa_activities_lock, is used.
*
* When the state is checked, both the activity-specific lock (if there is one)
* and spa_activities_lock are held. In some cases, the activity-specific lock
* is acquired explicitly (e.g. the config lock). In others, the locking is
* internal to some check (e.g. bpobj_is_empty). After checking, the waiting
* thread releases the activity-specific lock and, if the activity is in
* progress, then cv_waits using spa_activities_lock.
*
* The waiting thread is woken when another thread, one completing some
* activity, updates the state of the activity and then calls
* spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
* needs to hold its activity-specific lock when updating the state, and this
* lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
*
* Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
* and because it is held when the waiting thread checks the state of the
* activity, it can never be the case that the completing thread both updates
* the activity state and cv_broadcasts in between the waiting thread's check
* and cv_wait. Thus, a waiting thread can never miss a wakeup.
*
* In order to prevent deadlock, when the waiting thread does its check, in some
* cases it will temporarily drop spa_activities_lock in order to acquire the
* activity-specific lock. The order in which spa_activities_lock and the
* activity specific lock are acquired in the waiting thread is determined by
* the order in which they are acquired in the completing thread; if the
* completing thread calls spa_notify_waiters with the activity-specific lock
* held, then the waiting thread must also acquire the activity-specific lock
* first.
*/
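/*
 * Illustrative sketch of the completing-thread side of the protocol above
 * (added for exposition; not part of the original source).  'vd' stands for
 * an arbitrary leaf vdev, and the real vdev_trim completion path does more
 * work (e.g. persisting the new state).  The shape is: update the activity's
 * in-memory state under its own lock, then wake waiters via
 * spa_notify_waiters(), which broadcasts under spa_activities_lock.
 */
#if 0	/* example only */
	mutex_enter(&vd->vdev_trim_lock);
	vd->vdev_trim_state = VDEV_TRIM_COMPLETE;
	mutex_exit(&vd->vdev_trim_lock);
	spa_notify_waiters(vd->vdev_spa);
#endif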
static int
spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
switch (activity) {
case ZPOOL_WAIT_CKPT_DISCARD:
*in_progress =
(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
ENOENT);
break;
case ZPOOL_WAIT_FREE:
*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
!bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
spa_livelist_delete_check(spa));
break;
case ZPOOL_WAIT_INITIALIZE:
case ZPOOL_WAIT_TRIM:
error = spa_vdev_activity_in_progress(spa, use_tag, tag,
activity, in_progress);
break;
case ZPOOL_WAIT_REPLACE:
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
break;
case ZPOOL_WAIT_REMOVE:
*in_progress = (spa->spa_removing_phys.sr_state ==
DSS_SCANNING);
break;
case ZPOOL_WAIT_RESILVER:
if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
break;
zfs_fallthrough;
case ZPOOL_WAIT_SCRUB:
{
boolean_t scanning, paused, is_scrub;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
paused = dsl_scan_is_paused_scrub(scn);
*in_progress = (scanning && !paused &&
is_scrub == (activity == ZPOOL_WAIT_SCRUB));
break;
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
static int
spa_wait_common(const char *pool, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *waited)
{
/*
* The tag is used to distinguish between instances of an activity.
* 'initialize' and 'trim' are the only activities that we use this for.
* The other activities can only have a single instance in progress in a
* pool at one time, making the tag unnecessary.
*
* There can be multiple devices being replaced at once, but since they
* all finish once resilvering finishes, we don't bother keeping track
* of them individually, we just wait for them all to finish.
*/
if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
activity != ZPOOL_WAIT_TRIM)
return (EINVAL);
if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
return (EINVAL);
spa_t *spa;
int error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
/*
* Increment the spa's waiter count so that we can call spa_close and
* still ensure that the spa_t doesn't get freed before this thread is
* finished with it when the pool is exported. We want to call spa_close
* before we start waiting because otherwise the additional ref would
* prevent the pool from being exported or destroyed throughout the
* potentially long wait.
*/
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters++;
spa_close(spa, FTAG);
*waited = B_FALSE;
for (;;) {
boolean_t in_progress;
error = spa_activity_in_progress(spa, activity, use_tag, tag,
&in_progress);
if (error || !in_progress || spa->spa_waiters_cancel)
break;
*waited = B_TRUE;
if (cv_wait_sig(&spa->spa_activities_cv,
&spa->spa_activities_lock) == 0) {
error = EINTR;
break;
}
}
spa->spa_waiters--;
cv_signal(&spa->spa_waiters_cv);
mutex_exit(&spa->spa_activities_lock);
return (error);
}
/*
* Wait for a particular instance of the specified activity to complete, where
* the instance is identified by 'tag'
*/
int
spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
}
/*
* Wait for all instances of the specified activity to complete
*/
int
spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
}
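/*
 * Example caller (hypothetical, for exposition only): block until any
 * in-progress scrub on the pool named "tank" finishes, reporting via
 * 'waited' whether the thread actually had to sleep.
 */
#if 0	/* example only */
static int
example_wait_for_scrub(void)
{
	boolean_t waited;

	return (spa_wait("tank", ZPOOL_WAIT_SCRUB, &waited));
}
#endif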
sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
sysevent_t *ev = NULL;
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
if (resource) {
ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
ev->resource = resource;
}
#else
(void) spa, (void) vd, (void) hist_nvl, (void) name;
#endif
return (ev);
}
void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
if (ev) {
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
kmem_free(ev, sizeof (*ev));
}
#else
(void) ev;
#endif
}
/*
* Post a zevent corresponding to the given sysevent. The 'name' must be one
* of the event definitions in sys/sysevent/eventdefs.h. The payload will be
* filled in from the spa and (optionally) the vdev. This doesn't do anything
* in the userland libzpool, as we don't want consumers to misinterpret ztest
* or zdb as real changes.
*/
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);
/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);
/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);
/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);
/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);
/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);
/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);
/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW,
"log2 fraction of arc that can be used by inflight I/Os when "
"verifying pool during import");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
"Set to traverse metadata on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
"Set to traverse data on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
"Print vdev tree to zfs_dbgmsg during pool import");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
"Percentage of CPUs to run an IO worker thread");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RD,
"Number of threads per IO worker taskqueue");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, U64, ZMOD_RW,
"Allow importing pool with up to this number of missing top-level "
"vdevs (in read-only mode)");
/* END CSTYLED */
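/*
 * Administrative example (not part of this file): on FreeBSD the parameter
 * above is expected to surface as the sysctl vfs.zfs.max_missing_tvds
 * (name assumed from the ZFS_MODULE_PARAM mapping), e.g.
 *
 *	sysctl vfs.zfs.max_missing_tvds=1
 *
 * before attempting a read-only import of a pool with one missing
 * top-level vdev.
 */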
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
ZMOD_RW, "Set the livelist condense zthr to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT,
ZMOD_RW, "Set the livelist condense synctask to pause");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel,
INT, ZMOD_RW,
"Whether livelist condensing was canceled in the synctask");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel,
INT, ZMOD_RW,
"Whether livelist condensing was canceled in the zthr function");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT,
ZMOD_RW,
"Whether extra ALLOC blkptrs were added to a livelist entry while it "
"was being condensed");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index 89e1ce7165db..9ef948e9e434 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -1,3007 +1,3004 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>
/*
* SPA locking
*
* There are three basic locks for managing spa_t structures:
*
* spa_namespace_lock (global mutex)
*
* This lock must be acquired to do any of the following:
*
* - Lookup a spa_t by name
* - Add or remove a spa_t from the namespace
* - Increase spa_refcount from zero
* - Check if spa_refcount is zero
* - Rename a spa_t
* - add/remove/attach/detach devices
* - Held for the duration of create/destroy/import/export
*
* It does not need to handle recursion. A create or destroy may
* reference objects (files or zvols) in other pools, but by
* definition they must have an existing reference, and will never need
* to lookup a spa_t by name.
*
* spa_refcount (per-spa zfs_refcount_t protected by mutex)
*
* This reference count keeps track of any active users of the spa_t. The
* spa_t cannot be destroyed or freed while this is non-zero. Internally,
* the refcount is never really 'zero' - opening a pool implicitly keeps
* some references in the DMU. Internally we check against spa_minref, but
* present the image of a zero/non-zero value to consumers.
*
* spa_config_lock[] (per-spa array of rwlocks)
*
* This protects the spa_t from config changes, and must be held in
* the following circumstances:
*
* - RW_READER to perform I/O to the spa
* - RW_WRITER to change the vdev config
*
* The locking order is fairly straightforward:
*
* spa_namespace_lock -> spa_refcount
*
* The namespace lock must be acquired to increase the refcount from 0
* or to check if it is zero.
*
* spa_refcount -> spa_config_lock[]
*
* There must be at least one valid reference on the spa_t to acquire
* the config lock.
*
* spa_namespace_lock -> spa_config_lock[]
*
* The namespace lock must always be taken before the config lock.
*
*
* The spa_namespace_lock can be acquired directly and is globally visible.
*
* The namespace is manipulated using the following functions, all of which
* require the spa_namespace_lock to be held.
*
* spa_lookup() Lookup a spa_t by name.
*
* spa_add() Create a new spa_t in the namespace.
*
* spa_remove() Remove a spa_t from the namespace. This also
* frees up any memory associated with the spa_t.
*
* spa_next() Returns the next spa_t in the system, or the
* first if NULL is passed.
*
* spa_evict_all() Shutdown and remove all spa_t structures in
* the system.
*
* spa_guid_exists() Determine whether a pool/device guid exists.
*
* The spa_refcount is manipulated using the following functions:
*
* spa_open_ref() Adds a reference to the given spa_t. Must be
* called with spa_namespace_lock held if the
* refcount is currently zero.
*
* spa_close() Remove a reference from the spa_t. This will
* not free the spa_t or remove it from the
* namespace. No locking is required.
*
* spa_refcount_zero() Returns true if the refcount is currently
* zero. Must be called with spa_namespace_lock
* held.
*
* The spa_config_lock[] is an array of rwlocks, ordered as follows:
* SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
* spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
*
* To read the configuration, it suffices to hold one of these locks as reader.
* To modify the configuration, you must hold all locks as writer. To modify
* vdev state without altering the vdev tree's topology (e.g. online/offline),
* you must hold SCL_STATE and SCL_ZIO as writer.
*
* We use these distinct config locks to avoid recursive lock entry.
* For example, spa_sync() (which holds SCL_CONFIG as reader) induces
* block allocations (SCL_ALLOC), which may require reading space maps
* from disk (dmu_read() -> zio_read() -> SCL_ZIO).
*
* The spa config locks cannot be normal rwlocks because we need the
* ability to hand off ownership. For example, SCL_ZIO is acquired
* by the issuing thread and later released by an interrupt thread.
* They do, however, obey the usual write-wanted semantics to prevent
* writer (i.e. system administrator) starvation.
*
* The lock acquisition rules are as follows:
*
* SCL_CONFIG
* Protects changes to the vdev tree topology, such as vdev
* add/remove/attach/detach. Protects the dirty config list
* (spa_config_dirty_list) and the set of spares and l2arc devices.
*
* SCL_STATE
* Protects changes to pool state and vdev state, such as vdev
* online/offline/fault/degrade/clear. Protects the dirty state list
* (spa_state_dirty_list) and global pool state (spa_state).
*
* SCL_ALLOC
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_alloc() and metaslab_claim().
*
* SCL_ZIO
* Held by bp-level zios (those which have no io_vd upon entry)
* to prevent changes to the vdev tree. The bp-level zio implicitly
* protects all of its vdev child zios, which do not hold SCL_ZIO.
*
* SCL_FREE
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_free(). SCL_FREE is distinct from
* SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
* blocks in zio_done() while another i/o that holds either
* SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
*
* SCL_VDEV
* Held as reader to prevent changes to the vdev tree during trivial
* inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
* other locks, and lower than all of them, to ensure that it's safe
* to acquire regardless of caller context.
*
* In addition, the following rules apply:
*
* (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
* The lock ordering is SCL_CONFIG > spa_props_lock.
*
* (b) I/O operations on leaf vdevs. For any zio operation that takes
* an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
* or zio_write_phys() -- the caller must ensure that the config cannot
* change in the interim, and that the vdev cannot be reopened.
* SCL_STATE as reader suffices for both.
*
* The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
*
* spa_vdev_enter() Acquire the namespace lock and the config lock
* for writing.
*
* spa_vdev_exit() Release the config lock, wait for all I/O
* to complete, sync the updated configs to the
* cache, and release the namespace lock.
*
* vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
* Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
* locking is, always, based on spa_namespace_lock and spa_config_lock[].
*/
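/*
 * Illustrative reader-side use of the config locks described above (added
 * for exposition; not part of the original source).  A thread that only
 * needs a stable view of the vdev tree for a trivial inquiry takes
 * SCL_VDEV as reader:
 */
#if 0	/* example only */
static uint64_t
example_count_top_level_vdevs(spa_t *spa)
{
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	uint64_t children = spa->spa_root_vdev->vdev_children;
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (children);
}
#endif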
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;
static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;
spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
#ifdef ZFS_DEBUG
/*
* Everything except dprintf, set_error, spa, and indirect_remap is on
* by default in debug builds.
*/
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif
/*
* zfs_recover can be set to nonzero to attempt to recover from
* otherwise-fatal errors, typically caused by on-disk corruption. When
* set, calls to zfs_panic_recover() will turn into warning messages.
* This should only be used as a last resort, as it typically results
* in leaked space, or worse.
*/
int zfs_recover = B_FALSE;
/*
* If destroy encounters an EIO while reading metadata (e.g. indirect
* blocks), space referenced by the missing metadata can not be freed.
* Normally this causes the background destroy to become "stalled", as
* it is unable to make forward progress. While in this stalled state,
* all remaining space to free from the error-encountering filesystem is
* "temporarily leaked". Set this flag to cause it to ignore the EIO,
* permanently leak the space from indirect blocks that can not be read,
* and continue to free everything else that it can.
*
* The default, "stalling" behavior is useful if the storage partially
* fails (i.e. some but not all i/os fail), and then later recovers. In
* this case, we will be able to continue pool operations while it is
* partially failed, and when it recovers, we can continue to free the
* space, with no leaks. However, note that this case is actually
* fairly rare.
*
* Typically pools either (a) fail completely (but perhaps temporarily,
* e.g. a top-level vdev going offline), or (b) have localized,
* permanent errors (e.g. disk returns the wrong data due to bit flip or
* firmware bug). In case (a), this setting does not matter because the
* pool will be suspended and the sync thread will not be able to make
* forward progress regardless. In case (b), because the error is
* permanent, the best we can do is leak the minimum amount of space,
* which is what setting this flag will do. Therefore, it is reasonable
* for this flag to normally be set, but we chose the more conservative
* approach of not setting it, so that there is no possibility of
* leaking space in the "partial temporary" failure case.
*/
int zfs_free_leak_on_eio = B_FALSE;
/*
* Expiration time in milliseconds. This value has two meanings. First it is
* used to determine when the spa_deadman() logic should fire. By default the
* spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
* Secondly, the value determines if an I/O is considered "hung". Any I/O that
* has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
* in one of three behaviors controlled by zfs_deadman_failmode.
*/
uint64_t zfs_deadman_synctime_ms = 600000UL; /* 10 min. */
/*
* This value controls the maximum amount of time zio_wait() will block for an
* outstanding IO. By default this is 300 seconds at which point the "hung"
* behavior will be applied as described for zfs_deadman_synctime_ms.
*/
uint64_t zfs_deadman_ziotime_ms = 300000UL; /* 5 min. */
/*
* Check time in milliseconds. This defines the frequency at which we check
* for hung I/O.
*/
uint64_t zfs_deadman_checktime_ms = 60000UL; /* 1 min. */
/*
* By default the deadman is enabled.
*/
int zfs_deadman_enabled = B_TRUE;
/*
* Controls the behavior of the deadman when it detects a "hung" I/O.
* Valid values are zfs_deadman_failmode=<wait|continue|panic>.
*
* wait - Wait for the "hung" I/O (default)
* continue - Attempt to recover from a "hung" I/O
* panic - Panic the system
*/
const char *zfs_deadman_failmode = "wait";
/*
* The worst case is single-sector max-parity RAID-Z blocks, in which
* case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
* times the size; so just assume that. Add to this the fact that
* we can have up to 3 DVAs per bp, and one more factor of 2 because
* the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
* the worst case is:
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
*/
uint_t spa_asize_inflation = 24;
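/*
 * For reference (assuming the current header values VDEV_RAIDZ_MAXPARITY == 3
 * and SPA_DVAS_PER_BP == 3): (3 + 1) * 3 * 2 == 24, which matches the
 * default spa_asize_inflation above.
 */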
/*
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
* the pool to be consumed (bounded by spa_max_slop). This ensures that we
* don't run the pool completely out of space, due to unaccounted changes (e.g.
* to the MOS). It also limits the worst-case time to allocate space. If we
* have less than this amount of free space, most ZPL operations (e.g. write,
* create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are
* also part of this 3.2% of space which can't be consumed by normal writes;
* the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
* log space.
*
* Certain operations (e.g. file removal, most administrative actions) can
* use half the slop space. They will only return ENOSPC if less than half
* the slop space is free. Typically, once the pool has less than the slop
* space free, the user will use these operations to free up space in the pool.
* These are the operations that call dsl_pool_adjustedsize() with the netfree
* argument set to TRUE.
*
* Operations that are almost guaranteed to free up space in the absence of
* a pool checkpoint can use up to three quarters of the slop space
* (e.g zfs destroy).
*
* A very restricted set of operations are always permitted, regardless of
* the amount of free space. These are the operations that call
* dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
* increase in the amount of space used, it is possible to run the pool
* completely out of space, causing it to be permanently read-only.
*
* Note that on very small pools, the slop space will be larger than
* 3.2%, in an effort to have it be at least spa_min_slop (128MB),
* but we never allow it to be more than half the pool size.
*
* Further, on very large pools, the slop space will be smaller than
* 3.2%, to avoid reserving much more space than we actually need; bounded
* by spa_max_slop (128GB).
*
* See also the comments in zfs_space_check_t.
*/
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
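/*
 * Minimal sketch of the bounding described above (added for exposition; this
 * is not the real spa_get_slop_space(), which additionally subtracts the
 * embedded log space): 1/2^spa_slop_shift of the pool, raised to at least
 * spa_min_slop (but never more than half the pool) and capped at
 * spa_max_slop.
 */
#if 0	/* example only */
static uint64_t
example_slop_space(uint64_t poolsize)
{
	uint64_t slop = poolsize >> spa_slop_shift;

	slop = MAX(slop, MIN(spa_min_slop, poolsize >> 1));
	return (MIN(slop, spa_max_slop));
}
#endif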
static const int spa_allocators = 4;
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
/*
* By default dedup and user data indirects land in the special class
*/
static int zfs_ddt_data_is_special = B_TRUE;
static int zfs_user_indirect_is_special = B_TRUE;
/*
* The percentage of special class final space reserved for metadata only.
* Once 100 - zfs_special_class_metadata_reserve_pct percent of the special
* class has been allocated, only metadata is allowed into the class.
*/
static uint_t zfs_special_class_metadata_reserve_pct = 25;
/*
* ==========================================================================
* SPA config locking
* ==========================================================================
*/
static void
spa_config_lock_init(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
scl->scl_count = 0;
}
}
static void
spa_config_lock_destroy(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
ASSERT(scl->scl_writer == NULL);
ASSERT(scl->scl_write_wanted == 0);
ASSERT(scl->scl_count == 0);
}
}
int
spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
if (scl->scl_writer || scl->scl_write_wanted) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
} else {
ASSERT(scl->scl_writer != curthread);
if (scl->scl_count != 0) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
scl->scl_writer = curthread;
}
scl->scl_count++;
mutex_exit(&scl->scl_lock);
}
return (1);
}
static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
int mmp_flag)
{
(void) tag;
int wlocks_held = 0;
ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (scl->scl_writer == curthread)
wlocks_held |= (1 << i);
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
while (scl->scl_writer ||
(!mmp_flag && scl->scl_write_wanted)) {
cv_wait(&scl->scl_cv, &scl->scl_lock);
}
} else {
ASSERT(scl->scl_writer != curthread);
while (scl->scl_count != 0) {
scl->scl_write_wanted++;
cv_wait(&scl->scl_cv, &scl->scl_lock);
scl->scl_write_wanted--;
}
scl->scl_writer = curthread;
}
scl->scl_count++;
mutex_exit(&scl->scl_lock);
}
ASSERT3U(wlocks_held, <=, locks);
}
void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
spa_config_enter_impl(spa, locks, tag, rw, 0);
}
/*
* The spa_config_enter_mmp() allows the mmp thread to cut in front of
* outstanding write lock requests. This is needed since the mmp updates are
* time sensitive and failure to service them promptly will result in a
* suspended pool. This pool suspension has been seen in practice when there is
* a single disk in a pool that is responding slowly and presumably about to
* fail.
*/
void
spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
{
spa_config_enter_impl(spa, locks, tag, rw, 1);
}
void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
(void) tag;
for (int i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
ASSERT(scl->scl_count > 0);
if (--scl->scl_count == 0) {
ASSERT(scl->scl_writer == NULL ||
scl->scl_writer == curthread);
scl->scl_writer = NULL; /* OK in either case */
cv_broadcast(&scl->scl_cv);
}
mutex_exit(&scl->scl_lock);
}
}
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
int locks_held = 0;
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
if ((rw == RW_READER && scl->scl_count != 0) ||
(rw == RW_WRITER && scl->scl_writer == curthread))
locks_held |= 1 << i;
}
return (locks_held);
}
/*
* ==========================================================================
* SPA namespace functions
* ==========================================================================
*/
/*
* Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
* Returns NULL if no matching spa_t is found.
*/
spa_t *
spa_lookup(const char *name)
{
static spa_t search; /* spa_t is large; don't allocate on stack */
spa_t *spa;
avl_index_t where;
char *cp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
/*
* If it's a full dataset name, figure out the pool name and
* just use that.
*/
cp = strpbrk(search.spa_name, "/@#");
if (cp != NULL)
*cp = '\0';
spa = avl_find(&spa_namespace_avl, &search, &where);
return (spa);
}
/*
* Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
* If the zfs_deadman_enabled flag is set then it inspects all vdev queues
* looking for potentially hung I/Os.
*/
void
spa_deadman(void *arg)
{
spa_t *spa = arg;
/* Disable the deadman if the pool is suspended. */
if (spa_suspended(spa))
return;
zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
(gethrtime() - spa->spa_sync_starttime) / NANOSEC,
(u_longlong_t)++spa->spa_deadman_calls);
if (zfs_deadman_enabled)
vdev_deadman(spa->spa_root_vdev, FTAG);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
MSEC_TO_TICK(zfs_deadman_checktime_ms));
}
static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
const spa_log_sm_t *a = va;
const spa_log_sm_t *b = vb;
return (TREE_CMP(a->sls_txg, b->sls_txg));
}
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
* exist by calling spa_lookup() first.
*/
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
spa_t *spa;
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < TXG_SIZE; t++)
bplist_create(&spa->spa_free_bplist[t]);
(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
spa->spa_state = POOL_STATE_UNINITIALIZED;
spa->spa_freeze_txg = UINT64_MAX;
spa->spa_final_txg = UINT64_MAX;
spa->spa_load_max_txg = UINT64_MAX;
spa->spa_proc = &p0;
spa->spa_proc_state = SPA_PROC_NONE;
spa->spa_trust_config = B_TRUE;
spa->spa_hostid = zone_get_hostid(NULL);
spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
spa_set_deadman_failmode(spa, zfs_deadman_failmode);
zfs_refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);
spa_stats_init(spa);
avl_add(&spa_namespace_avl, spa);
/*
* Set the alternate root, if there is one.
*/
if (altroot)
spa->spa_root = spa_strdup(altroot);
spa->spa_alloc_count = spa_allocators;
spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
sizeof (spa_alloc_t), KM_SLEEP);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
NULL);
avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
sizeof (zio_t), offsetof(zio_t, io_alloc_node));
}
avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
offsetof(log_summary_entry_t, lse_node));
/*
* Every pool starts with the default cachefile
*/
list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
offsetof(spa_config_dirent_t, scd_link));
dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
list_insert_head(&spa->spa_config_list, dp);
VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
if (config != NULL) {
nvlist_t *features;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) == 0) {
VERIFY(nvlist_dup(features, &spa->spa_label_features,
0) == 0);
}
VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
}
if (spa->spa_label_features == NULL) {
VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
}
spa->spa_min_ashift = INT_MAX;
spa->spa_max_ashift = 0;
spa->spa_min_alloc = INT_MAX;
/* Reset cached value */
spa->spa_dedup_dspace = ~0ULL;
/*
* As a pool is being created, treat all features as disabled by
* setting SPA_FEATURE_DISABLED for all entries in the feature
* refcount cache.
*/
for (int i = 0; i < SPA_FEATURES; i++) {
spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
}
list_create(&spa->spa_leaf_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_leaf_node));
return (spa);
}
/*
* Removes a spa_t from the namespace, freeing up any memory used. Requires
* spa_namespace_lock. This is called only after the spa_t has been closed and
* deactivated.
*/
void
spa_remove(spa_t *spa)
{
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
ASSERT0(spa->spa_waiters);
nvlist_free(spa->spa_config_splitting);
avl_remove(&spa_namespace_avl, spa);
cv_broadcast(&spa_namespace_cv);
if (spa->spa_root)
spa_strfree(spa->spa_root);
- while ((dp = list_head(&spa->spa_config_list)) != NULL) {
- list_remove(&spa->spa_config_list, dp);
+ while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path != NULL)
spa_strfree(dp->scd_path);
kmem_free(dp, sizeof (spa_config_dirent_t));
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
avl_destroy(&spa->spa_allocs[i].spaa_tree);
mutex_destroy(&spa->spa_allocs[i].spaa_lock);
}
kmem_free(spa->spa_allocs, spa->spa_alloc_count *
sizeof (spa_alloc_t));
avl_destroy(&spa->spa_metaslabs_by_flushed);
avl_destroy(&spa->spa_sm_logs_by_txg);
list_destroy(&spa->spa_log_summary);
list_destroy(&spa->spa_config_list);
list_destroy(&spa->spa_leaf_list);
nvlist_free(spa->spa_label_features);
nvlist_free(spa->spa_load_info);
nvlist_free(spa->spa_feat_stats);
spa_config_set(spa, NULL);
zfs_refcount_destroy(&spa->spa_refcount);
spa_stats_destroy(spa);
spa_config_lock_destroy(spa);
for (int t = 0; t < TXG_SIZE; t++)
bplist_destroy(&spa->spa_free_bplist[t]);
zio_checksum_templates_free(spa);
cv_destroy(&spa->spa_async_cv);
cv_destroy(&spa->spa_evicting_os_cv);
cv_destroy(&spa->spa_proc_cv);
cv_destroy(&spa->spa_scrub_io_cv);
cv_destroy(&spa->spa_suspend_cv);
cv_destroy(&spa->spa_activities_cv);
cv_destroy(&spa->spa_waiters_cv);
mutex_destroy(&spa->spa_flushed_ms_lock);
mutex_destroy(&spa->spa_async_lock);
mutex_destroy(&spa->spa_errlist_lock);
mutex_destroy(&spa->spa_errlog_lock);
mutex_destroy(&spa->spa_evicting_os_lock);
mutex_destroy(&spa->spa_history_lock);
mutex_destroy(&spa->spa_proc_lock);
mutex_destroy(&spa->spa_props_lock);
mutex_destroy(&spa->spa_cksum_tmpls_lock);
mutex_destroy(&spa->spa_scrub_lock);
mutex_destroy(&spa->spa_suspend_lock);
mutex_destroy(&spa->spa_vdev_top_lock);
mutex_destroy(&spa->spa_feat_stats_lock);
mutex_destroy(&spa->spa_activities_lock);
kmem_free(spa, sizeof (spa_t));
}
/*
* Given a pool, return the next pool in the namespace, or NULL if there is
* none. If 'prev' is NULL, return the first pool.
*/
spa_t *
spa_next(spa_t *prev)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (prev)
return (AVL_NEXT(&spa_namespace_avl, prev));
else
return (avl_first(&spa_namespace_avl));
}
/*
* ==========================================================================
* SPA refcount functions
* ==========================================================================
*/
/*
* Add a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_open_ref(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_add(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_close(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t held by a dsl dir that is
* being asynchronously released. Async releases occur from a taskq
* performing eviction of dsl datasets and dirs. The namespace lock
* isn't held and the hold by the object being evicted may contribute to
* spa_minref (e.g. dataset or directory released during pool export),
* so the asserts in spa_close() do not apply.
*/
void
spa_async_close(spa_t *spa, const void *tag)
{
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Check to see if the spa refcount is zero. Must be called with
* spa_namespace_lock held. We really compare against spa_minref, which is the
* number of references acquired when opening a pool
*/
boolean_t
spa_refcount_zero(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
/*
* ==========================================================================
* SPA spare and l2cache tracking
* ==========================================================================
*/
/*
* Hot spares and cache devices are tracked using the same code below,
* for 'auxiliary' devices.
*/
typedef struct spa_aux {
uint64_t aux_guid;
uint64_t aux_pool;
avl_node_t aux_avl;
int aux_count;
} spa_aux_t;
static inline int
spa_aux_compare(const void *a, const void *b)
{
const spa_aux_t *sa = (const spa_aux_t *)a;
const spa_aux_t *sb = (const spa_aux_t *)b;
return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}
static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
avl_index_t where;
spa_aux_t search;
spa_aux_t *aux;
search.aux_guid = vd->vdev_guid;
if ((aux = avl_find(avl, &search, &where)) != NULL) {
aux->aux_count++;
} else {
aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
aux->aux_guid = vd->vdev_guid;
aux->aux_count = 1;
avl_insert(avl, aux, where);
}
}
static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search;
spa_aux_t *aux;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
aux = avl_find(avl, &search, &where);
ASSERT(aux != NULL);
if (--aux->aux_count == 0) {
avl_remove(avl, aux);
kmem_free(aux, sizeof (spa_aux_t));
} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
aux->aux_pool = 0ULL;
}
}
static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
spa_aux_t search, *found;
search.aux_guid = guid;
found = avl_find(avl, &search, NULL);
if (pool) {
if (found)
*pool = found->aux_pool;
else
*pool = 0ULL;
}
if (refcnt) {
if (found)
*refcnt = found->aux_count;
else
*refcnt = 0;
}
return (found != NULL);
}
static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search, *found;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
found = avl_find(avl, &search, &where);
ASSERT(found != NULL);
ASSERT(found->aux_pool == 0ULL);
found->aux_pool = spa_guid(vd->vdev_spa);
}
/*
* Spares are tracked globally due to the following constraints:
*
* - A spare may be part of multiple pools.
* - A spare may be added to a pool even if it's actively in use within
* another pool.
* - A spare in use in any pool can only be the source of a replacement if
* the target is a spare in the same pool.
*
* We keep track of all spares on the system through the use of a reference
* counted AVL tree. When a vdev is added as a spare, or used as a replacement
* spare, then we bump the reference count in the AVL tree. In addition, we set
* the 'vdev_isspare' member to indicate that the device is a spare (active or
* inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
*
* The 'spa_spare_lock' protects the AVL tree. These functions are normally
* called under the spa_namespace lock as part of vdev reconfiguration. The
* separate spare lock exists for the status query path, which does not need to
* be completely consistent with respect to other vdev configuration changes.
*/
static int
spa_spare_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_spare_add(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(!vd->vdev_isspare);
spa_aux_add(vd, &spa_spare_avl);
vd->vdev_isspare = B_TRUE;
mutex_exit(&spa_spare_lock);
}
void
spa_spare_remove(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_remove(vd, &spa_spare_avl);
vd->vdev_isspare = B_FALSE;
mutex_exit(&spa_spare_lock);
}
boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
boolean_t found;
mutex_enter(&spa_spare_lock);
found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
return (found);
}
void
spa_spare_activate(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_activate(vd, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
}
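/*
 * Illustrative check (a sketch; the surrounding policy is an assumption, only
 * the spa_spare_exists() signature comes from this file): callers can ask
 * whether a guid is already an active spare elsewhere before reusing it.
 *
 *	uint64_t owner = 0;
 *	int refs = 0;
 *	if (spa_spare_exists(vd->vdev_guid, &owner, &refs) &&
 *	    owner != 0 && owner != spa_guid(spa)) {
 *		(the device is an active spare in another pool)
 *	}
 */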
/*
* Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently support only one pool per cache device, so for
 * these devices the aux reference count never exceeds 1.
*/
static int
spa_l2cache_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_l2cache_add(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(!vd->vdev_isl2cache);
spa_aux_add(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_TRUE;
mutex_exit(&spa_l2cache_lock);
}
void
spa_l2cache_remove(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_remove(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_FALSE;
mutex_exit(&spa_l2cache_lock);
}
boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
boolean_t found;
mutex_enter(&spa_l2cache_lock);
found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
return (found);
}
void
spa_l2cache_activate(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_activate(vd, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
}
/*
* ==========================================================================
* SPA vdev locking
* ==========================================================================
*/
/*
* Lock the given spa_t for the purpose of adding or removing a vdev.
* Grabs the global spa_namespace_lock plus the spa config lock for writing.
* It returns the next transaction group for the spa_t.
*/
uint64_t
spa_vdev_enter(spa_t *spa)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
return (spa_vdev_config_enter(spa));
}
/*
* The same as spa_vdev_enter() above but additionally takes the guid of
 * the vdev being detached. When there is a rebuild in progress it will be
 * suspended while the vdev tree is modified, then resumed by spa_vdev_exit().
* The rebuild is canceled if only a single child remains after the detach.
*/
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
if (guid != 0) {
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd) {
vdev_rebuild_stop_wait(vd->vdev_top);
}
}
return (spa_vdev_config_enter(spa));
}
/*
* Internal implementation for spa_vdev_enter(). Used when a vdev
* operation requires multiple syncs (i.e. removing a device) while
* keeping the spa_namespace_lock held.
*/
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
return (spa_last_synced_txg(spa) + 1);
}
/*
* Used in combination with spa_vdev_config_enter() to allow the syncing
* of multiple transactions without releasing the spa_namespace_lock.
*/
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
const char *tag)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
int config_changed = B_FALSE;
ASSERT(txg > spa_last_synced_txg(spa));
spa->spa_pending_vdev = NULL;
/*
* Reassess the DTLs.
*/
vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);
if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
config_changed = B_TRUE;
spa->spa_config_generation++;
}
/*
* Verify the metaslab classes.
*/
ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
spa_config_exit(spa, SCL_ALL, spa);
/*
* Panic the system if the specified tag requires it. This
* is useful for ensuring that configurations are updated
* transactionally.
*/
if (zio_injection_enabled)
zio_handle_panic_injection(spa, tag, 0);
/*
* Note: this txg_wait_synced() is important because it ensures
* that there won't be more than one config change per txg.
* This allows us to use the txg as the generation number.
*/
if (error == 0)
txg_wait_synced(spa->spa_dsl_pool, txg);
if (vd != NULL) {
ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
if (vd->vdev_ops->vdev_op_leaf) {
mutex_enter(&vd->vdev_initialize_lock);
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
NULL);
mutex_exit(&vd->vdev_initialize_lock);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
mutex_exit(&vd->vdev_trim_lock);
}
/*
* The vdev may be both a leaf and top-level device.
*/
vdev_autotrim_stop_wait(vd);
spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
vdev_free(vd);
spa_config_exit(spa, SCL_STATE_ALL, spa);
}
/*
* If the config changed, update the config cache.
*/
if (config_changed)
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
}
/*
* Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
* synced to disk, and then update the global configuration cache with the new
* information.
*/
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
vdev_autotrim_restart(spa);
vdev_rebuild_restart(spa);
spa_vdev_config_exit(spa, vd, txg, error, FTAG);
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
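/*
 * Illustrative pairing (a sketch, not taken from this change): vdev add/remove
 * style operations bracket their work with spa_vdev_enter()/spa_vdev_exit(),
 * so the exit path can wait for the returned txg and refresh the config cache.
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	error = modify_vdev_tree(spa);		(hypothetical helper)
 *	return (spa_vdev_exit(spa, NULL, txg, error));
 */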
/*
* Lock the given spa_t for the purpose of changing vdev state.
*/
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
int locks = SCL_STATE_ALL | oplocks;
/*
 * Root pools may need to read from the underlying devfs filesystem
* when opening up a vdev. Unfortunately if we're holding the
* SCL_ZIO lock it will result in a deadlock when we try to issue
* the read from the root filesystem. Instead we "prefetch"
* the associated vnodes that we need prior to opening the
* underlying devices and cache them so that we can prevent
* any I/O when we are doing the actual open.
*/
if (spa_is_root(spa)) {
int low = locks & ~(SCL_ZIO - 1);
int high = locks & ~low;
spa_config_enter(spa, high, spa, RW_WRITER);
vdev_hold(spa->spa_root_vdev);
spa_config_enter(spa, low, spa, RW_WRITER);
} else {
spa_config_enter(spa, locks, spa, RW_WRITER);
}
spa->spa_vdev_locks = locks;
}
int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
boolean_t config_changed = B_FALSE;
vdev_t *vdev_top;
if (vd == NULL || vd == spa->spa_root_vdev) {
vdev_top = spa->spa_root_vdev;
} else {
vdev_top = vd->vdev_top;
}
if (vd != NULL || error == 0)
vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);
if (vd != NULL) {
if (vd != spa->spa_root_vdev)
vdev_state_dirty(vdev_top);
config_changed = B_TRUE;
spa->spa_config_generation++;
}
if (spa_is_root(spa))
vdev_rele(spa->spa_root_vdev);
ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
spa_config_exit(spa, spa->spa_vdev_locks, spa);
/*
* If anything changed, wait for it to sync. This ensures that,
* from the system administrator's perspective, zpool(8) commands
* are synchronous. This is important for things like zpool offline:
* when the command completes, you expect no further I/O from ZFS.
*/
if (vd != NULL)
txg_wait_synced(spa->spa_dsl_pool, 0);
/*
* If the config changed, update the config cache.
*/
if (config_changed) {
mutex_enter(&spa_namespace_lock);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
mutex_exit(&spa_namespace_lock);
}
return (error);
}
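/*
 * Illustrative pairing (a sketch; the lookup-by-guid step is an assumption
 * modeled on typical callers): vdev state changes use the lighter
 * spa_vdev_state_enter()/spa_vdev_state_exit() bracket and pass the vdev back
 * so the change is synced out and the config cache updated.
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
 *		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
 *	(change vd's state here)
 *	return (spa_vdev_state_exit(spa, vd, 0));
 */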
/*
* ==========================================================================
* Miscellaneous functions
* ==========================================================================
*/
void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
if (!nvlist_exists(spa->spa_label_features, feature)) {
fnvlist_add_boolean(spa->spa_label_features, feature);
/*
* When we are creating the pool (tx_txg==TXG_INITIAL), we can't
* dirty the vdev config because lock SCL_CONFIG is not held.
* Thankfully, in this case we don't need to dirty the config
* because it will be written out anyway when we finish
* creating the pool.
*/
if (tx->tx_txg != TXG_INITIAL)
vdev_config_dirty(spa->spa_root_vdev);
}
}
void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* Return the spa_t associated with given pool_guid, if it exists. If
* device_guid is non-zero, determine whether the pool exists *and* contains
* a device with the specified device_guid.
*/
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
spa_t *spa;
avl_tree_t *t = &spa_namespace_avl;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
continue;
if (spa->spa_root_vdev == NULL)
continue;
if (spa_guid(spa) == pool_guid) {
if (device_guid == 0)
break;
if (vdev_lookup_by_guid(spa->spa_root_vdev,
device_guid) != NULL)
break;
/*
* Check any devices we may be in the process of adding.
*/
if (spa->spa_pending_vdev) {
if (vdev_lookup_by_guid(spa->spa_pending_vdev,
device_guid) != NULL)
break;
}
}
}
return (spa);
}
/*
* Determine whether a pool with the given pool_guid exists.
*/
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
return (spa_by_guid(pool_guid, device_guid) != NULL);
}
char *
spa_strdup(const char *s)
{
size_t len;
char *new;
len = strlen(s);
new = kmem_alloc(len + 1, KM_SLEEP);
memcpy(new, s, len + 1);
return (new);
}
void
spa_strfree(char *s)
{
kmem_free(s, strlen(s) + 1);
}
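/*
 * Usage note (illustrative): spa_strdup() and spa_strfree() are a matched
 * pair; spa_strfree() sizes its kmem_free() from strlen(), so it must only be
 * given strings that were duplicated this way.
 *
 *	char *name = spa_strdup(spa_name(spa));
 *	(use name)
 *	spa_strfree(name);
 */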
uint64_t
spa_generate_guid(spa_t *spa)
{
uint64_t guid;
if (spa != NULL) {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
} else {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(guid, 0));
}
return (guid);
}
void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
char type[256];
const char *checksum = NULL;
const char *compress = NULL;
if (bp != NULL) {
if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
(void) snprintf(type, sizeof (type), "bswap %s %s",
DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
"metadata" : "data",
dmu_ot_byteswap[bswap].ob_name);
} else {
(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
sizeof (type));
}
if (!BP_IS_EMBEDDED(bp)) {
checksum =
zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
}
compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
}
SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
compress);
}
void
spa_freeze(spa_t *spa)
{
uint64_t freeze_txg = 0;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
if (spa->spa_freeze_txg == UINT64_MAX) {
freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
spa->spa_freeze_txg = freeze_txg;
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (freeze_txg != 0)
txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
void
zfs_panic_recover(const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
va_end(adx);
}
/*
* This is a stripped-down version of strtoull, suitable only for converting
* lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
uint64_t val = 0;
char c;
int digit;
while ((c = *str) != '\0') {
if (c >= '0' && c <= '9')
digit = c - '0';
else if (c >= 'a' && c <= 'f')
digit = 10 + c - 'a';
else
break;
val *= 16;
val += digit;
str++;
}
if (nptr)
*nptr = (char *)str;
return (val);
}
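/*
 * Example (illustrative): parsing stops at the first character that is not a
 * lowercase hex digit, and 'nptr' (if non-NULL) reports where it stopped.
 *
 *	char *end;
 *	uint64_t v = zfs_strtonum("1a2fzz", &end);
 *	now v == 0x1a2f and end points at "zz"
 */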
void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
/*
 * We bump the feature refcount for each special vdev added to the pool.
*/
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}
/*
* ==========================================================================
* Accessor functions
* ==========================================================================
*/
boolean_t
spa_shutting_down(spa_t *spa)
{
return (spa->spa_async_suspended);
}
dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
return (spa->spa_dsl_pool);
}
boolean_t
spa_is_initializing(spa_t *spa)
{
return (spa->spa_is_initializing);
}
boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
return (spa->spa_indirect_vdevs_loaded);
}
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
return (&spa->spa_ubsync.ub_rootbp);
}
void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
spa->spa_uberblock.ub_rootbp = *bp;
}
void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
if (spa->spa_root == NULL)
buf[0] = '\0';
else
(void) strlcpy(buf, spa->spa_root, buflen);
}
uint32_t
spa_sync_pass(spa_t *spa)
{
return (spa->spa_sync_pass);
}
char *
spa_name(spa_t *spa)
{
return (spa->spa_name);
}
uint64_t
spa_guid(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t guid;
/*
* If we fail to parse the config during spa_load(), we can go through
* the error path (which posts an ereport) and end up here with no root
* vdev. We stash the original pool guid in 'spa_config_guid' to handle
* this case.
*/
if (spa->spa_root_vdev == NULL)
return (spa->spa_config_guid);
guid = spa->spa_last_synced_guid != 0 ?
spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
/*
* Return the most recently synced out guid unless we're
* in syncing context.
*/
if (dp && dsl_pool_sync_context(dp))
return (spa->spa_root_vdev->vdev_guid);
else
return (guid);
}
uint64_t
spa_load_guid(spa_t *spa)
{
/*
* This is a GUID that exists solely as a reference for the
* purposes of the arc. It is generated at load time, and
* is never written to persistent storage.
*/
return (spa->spa_load_guid);
}
uint64_t
spa_last_synced_txg(spa_t *spa)
{
return (spa->spa_ubsync.ub_txg);
}
uint64_t
spa_first_txg(spa_t *spa)
{
return (spa->spa_first_txg);
}
uint64_t
spa_syncing_txg(spa_t *spa)
{
return (spa->spa_syncing_txg);
}
/*
* Return the last txg where data can be dirtied. The final txgs
* will be used to just clear out any deferred frees that remain.
*/
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
return (spa->spa_final_txg - TXG_DEFER_SIZE);
}
pool_state_t
spa_state(spa_t *spa)
{
return (spa->spa_state);
}
spa_load_state_t
spa_load_state(spa_t *spa)
{
return (spa->spa_load_state);
}
uint64_t
spa_freeze_txg(spa_t *spa)
{
return (spa->spa_freeze_txg);
}
/*
* Return the inflated asize for a logical write in bytes. This is used by the
* DMU to calculate the space a logical write will require on disk.
 * If lsize is smaller than the largest physical block size allocatable on this
 * pool (1 << spa_max_ashift), we use that size instead, since the write will
 * end up using the whole block anyway.
*/
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
if (lsize == 0)
return (0); /* No inflation needed */
return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
/*
* Return the amount of slop space in bytes. It is typically 1/32 of the pool
* (3.2%), minus the embedded log space. On very small pools, it may be
* slightly larger than this. On very large pools, it will be capped to
* the value of spa_max_slop. The embedded log space is not included in
* spa_dspace. By subtracting it, the usable space (per "zfs list") is a
* constant 97% of the total space, regardless of metaslab size (assuming the
* default spa_slop_shift=5 and a non-tiny pool).
*
* See the comment above spa_slop_shift for more details.
*/
uint64_t
spa_get_slop_space(spa_t *spa)
{
uint64_t space = 0;
uint64_t slop = 0;
/*
* Make sure spa_dedup_dspace has been set.
*/
if (spa->spa_dedup_dspace == ~0ULL)
spa_update_dspace(spa);
/*
* spa_get_dspace() includes the space only logically "used" by
* deduplicated data, so since it's not useful to reserve more
* space with more deduplicated data, we subtract that out here.
*/
space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
slop = MIN(space >> spa_slop_shift, spa_max_slop);
/*
* Subtract the embedded log space, but no more than half the (3.2%)
* unusable space. Note, the "no more than half" is only relevant if
* zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
* default.
*/
uint64_t embedded_log =
metaslab_class_get_dspace(spa_embedded_log_class(spa));
slop -= MIN(embedded_log, slop >> 1);
/*
* Slop space should be at least spa_min_slop, but no more than half
* the entire pool.
*/
slop = MAX(slop, MIN(space >> 1, spa_min_slop));
return (slop);
}
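/*
 * Worked example (illustrative numbers, default spa_slop_shift=5 and assuming
 * the default spa_min_slop of 128M and spa_max_slop of 128G): a pool with 1T
 * of dspace, no dedup, and 4G in the embedded log class gets
 * slop = MIN(1T >> 5, 128G) = 32G, then 32G - MIN(4G, 16G) = 28G, which is
 * already above the 128M floor.
 */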
uint64_t
spa_get_dspace(spa_t *spa)
{
return (spa->spa_dspace);
}
uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
return (spa->spa_checkpoint_info.sci_dspace);
}
void
spa_update_dspace(spa_t *spa)
{
spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
ddt_get_dedup_dspace(spa) + brt_get_dspace(spa);
if (spa->spa_nonallocating_dspace > 0) {
/*
* Subtract the space provided by all non-allocating vdevs that
* contribute to dspace. If a file is overwritten, its old
* blocks are freed and new blocks are allocated. If there are
* no snapshots of the file, the available space should remain
* the same. The old blocks could be freed from the
* non-allocating vdev, but the new blocks must be allocated on
* other (allocating) vdevs. By reserving the entire size of
* the non-allocating vdevs (including allocated space), we
* ensure that there will be enough space on the allocating
* vdevs for this file overwrite to succeed.
*
* Note that the DMU/DSL doesn't actually know or care
* how much space is allocated (it does its own tracking
* of how much space has been logically used). So it
* doesn't matter that the data we are moving may be
* allocated twice (on the old device and the new device).
*/
ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace);
spa->spa_dspace -= spa->spa_nonallocating_dspace;
}
}
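/*
 * Worked example (illustrative numbers): with 10T of dspace in the normal
 * class and a 2T top-level vdev currently being removed (and therefore
 * non-allocating), the pool reports 10T - 2T = 8T. Reserving the full 2T,
 * allocated or not, guarantees that every block still referenced on the
 * outgoing vdev can be rewritten onto the remaining vdevs.
 */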
/*
 * Return the failure mode that has been set for this pool. The default
* behavior will be to block all I/Os when a complete failure occurs.
*/
uint64_t
spa_get_failmode(spa_t *spa)
{
return (spa->spa_failmode);
}
boolean_t
spa_suspended(spa_t *spa)
{
return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}
uint64_t
spa_version(spa_t *spa)
{
return (spa->spa_ubsync.ub_version);
}
boolean_t
spa_deflate(spa_t *spa)
{
return (spa->spa_deflate);
}
metaslab_class_t *
spa_normal_class(spa_t *spa)
{
return (spa->spa_normal_class);
}
metaslab_class_t *
spa_log_class(spa_t *spa)
{
return (spa->spa_log_class);
}
metaslab_class_t *
spa_embedded_log_class(spa_t *spa)
{
return (spa->spa_embedded_log_class);
}
metaslab_class_t *
spa_special_class(spa_t *spa)
{
return (spa->spa_special_class);
}
metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
return (spa->spa_dedup_class);
}
/*
 * Locate an appropriate allocation class.
*/
metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
uint_t level, uint_t special_smallblk)
{
/*
* ZIL allocations determine their class in zio_alloc_zil().
*/
ASSERT(objtype != DMU_OT_INTENT_LOG);
boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
if (DMU_OT_IS_DDT(objtype)) {
if (spa->spa_dedup_class->mc_groups != 0)
return (spa_dedup_class(spa));
else if (has_special_class && zfs_ddt_data_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/* Indirect blocks for user data can land in special if allowed */
if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
if (has_special_class && zfs_user_indirect_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
if (DMU_OT_IS_METADATA(objtype) || level > 0) {
if (has_special_class)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/*
* Allow small file blocks in special class in some cases (like
* for the dRAID vdev feature). But always leave a reserve of
* zfs_special_class_metadata_reserve_pct exclusively for metadata.
*/
if (DMU_OT_IS_FILE(objtype) &&
has_special_class && size <= special_smallblk) {
metaslab_class_t *special = spa_special_class(spa);
uint64_t alloc = metaslab_class_get_alloc(special);
uint64_t space = metaslab_class_get_space(special);
uint64_t limit =
(space * (100 - zfs_special_class_metadata_reserve_pct))
/ 100;
if (alloc < limit)
return (special);
}
return (spa_normal_class(spa));
}
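/*
 * Illustrative call (a sketch; the smallblk threshold normally comes from the
 * dataset's special_small_blocks property, plumbed through the write policy):
 *
 *	metaslab_class_t *mc = spa_preferred_class(spa, lsize,
 *	    DMU_OT_PLAIN_FILE_CONTENTS, 0, special_smallblk);
 *
 * A level-0 file block lands in the special class only when lsize is at or
 * below the threshold and the special class is still under the
 * zfs_special_class_metadata_reserve_pct reserve.
 */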
void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_insert_head(&spa->spa_evicting_os_list, os);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_remove(&spa->spa_evicting_os_list, os);
cv_broadcast(&spa->spa_evicting_os_cv);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_wait(spa_t *spa)
{
mutex_enter(&spa->spa_evicting_os_lock);
while (!list_is_empty(&spa->spa_evicting_os_list))
cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
mutex_exit(&spa->spa_evicting_os_lock);
dmu_buf_user_evict_wait();
}
int
spa_max_replication(spa_t *spa)
{
/*
* As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
* handle BPs with more than one DVA allocated. Set our max
* replication level accordingly.
*/
if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
return (1);
return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
int
spa_prev_software_version(spa_t *spa)
{
return (spa->spa_prev_software_version);
}
uint64_t
spa_deadman_synctime(spa_t *spa)
{
return (spa->spa_deadman_synctime);
}
spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
return (spa->spa_autotrim);
}
uint64_t
spa_deadman_ziotime(spa_t *spa)
{
return (spa->spa_deadman_ziotime);
}
uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
return (spa->spa_deadman_failmode);
}
void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
if (strcmp(failmode, "wait") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
else if (strcmp(failmode, "continue") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
else if (strcmp(failmode, "panic") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
else
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}
void
spa_set_deadman_ziotime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_ziotime = ns;
mutex_exit(&spa_namespace_lock);
}
}
void
spa_set_deadman_synctime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_synctime = ns;
mutex_exit(&spa_namespace_lock);
}
}
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
uint64_t asize = DVA_GET_ASIZE(dva);
uint64_t dsize = asize;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (asize != 0 && spa->spa_deflate) {
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd != NULL)
dsize = (asize >> SPA_MINBLOCKSHIFT) *
vd->vdev_deflate_ratio;
}
return (dsize);
}
uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
return (dsize);
}
uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (dsize);
}
uint64_t
spa_dirty_data(spa_t *spa)
{
return (spa->spa_dsl_pool->dp_dirty_total);
}
/*
* ==========================================================================
* SPA Import Progress Routines
* ==========================================================================
*/
typedef struct spa_import_progress {
uint64_t pool_guid; /* unique id for updates */
char *pool_name;
spa_load_state_t spa_load_state;
uint64_t mmp_sec_remaining; /* MMP activity check */
uint64_t spa_load_max_txg; /* rewind txg */
procfs_list_node_t smh_node;
} spa_import_progress_t;
spa_history_list_t *spa_import_progress_list = NULL;
static int
spa_import_progress_show_header(struct seq_file *f)
{
seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid",
"load_state", "multihost_secs", "max_txg",
"pool_name");
return (0);
}
static int
spa_import_progress_show(struct seq_file *f, void *data)
{
spa_import_progress_t *sip = (spa_import_progress_t *)data;
seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n",
(u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
(u_longlong_t)sip->mmp_sec_remaining,
(u_longlong_t)sip->spa_load_max_txg,
(sip->pool_name ? sip->pool_name : "-"));
return (0);
}
/* Remove oldest elements from list until there are no more than 'size' left */
static void
spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
{
spa_import_progress_t *sip;
while (shl->size > size) {
sip = list_remove_head(&shl->procfs_list.pl_list);
if (sip->pool_name)
spa_strfree(sip->pool_name);
kmem_free(sip, sizeof (spa_import_progress_t));
shl->size--;
}
IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
}
static void
spa_import_progress_init(void)
{
spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
KM_SLEEP);
spa_import_progress_list->size = 0;
spa_import_progress_list->procfs_list.pl_private =
spa_import_progress_list;
procfs_list_install("zfs",
NULL,
"import_progress",
0644,
&spa_import_progress_list->procfs_list,
spa_import_progress_show,
spa_import_progress_show_header,
NULL,
offsetof(spa_import_progress_t, smh_node));
}
static void
spa_import_progress_destroy(void)
{
spa_history_list_t *shl = spa_import_progress_list;
procfs_list_uninstall(&shl->procfs_list);
spa_import_progress_truncate(shl, 0);
procfs_list_destroy(&shl->procfs_list);
kmem_free(shl, sizeof (spa_history_list_t));
}
int
spa_import_progress_set_state(uint64_t pool_guid,
spa_load_state_t load_state)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_state = load_state;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_max_txg = load_max_txg;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_mmp_check(uint64_t pool_guid,
uint64_t mmp_sec_remaining)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->mmp_sec_remaining = mmp_sec_remaining;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
/*
* A new import is in progress, add an entry.
*/
void
spa_import_progress_add(spa_t *spa)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
const char *poolname = NULL;
sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
sip->pool_guid = spa_guid(spa);
(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
&poolname);
if (poolname == NULL)
poolname = spa_name(spa);
sip->pool_name = spa_strdup(poolname);
sip->spa_load_state = spa_load_state(spa);
mutex_enter(&shl->procfs_list.pl_lock);
procfs_list_add(&shl->procfs_list, sip);
shl->size++;
mutex_exit(&shl->procfs_list.pl_lock);
}
void
spa_import_progress_remove(uint64_t pool_guid)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
if (sip->pool_name)
spa_strfree(sip->pool_name);
list_remove(&shl->procfs_list.pl_list, sip);
shl->size--;
kmem_free(sip, sizeof (spa_import_progress_t));
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
}
/*
* ==========================================================================
* Initialization and Termination
* ==========================================================================
*/
static int
spa_name_compare(const void *a1, const void *a2)
{
const spa_t *s1 = a1;
const spa_t *s2 = a2;
int s;
s = strcmp(s1->spa_name, s2->spa_name);
return (TREE_ISIGN(s));
}
void
spa_boot_init(void)
{
spa_config_load();
}
void
spa_init(spa_mode_t mode)
{
mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
offsetof(spa_t, spa_avl));
avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
spa_mode_global = mode;
#ifndef _KERNEL
if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
struct sigaction sa;
sa.sa_flags = SA_SIGINFO;
sigemptyset(&sa.sa_mask);
sa.sa_sigaction = arc_buf_sigsegv;
if (sigaction(SIGSEGV, &sa, NULL) == -1) {
perror("could not enable watchpoints: "
"sigaction(SIGSEGV, ...) = ");
} else {
arc_watch = B_TRUE;
}
}
#endif
fm_init();
zfs_refcount_init();
unique_init();
zfs_btree_init();
metaslab_stat_init();
brt_init();
ddt_init();
zio_init();
dmu_init();
zil_init();
- vdev_cache_stat_init();
vdev_mirror_stat_init();
vdev_raidz_math_init();
vdev_file_init();
zfs_prop_init();
chksum_init();
zpool_prop_init();
zpool_feature_init();
spa_config_load();
vdev_prop_init();
l2arc_start();
scan_init();
qat_init();
spa_import_progress_init();
}
void
spa_fini(void)
{
l2arc_stop();
spa_evict_all();
vdev_file_fini();
- vdev_cache_stat_fini();
vdev_mirror_stat_fini();
vdev_raidz_math_fini();
chksum_fini();
zil_fini();
dmu_fini();
zio_fini();
ddt_fini();
brt_fini();
metaslab_stat_fini();
zfs_btree_fini();
unique_fini();
zfs_refcount_fini();
fm_fini();
scan_fini();
qat_fini();
spa_import_progress_destroy();
avl_destroy(&spa_namespace_avl);
avl_destroy(&spa_spare_avl);
avl_destroy(&spa_l2cache_avl);
cv_destroy(&spa_namespace_cv);
mutex_destroy(&spa_namespace_lock);
mutex_destroy(&spa_spare_lock);
mutex_destroy(&spa_l2cache_lock);
}
/*
* Return whether this pool has a dedicated slog device. No locking needed.
* It's not a problem if the wrong answer is returned as it's only for
* performance and not correctness.
*/
boolean_t
spa_has_slogs(spa_t *spa)
{
return (spa->spa_log_class->mc_groups != 0);
}
spa_log_state_t
spa_get_log_state(spa_t *spa)
{
return (spa->spa_log_state);
}
void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
spa->spa_log_state = state;
}
boolean_t
spa_is_root(spa_t *spa)
{
return (spa->spa_is_root);
}
boolean_t
spa_writeable(spa_t *spa)
{
return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
}
/*
* Returns true if there is a pending sync task in any of the current
* syncing txg, the current quiescing txg, or the current open txg.
*/
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
!txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}
spa_mode_t
spa_mode(spa_t *spa)
{
return (spa->spa_mode);
}
uint64_t
spa_bootfs(spa_t *spa)
{
return (spa->spa_bootfs);
}
uint64_t
spa_delegation(spa_t *spa)
{
return (spa->spa_delegation);
}
objset_t *
spa_meta_objset(spa_t *spa)
{
return (spa->spa_meta_objset);
}
enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
return (spa->spa_dedup_checksum);
}
/*
* Reset pool scan stat per scan pass (or reboot).
*/
void
spa_scan_stat_init(spa_t *spa)
{
/* data not stored on disk */
spa->spa_scan_pass_start = gethrestime_sec();
if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
else
spa->spa_scan_pass_scrub_pause = 0;
if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
else
spa->spa_scan_pass_errorscrub_pause = 0;
spa->spa_scan_pass_scrub_spent_paused = 0;
spa->spa_scan_pass_exam = 0;
spa->spa_scan_pass_issued = 0;
/* error scrub stats */
spa->spa_scan_pass_errorscrub_spent_paused = 0;
}
/*
* Get scan stats for zpool status reports
*/
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))
return (SET_ERROR(ENOENT));
memset(ps, 0, sizeof (pool_scan_stat_t));
/* data stored on disk */
ps->pss_func = scn->scn_phys.scn_func;
ps->pss_state = scn->scn_phys.scn_state;
ps->pss_start_time = scn->scn_phys.scn_start_time;
ps->pss_end_time = scn->scn_phys.scn_end_time;
ps->pss_to_examine = scn->scn_phys.scn_to_examine;
ps->pss_examined = scn->scn_phys.scn_examined;
ps->pss_to_process = scn->scn_phys.scn_to_process;
ps->pss_processed = scn->scn_phys.scn_processed;
ps->pss_errors = scn->scn_phys.scn_errors;
/* data not stored on disk */
ps->pss_pass_exam = spa->spa_scan_pass_exam;
ps->pss_pass_start = spa->spa_scan_pass_start;
ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
ps->pss_pass_issued = spa->spa_scan_pass_issued;
ps->pss_issued =
scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
/* error scrub data stored on disk */
ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time;
ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined;
ps->pss_error_scrub_to_be_examined =
scn->errorscrub_phys.dep_to_examine;
/* error scrub data not stored on disk */
ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause;
return (0);
}
int
spa_maxblocksize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SPA_MAXBLOCKSIZE);
else
return (SPA_OLD_MAXBLOCKSIZE);
}
/*
* Returns the txg that the last device removal completed. No indirect mappings
* have been added since this txg.
*/
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
uint64_t vdevid;
uint64_t ret = -1ULL;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/*
* sr_prev_indirect_vdev is only modified while holding all the
* config locks, so it is sufficient to hold SCL_VDEV as reader when
* examining it.
*/
vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
while (vdevid != -1ULL) {
vdev_t *vd = vdev_lookup_top(spa, vdevid);
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
/*
* If the removal did not remap any data, we don't care.
*/
if (vdev_indirect_births_count(vib) != 0) {
ret = vdev_indirect_births_last_entry_txg(vib);
break;
}
vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
}
spa_config_exit(spa, SCL_VDEV, FTAG);
IMPLY(ret != -1ULL,
spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
return (ret);
}
int
spa_maxdnodesize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (DNODE_MAX_SIZE);
else
return (DNODE_MIN_SIZE);
}
boolean_t
spa_multihost(spa_t *spa)
{
return (spa->spa_multihost ? B_TRUE : B_FALSE);
}
uint32_t
spa_get_hostid(spa_t *spa)
{
return (spa->spa_hostid);
}
boolean_t
spa_trust_config(spa_t *spa)
{
return (spa->spa_trust_config);
}
uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
return (spa->spa_missing_tvds_allowed);
}
space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
return (spa->spa_syncing_log_sm);
}
void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
spa->spa_missing_tvds = missing;
}
/*
* Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
*/
const char *
spa_state_to_name(spa_t *spa)
{
ASSERT3P(spa, !=, NULL);
/*
 * It is possible for the spa to exist without a root vdev while it
 * transitions during import/export.
*/
vdev_t *rvd = spa->spa_root_vdev;
if (rvd == NULL) {
return ("TRANSITIONING");
}
vdev_state_t state = rvd->vdev_state;
vdev_aux_t aux = rvd->vdev_stat.vs_aux;
if (spa_suspended(spa) &&
(spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
return ("SUSPENDED");
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return ("OFFLINE");
case VDEV_STATE_REMOVED:
return ("REMOVED");
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return ("FAULTED");
else if (aux == VDEV_AUX_SPLIT_POOL)
return ("SPLIT");
else
return ("UNAVAIL");
case VDEV_STATE_FAULTED:
return ("FAULTED");
case VDEV_STATE_DEGRADED:
return ("DEGRADED");
case VDEV_STATE_HEALTHY:
return ("ONLINE");
default:
break;
}
return ("UNKNOWN");
}
boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
spa_has_checkpoint(spa_t *spa)
{
return (spa->spa_checkpoint_txg != 0);
}
boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
spa->spa_mode == SPA_MODE_READ);
}
uint64_t
spa_min_claim_txg(spa_t *spa)
{
uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
if (checkpoint_txg != 0)
return (checkpoint_txg + 1);
return (spa->spa_first_txg);
}
/*
* If there is a checkpoint, async destroys may consume more space from
* the pool instead of freeing it. In an attempt to save the pool from
* getting suspended when it is about to run out of space, we stop
* processing async destroys.
*/
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t unreserved = dsl_pool_unreserved_space(dp,
ZFS_SPACE_CHECK_EXTRA_RESERVED);
uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
if (spa_has_checkpoint(spa) && avail == 0)
return (B_TRUE);
return (B_FALSE);
}
#if defined(_KERNEL)
int
param_set_deadman_failmode_common(const char *val)
{
spa_t *spa = NULL;
char *p;
if (val == NULL)
return (SET_ERROR(EINVAL));
if ((p = strchr(val, '\n')) != NULL)
*p = '\0';
if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
strcmp(val, "panic"))
return (SET_ERROR(EINVAL));
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa_set_deadman_failmode(spa, val);
mutex_exit(&spa_namespace_lock);
}
return (0);
}
#endif
/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);
/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);
/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);
/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);
/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);
/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_special_class);
EXPORT_SYMBOL(spa_preferred_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
EXPORT_SYMBOL(spa_maxblocksize);
EXPORT_SYMBOL(spa_maxdnodesize);
/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
EXPORT_SYMBOL(spa_state_to_name);
EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
EXPORT_SYMBOL(spa_min_claim_txg);
EXPORT_SYMBOL(spa_suspend_async_destroy);
EXPORT_SYMBOL(spa_has_checkpoint);
EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
"Set additional debugging flags");
ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
"Set to attempt to recover from fatal errors");
ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
"Set to ignore IO errors during free and permanently leak the space");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW,
"Dead I/O check interval in milliseconds");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
"Enable deadman timer");
ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
"SPA size estimate multiplication factor");
ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
"Place DDT data into the special class");
ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
"Place user data indirect blocks into the special class");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
param_set_deadman_failmode, param_get_charp, ZMOD_RW,
"Failmode for deadman timer");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
"Pool sync expiration time in milliseconds");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
"IO expiration time in milliseconds");
ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
"Small file blocks in special vdevs depends on this much "
"free space available");
/* END CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
param_get_uint, ZMOD_RW, "Reserved free space in pool");
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index c243dddb7e61..612e66c3a8a8 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -1,6372 +1,6378 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
* Copyright (c) 2021, Klara Inc.
- * Copyright [2021] Hewlett Packard Enterprise Development LP
+ * Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
#include "zfs_prop.h"
/*
* One metaslab from each (normal-class) vdev is used by the ZIL. These are
* called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
* part of the spa_embedded_log_class. The metaslab with the most free space
* in each vdev is selected for this purpose when the pool is opened (or a
* vdev is added). See vdev_metaslab_init().
*
* Log blocks can be allocated from the following locations. Each one is tried
* in order until the allocation succeeds:
* 1. dedicated log vdevs, aka "slog" (spa_log_class)
* 2. embedded slog metaslabs (spa_embedded_log_class)
* 3. other metaslabs in normal vdevs (spa_normal_class)
*
* zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
* than this number of metaslabs in the vdev. This ensures that we don't set
* aside an unreasonable amount of space for the ZIL. If set to less than
* 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
* (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
*/
static uint_t zfs_embedded_slog_min_ms = 64;
/* default target for number of metaslabs per top-level vdev */
static uint_t zfs_vdev_default_ms_count = 200;
/* minimum number of metaslabs per top-level vdev */
static uint_t zfs_vdev_min_ms_count = 16;
/* practical upper limit of total metaslabs per top-level vdev */
static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;
/* lower limit for metaslab size (512M) */
static uint_t zfs_vdev_default_ms_shift = 29;
/* upper limit for metaslab size (16G) */
static uint_t zfs_vdev_max_ms_shift = 34;
int vdev_validate_skip = B_FALSE;
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
int zfs_vdev_dtl_sm_blksz = (1 << 12);
/*
* Rate limit slow IO (delay) events to this many per second.
*/
static unsigned int zfs_slow_io_events_per_second = 20;
/*
* Rate limit checksum events after this many checksum errors per second.
*/
static unsigned int zfs_checksum_events_per_second = 20;
/*
 * Ignore errors during scrub/resilver. Allows working around a resilver
 * on import when there are pool errors.
*/
static int zfs_scan_ignore_errors = 0;
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
int zfs_vdev_standard_sm_blksz = (1 << 17);
/*
* Tunable parameter for debugging or performance analysis. Setting this
* will cause pool corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zfs_nocacheflush = 0;
/*
* Maximum and minimum ashift values that can be automatically set based on
* vdev's physical ashift (disk's physical sector size). While ASHIFT_MAX
* is higher than the maximum value, it is intentionally limited here to not
* excessively impact pool space efficiency. Higher ashift values may still
* be forced by vdev logical ashift or by user via ashift property, but won't
* be set automatically as a performance optimization.
*/
uint_t zfs_vdev_max_auto_ashift = 14;
uint_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
if (vd->vdev_path != NULL) {
zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
vd->vdev_path, buf);
} else {
zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id,
(u_longlong_t)vd->vdev_guid, buf);
}
}
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
char state[20];
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
zfs_dbgmsg("%*svdev %llu: %s", indent, "",
(u_longlong_t)vd->vdev_id,
vd->vdev_ops->vdev_op_type);
return;
}
switch (vd->vdev_state) {
case VDEV_STATE_UNKNOWN:
(void) snprintf(state, sizeof (state), "unknown");
break;
case VDEV_STATE_CLOSED:
(void) snprintf(state, sizeof (state), "closed");
break;
case VDEV_STATE_OFFLINE:
(void) snprintf(state, sizeof (state), "offline");
break;
case VDEV_STATE_REMOVED:
(void) snprintf(state, sizeof (state), "removed");
break;
case VDEV_STATE_CANT_OPEN:
(void) snprintf(state, sizeof (state), "can't open");
break;
case VDEV_STATE_FAULTED:
(void) snprintf(state, sizeof (state), "faulted");
break;
case VDEV_STATE_DEGRADED:
(void) snprintf(state, sizeof (state), "degraded");
break;
case VDEV_STATE_HEALTHY:
(void) snprintf(state, sizeof (state), "healthy");
break;
default:
(void) snprintf(state, sizeof (state), "<state %u>",
(uint_t)vd->vdev_state);
}
zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
"", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
vd->vdev_islog ? " (log)" : "",
(u_longlong_t)vd->vdev_guid,
vd->vdev_path ? vd->vdev_path : "N/A", state);
for (uint64_t i = 0; i < vd->vdev_children; i++)
vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}
/*
* Virtual device management.
*/
static vdev_ops_t *const vdev_ops_table[] = {
&vdev_root_ops,
&vdev_raidz_ops,
&vdev_draid_ops,
&vdev_draid_spare_ops,
&vdev_mirror_ops,
&vdev_replacing_ops,
&vdev_spare_ops,
&vdev_disk_ops,
&vdev_file_ops,
&vdev_missing_ops,
&vdev_hole_ops,
&vdev_indirect_ops,
NULL
};
/*
* Given a vdev type, return the appropriate ops vector.
*/
static vdev_ops_t *
vdev_getops(const char *type)
{
vdev_ops_t *ops, *const *opspp;
for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
if (strcmp(ops->vdev_op_type, type) == 0)
break;
return (ops);
}
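/*
 * Illustrative lookup (a sketch): vdev creation maps the nvlist "type" string
 * onto an ops vector, e.g.
 *
 *	vdev_ops_t *ops = vdev_getops(VDEV_TYPE_MIRROR);
 *
 * returns &vdev_mirror_ops, while an unrecognized type string returns NULL.
 */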
/*
* Given a vdev and a metaslab class, find which metaslab group we're
* interested in. All vdevs may belong to two different metaslab classes.
* Dedicated slog devices use only the primary metaslab group, rather than a
* separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
*/
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
if (mc == spa_embedded_log_class(vd->vdev_spa) &&
vd->vdev_log_mg != NULL)
return (vd->vdev_log_mg);
else
return (vd->vdev_mg);
}
void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
(void) vd, (void) remain_rs;
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
}
/*
* Derive the enumerated allocation bias from string input.
* String origin is either the per-vdev zap or zpool(8).
*/
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
alloc_bias = VDEV_BIAS_LOG;
else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
alloc_bias = VDEV_BIAS_SPECIAL;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
alloc_bias = VDEV_BIAS_DEDUP;
return (alloc_bias);
}
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;
for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
asize = MAX(asize, csize);
}
return (asize);
}
uint64_t
vdev_default_min_asize(vdev_t *vd)
{
return (vd->vdev_min_asize);
}
/*
* Get the minimum allocatable size. We define the allocatable size as
* the vdev's asize rounded to the nearest metaslab. This allows us to
* replace or attach devices which don't have the same physical size but
* can still satisfy the same number of allocations.
*/
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
/*
* If our parent is NULL (inactive spare or cache) or is the root,
* just return our own asize.
*/
if (pvd == NULL)
return (vd->vdev_asize);
/*
* The top-level vdev just returns the allocatable size rounded
* to the nearest metaslab.
*/
if (vd == vd->vdev_top)
return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}
void
vdev_set_min_asize(vdev_t *vd)
{
vd->vdev_min_asize = vdev_get_min_asize(vd);
for (int c = 0; c < vd->vdev_children; c++)
vdev_set_min_asize(vd->vdev_child[c]);
}
/*
* Get the minimal allocation size for the top-level vdev.
*/
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
uint64_t min_alloc = 1ULL << vd->vdev_ashift;
if (vd->vdev_ops->vdev_op_min_alloc != NULL)
min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
return (min_alloc);
}
/*
* Get the parity level for a top-level vdev.
*/
uint64_t
vdev_get_nparity(vdev_t *vd)
{
uint64_t nparity = 0;
if (vd->vdev_ops->vdev_op_nparity != NULL)
nparity = vd->vdev_ops->vdev_op_nparity(vd);
return (nparity);
}
static int
vdev_prop_get_int(vdev_t *vd, vdev_prop_t prop, uint64_t *value)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
uint64_t objid;
int err;
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
return (EINVAL);
}
err = zap_lookup(mos, objid, vdev_prop_to_name(prop),
sizeof (uint64_t), 1, value);
if (err == ENOENT)
*value = vdev_prop_default_numeric(prop);
return (err);
}
/*
* Get the number of data disks for a top-level vdev.
*/
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
uint64_t ndisks = 1;
if (vd->vdev_ops->vdev_op_ndisks != NULL)
ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
return (ndisks);
}
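/*
* Look up a top-level vdev by its index under the root vdev; returns NULL
* if the index is out of range.
*/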
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (vdev < rvd->vdev_children) {
ASSERT(rvd->vdev_child[vdev] != NULL);
return (rvd->vdev_child[vdev]);
}
return (NULL);
}
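/*
* Depth-first search of the tree rooted at vd for a vdev with the given guid.
*/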
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
vdev_t *mvd;
if (vd->vdev_guid == guid)
return (vd);
for (int c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
NULL)
return (mvd);
return (NULL);
}
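/*
* Count the leaf vdevs below vd; vdev_count_leaves() applies this to the
* pool's root vdev while holding SCL_VDEV as reader.
*/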
static int
vdev_count_leaves_impl(vdev_t *vd)
{
int n = 0;
if (vd->vdev_ops->vdev_op_leaf)
return (1);
for (int c = 0; c < vd->vdev_children; c++)
n += vdev_count_leaves_impl(vd->vdev_child[c]);
return (n);
}
int
vdev_count_leaves(spa_t *spa)
{
int rc;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
rc = vdev_count_leaves_impl(spa->spa_root_vdev);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (rc);
}
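/*
* Link cvd into pvd's child array (growing it if necessary), update the
* guid sums of all ancestors, and track leaf vdevs on the spa leaf list.
*/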
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
size_t oldsize, newsize;
uint64_t id = cvd->vdev_id;
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(cvd->vdev_parent == NULL);
cvd->vdev_parent = pvd;
if (pvd == NULL)
return;
ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
oldsize = pvd->vdev_children * sizeof (vdev_t *);
pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
newsize = pvd->vdev_children * sizeof (vdev_t *);
newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
memcpy(newchild, pvd->vdev_child, oldsize);
kmem_free(pvd->vdev_child, oldsize);
}
pvd->vdev_child = newchild;
pvd->vdev_child[id] = cvd;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += cvd->vdev_guid_sum;
if (cvd->vdev_ops->vdev_op_leaf) {
list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
cvd->vdev_spa->spa_leaf_list_gen++;
}
}
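/*
* Unlink cvd from pvd's child array, freeing the array if it becomes empty,
* and subtract cvd's guid sum from all ancestors.
*/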
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
int c;
uint_t id = cvd->vdev_id;
ASSERT(cvd->vdev_parent == pvd);
if (pvd == NULL)
return;
ASSERT(id < pvd->vdev_children);
ASSERT(pvd->vdev_child[id] == cvd);
pvd->vdev_child[id] = NULL;
cvd->vdev_parent = NULL;
for (c = 0; c < pvd->vdev_children; c++)
if (pvd->vdev_child[c])
break;
if (c == pvd->vdev_children) {
kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
pvd->vdev_child = NULL;
pvd->vdev_children = 0;
}
if (cvd->vdev_ops->vdev_op_leaf) {
spa_t *spa = cvd->vdev_spa;
list_remove(&spa->spa_leaf_list, cvd);
spa->spa_leaf_list_gen++;
}
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}
/*
* Remove any holes in the child array.
*/
void
vdev_compact_children(vdev_t *pvd)
{
vdev_t **newchild, *cvd;
int oldc = pvd->vdev_children;
int newc;
ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (oldc == 0)
return;
for (int c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
newc++;
if (newc > 0) {
newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
for (int c = newc = 0; c < oldc; c++) {
if ((cvd = pvd->vdev_child[c]) != NULL) {
newchild[newc] = cvd;
cvd->vdev_id = newc++;
}
}
} else {
newchild = NULL;
}
kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
pvd->vdev_child = newchild;
pvd->vdev_children = newc;
}
/*
* Allocate and minimally initialize a vdev_t.
*/
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
vdev_t *vd;
vdev_indirect_config_t *vic;
vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
vic = &vd->vdev_indirect_config;
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
spa->spa_root_vdev = vd;
spa->spa_load_guid = spa_generate_guid(NULL);
}
if (guid == 0 && ops != &vdev_hole_ops) {
if (spa->spa_root_vdev == vd) {
/*
* The root vdev's guid will also be the pool guid,
* which must be unique among all pools.
*/
guid = spa_generate_guid(NULL);
} else {
/*
* Any other vdev's guid must be unique within the pool.
*/
guid = spa_generate_guid(spa);
}
ASSERT(!spa_guid_exists(spa_guid(spa), guid));
}
vd->vdev_spa = spa;
vd->vdev_id = id;
vd->vdev_guid = guid;
vd->vdev_guid_sum = guid;
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
vic->vic_prev_indirect_vdev = UINT64_MAX;
rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
/*
* Initialize rate limit structs for events. We rate limit ZIO delay
* and checksum events so that we don't overwhelm ZED with thousands
* of events when a disk is acting up.
*/
zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_checksum_rl,
&zfs_checksum_events_per_second, 1);
/*
* Default Thresholds for tuning ZED
*/
vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N);
vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T);
vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N);
vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
list_link_init(&vd->vdev_initialize_node);
list_link_init(&vd->vdev_leaf_node);
list_link_init(&vd->vdev_trim_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
}
txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
- vdev_cache_init(vd);
return (vd);
}
/*
* Allocate a new vdev. The 'alloctype' is used to control whether we are
* creating a new vdev or loading an existing one - the behavior is slightly
* different for each case.
*/
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
int alloctype)
{
vdev_ops_t *ops;
const char *type;
uint64_t guid = 0, islog;
vdev_t *vd;
vdev_indirect_config_t *vic;
const char *tmp = NULL;
int rc;
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
boolean_t top_level = (parent && !parent->vdev_parent);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
* Otherwise, vdev_alloc_common() will generate one for us.
*/
if (alloctype == VDEV_ALLOC_LOAD) {
uint64_t label_id;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
*/
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (SET_ERROR(ENOTSUP));
if (top_level && alloctype == VDEV_ALLOC_ADD) {
const char *bias;
/*
* If creating a top-level vdev, check for allocation
* classes input.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
&bias) == 0) {
alloc_bias = vdev_derive_alloc_bias(bias);
/* spa_vdev_add() expects feature to be enabled */
if (spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa,
SPA_FEATURE_ALLOCATION_CLASSES)) {
return (SET_ERROR(ENOTSUP));
}
}
/* spa_vdev_add() expects feature to be enabled */
if (ops == &vdev_draid_ops &&
spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
return (SET_ERROR(ENOTSUP));
}
}
/*
* Initialize the vdev specific data. This is done before calling
* vdev_alloc_common() since it may fail and this simplifies the
* error reporting and cleanup code paths.
*/
void *tsd = NULL;
if (ops->vdev_op_init != NULL) {
rc = ops->vdev_op_init(spa, nv, &tsd);
if (rc != 0) {
return (rc);
}
}
vd = vdev_alloc_common(spa, id, guid, ops);
vd->vdev_tsd = tsd;
vd->vdev_islog = islog;
if (top_level && alloc_bias != VDEV_BIAS_NONE)
vd->vdev_alloc_bias = alloc_bias;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tmp) == 0)
vd->vdev_path = spa_strdup(tmp);
/*
* ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
* fault on a vdev and want it to persist across imports (like with
* zpool offline -f).
*/
rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_faulted = 1;
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &tmp) == 0)
vd->vdev_devid = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, &tmp) == 0)
vd->vdev_physpath = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&tmp) == 0)
vd->vdev_enc_sysfs_path = spa_strdup(tmp);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &tmp) == 0)
vd->vdev_fru = spa_strdup(tmp);
/*
* Set the whole_disk property. If it's not specified, leave the value
* as -1.
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&vd->vdev_wholedisk) != 0)
vd->vdev_wholedisk = -1ULL;
vic = &vd->vdev_indirect_config;
ASSERT0(vic->vic_mapping_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
&vic->vic_mapping_object);
ASSERT0(vic->vic_births_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
&vic->vic_births_object);
ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
&vic->vic_prev_indirect_vdev);
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&vd->vdev_not_present);
/*
* Get the alignment requirement.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
/*
* Retrieve the vdev creation time.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
&vd->vdev_crtxg);
if (vd->vdev_ops == &vdev_root_ops &&
(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP,
&vd->vdev_root_zap);
}
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
if (top_level &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
&vd->vdev_ms_array);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
&vd->vdev_ms_shift);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
&vd->vdev_asize);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
&vd->vdev_noalloc);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
&vd->vdev_removing);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
&vd->vdev_top_zap);
} else {
ASSERT0(vd->vdev_top_zap);
}
if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL);
/* Note: metaslab_group_create() is now deferred */
}
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
} else {
ASSERT0(vd->vdev_leaf_zap);
}
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
&vd->vdev_dtl_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
&vd->vdev_unspare);
}
if (alloctype == VDEV_ALLOC_ROOTPOOL) {
uint64_t spare = 0;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare) == 0 && spare)
spa_spare_add(vd);
}
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
&vd->vdev_resilver_txg);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
&vd->vdev_rebuild_txg);
if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
vdev_defer_resilver(vd);
/*
* In general, when importing a pool we want to ignore the
* persistent fault state, as the diagnosis made on another
* system may not be valid in the current context. The only
* exception is if we forced a vdev to a persistently faulted
* state with 'zpool offline -f'. The persistent fault will
* remain across imports until cleared.
*
* Local vdevs will remain in the faulted state.
*/
if (spa_load_state(spa) == SPA_LOAD_OPEN ||
spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
&vd->vdev_degraded);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
&vd->vdev_removed);
if (vd->vdev_faulted || vd->vdev_degraded) {
const char *aux;
vd->vdev_label_aux =
VDEV_AUX_ERR_EXCEEDED;
if (nvlist_lookup_string(nv,
ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
strcmp(aux, "external") == 0)
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
else
vd->vdev_faulted = 0ULL;
}
}
}
/*
* Add ourselves to the parent's list of children.
*/
vdev_add_child(parent, vd);
*vdp = vd;
return (0);
}
void
vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
/*
* Scan queues are normally destroyed at the end of a scan. If the
* queue exists here, that implies the vdev is being removed while
* the scan is still running.
*/
if (vd->vdev_scan_io_queue != NULL) {
mutex_enter(&vd->vdev_scan_io_queue_lock);
dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
vd->vdev_scan_io_queue = NULL;
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
*/
vdev_close(vd);
ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
/*
* Free all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
ASSERT(vd->vdev_child == NULL);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
vd->vdev_ops->vdev_op_fini(vd);
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
}
if (vd->vdev_log_mg != NULL) {
ASSERT0(vd->vdev_ms_count);
metaslab_group_destroy(vd->vdev_log_mg);
vd->vdev_log_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
*/
vdev_remove_child(vd->vdev_parent, vd);
ASSERT(vd->vdev_parent == NULL);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
* Clean up vdev structure.
*/
vdev_queue_fini(vd);
- vdev_cache_fini(vd);
if (vd->vdev_path)
spa_strfree(vd->vdev_path);
if (vd->vdev_devid)
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path)
spa_strfree(vd->vdev_enc_sysfs_path);
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
if (vd->vdev_isspare)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm);
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
EQUIV(vd->vdev_indirect_births != NULL,
vd->vdev_indirect_mapping != NULL);
if (vd->vdev_indirect_births != NULL) {
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vdev_indirect_births_close(vd->vdev_indirect_births);
}
if (vd->vdev_obsolete_sm != NULL) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
}
range_tree_destroy(vd->vdev_obsolete_segments);
rw_destroy(&vd->vdev_indirect_rwlock);
mutex_destroy(&vd->vdev_obsolete_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
mutex_destroy(&vd->vdev_scan_io_queue_lock);
mutex_destroy(&vd->vdev_initialize_lock);
mutex_destroy(&vd->vdev_initialize_io_lock);
cv_destroy(&vd->vdev_initialize_io_cv);
cv_destroy(&vd->vdev_initialize_cv);
mutex_destroy(&vd->vdev_trim_lock);
mutex_destroy(&vd->vdev_autotrim_lock);
mutex_destroy(&vd->vdev_trim_io_lock);
cv_destroy(&vd->vdev_trim_cv);
cv_destroy(&vd->vdev_autotrim_cv);
cv_destroy(&vd->vdev_autotrim_kick_cv);
cv_destroy(&vd->vdev_trim_io_cv);
mutex_destroy(&vd->vdev_rebuild_lock);
cv_destroy(&vd->vdev_rebuild_cv);
zfs_ratelimit_fini(&vd->vdev_delay_rl);
zfs_ratelimit_fini(&vd->vdev_deadman_rl);
zfs_ratelimit_fini(&vd->vdev_checksum_rl);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
kmem_free(vd, sizeof (vdev_t));
}
/*
* Transfer top-level vdev state from svd to tvd.
*/
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
spa_t *spa = svd->vdev_spa;
metaslab_t *msp;
vdev_t *vd;
int t;
ASSERT(tvd == tvd->vdev_top);
tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
tvd->vdev_top_zap = svd->vdev_top_zap;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
svd->vdev_top_zap = 0;
if (tvd->vdev_mg)
ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
if (tvd->vdev_log_mg)
ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
tvd->vdev_mg = svd->vdev_mg;
tvd->vdev_log_mg = svd->vdev_log_mg;
tvd->vdev_ms = svd->vdev_ms;
svd->vdev_mg = NULL;
svd->vdev_log_mg = NULL;
svd->vdev_ms = NULL;
if (tvd->vdev_mg != NULL)
tvd->vdev_mg->mg_vd = tvd;
if (tvd->vdev_log_mg != NULL)
tvd->vdev_log_mg->mg_vd = tvd;
tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
svd->vdev_checkpoint_sm = NULL;
tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
svd->vdev_alloc_bias = VDEV_BIAS_NONE;
tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
svd->vdev_stat.vs_alloc = 0;
svd->vdev_stat.vs_space = 0;
svd->vdev_stat.vs_dspace = 0;
/*
* State which may be set on a top-level vdev that's in the
* process of being removed.
*/
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
ASSERT0(tvd->vdev_noalloc);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
tvd->vdev_noalloc = svd->vdev_noalloc;
tvd->vdev_removing = svd->vdev_removing;
tvd->vdev_rebuilding = svd->vdev_rebuilding;
tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
tvd->vdev_indirect_config = svd->vdev_indirect_config;
tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
tvd->vdev_indirect_births = svd->vdev_indirect_births;
range_tree_swap(&svd->vdev_obsolete_segments,
&tvd->vdev_obsolete_segments);
tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
svd->vdev_indirect_config.vic_mapping_object = 0;
svd->vdev_indirect_config.vic_births_object = 0;
svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
svd->vdev_indirect_mapping = NULL;
svd->vdev_indirect_births = NULL;
svd->vdev_obsolete_sm = NULL;
svd->vdev_noalloc = 0;
svd->vdev_removing = 0;
svd->vdev_rebuilding = 0;
for (t = 0; t < TXG_SIZE; t++) {
while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
}
if (list_link_active(&svd->vdev_config_dirty_node)) {
vdev_config_clean(svd);
vdev_config_dirty(tvd);
}
if (list_link_active(&svd->vdev_state_dirty_node)) {
vdev_state_clean(svd);
vdev_state_dirty(tvd);
}
tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
svd->vdev_deflate_ratio = 0;
tvd->vdev_islog = svd->vdev_islog;
svd->vdev_islog = 0;
dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
if (vd == NULL)
return;
vd->vdev_top = tvd;
for (int c = 0; c < vd->vdev_children; c++)
vdev_top_update(tvd, vd->vdev_child[c]);
}
/*
* Add a mirror/replacing vdev above an existing vdev. There is no need to
* call .vdev_op_init() since mirror/replacing vdevs do not have private state.
*/
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
spa_t *spa = cvd->vdev_spa;
vdev_t *pvd = cvd->vdev_parent;
vdev_t *mvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
mvd->vdev_max_asize = cvd->vdev_max_asize;
mvd->vdev_psize = cvd->vdev_psize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
vdev_remove_child(pvd, cvd);
vdev_add_child(pvd, mvd);
cvd->vdev_id = mvd->vdev_children;
vdev_add_child(mvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (mvd == mvd->vdev_top)
vdev_top_transfer(cvd, mvd);
return (mvd);
}
/*
* Remove a 1-way mirror/replacing vdev from the tree.
*/
void
vdev_remove_parent(vdev_t *cvd)
{
vdev_t *mvd = cvd->vdev_parent;
vdev_t *pvd = mvd->vdev_parent;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(mvd->vdev_children == 1);
ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
mvd->vdev_ops == &vdev_replacing_ops ||
mvd->vdev_ops == &vdev_spare_ops);
cvd->vdev_ashift = mvd->vdev_ashift;
cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
vdev_remove_child(mvd, cvd);
vdev_remove_child(pvd, mvd);
/*
* If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
* Otherwise, we could have detached an offline device, and when we
* go to import the pool we'll think we have two top-level vdevs,
* instead of a different version of the same top-level vdev.
*/
if (mvd->vdev_top == mvd) {
uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
cvd->vdev_orig_guid = cvd->vdev_guid;
cvd->vdev_guid += guid_delta;
cvd->vdev_guid_sum += guid_delta;
/*
* If the pool is not set to autoexpand, we also need to preserve
* mvd's asize to prevent automatic expansion of cvd.
* Otherwise, if we are adjusting the mirror by attaching and
* detaching children of non-uniform sizes, the mirror could
* autoexpand, unexpectedly requiring larger devices to
* re-establish the mirror.
*/
if (!cvd->vdev_spa->spa_autoexpand)
cvd->vdev_asize = mvd->vdev_asize;
}
cvd->vdev_id = mvd->vdev_id;
vdev_add_child(pvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
ASSERT(mvd->vdev_children == 0);
vdev_free(mvd);
}
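/*
* Create the metaslab group(s) for a top-level vdev. This is deferred from
* vdev_alloc() until the allocation bias is known; non-log vdevs also get an
* embedded-slog metaslab group.
*/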
void
vdev_metaslab_group_create(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
/*
* metaslab_group_create was delayed until allocation bias was available
*/
if (vd->vdev_mg == NULL) {
metaslab_class_t *mc;
if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
vd->vdev_alloc_bias = VDEV_BIAS_LOG;
ASSERT3U(vd->vdev_islog, ==,
(vd->vdev_alloc_bias == VDEV_BIAS_LOG));
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
mc = spa_log_class(spa);
break;
case VDEV_BIAS_SPECIAL:
mc = spa_special_class(spa);
break;
case VDEV_BIAS_DEDUP:
mc = spa_dedup_class(spa);
break;
default:
mc = spa_normal_class(spa);
}
vd->vdev_mg = metaslab_group_create(mc, vd,
spa->spa_alloc_count);
if (!vd->vdev_islog) {
vd->vdev_log_mg = metaslab_group_create(
spa_embedded_log_class(spa), vd, 1);
}
/*
* The spa ashift min/max only apply for the normal metaslab
* class. Class destination is late binding so ashift boundary
* setting had to wait until now.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
if (vd->vdev_ashift > spa->spa_max_ashift)
spa->spa_max_ashift = vd->vdev_ashift;
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
uint64_t min_alloc = vdev_get_min_alloc(vd);
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
}
}
}
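/*
* Allocate (or grow) the vdev's metaslab array and initialize any new
* metaslabs in the range [oldc, newc). For sufficiently large normal-class
* vdevs, the emptiest new metaslab is moved to the embedded-slog group.
*/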
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
uint64_t oldc = vd->vdev_ms_count;
uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
metaslab_t **mspp;
int error;
boolean_t expanding = (oldc != 0);
ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
/*
* This vdev is not being allocated from yet or is a hole.
*/
if (vd->vdev_ms_shift == 0)
return (0);
ASSERT(!vd->vdev_ishole);
ASSERT(oldc <= newc);
mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (expanding) {
memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp));
vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
for (uint64_t m = oldc; m < newc; m++) {
uint64_t object = 0;
/*
* vdev_ms_array may be 0 if we are creating the "fake"
* metaslabs for an indirect vdev for zdb's leak detection.
* See zdb_leak_init().
*/
if (txg == 0 && vd->vdev_ms_array != 0) {
error = dmu_read(spa->spa_meta_objset,
vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error != 0) {
vdev_dbgmsg(vd, "unable to read the metaslab "
"array [error=%d]", error);
return (error);
}
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
if (error != 0) {
vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
error);
return (error);
}
}
/*
* Find the emptiest metaslab on the vdev and mark it for use for
* embedded slog by moving it from the regular to the log metaslab
* group.
*/
if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
uint64_t slog_msid = 0;
uint64_t smallest = UINT64_MAX;
/*
* Note, we only search the new metaslabs, because the old
* (pre-existing) ones may be active (e.g. have non-empty
* range_tree's), and we don't move them to the new
* metaslab_t.
*/
for (uint64_t m = oldc; m < newc; m++) {
uint64_t alloc =
space_map_allocated(vd->vdev_ms[m]->ms_sm);
if (alloc < smallest) {
slog_msid = m;
smallest = alloc;
}
}
metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
/*
* The metaslab was marked as dirty at the end of
* metaslab_init(). Remove it from the dirty list so that we
* can uninitialize and reinitialize it to the new class.
*/
if (txg != 0) {
(void) txg_list_remove_this(&vd->vdev_ms_list,
slog_ms, txg);
}
uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
metaslab_fini(slog_ms);
VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
&vd->vdev_ms[slog_msid]));
}
if (txg == 0)
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
/*
* If the vdev is marked as non-allocating then don't
* activate the metaslabs since we want to ensure that
* no allocations are performed on this device.
*/
if (vd->vdev_noalloc) {
/* track non-allocating vdev space */
spa->spa_nonallocating_dspace += spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
} else if (!expanding) {
metaslab_group_activate(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_group_activate(vd->vdev_log_mg);
}
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (0);
}
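/*
* Tear down all metaslab state for this vdev: the checkpoint space map,
* every metaslab, and the metaslab array itself.
*/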
void
vdev_metaslab_fini(vdev_t *vd)
{
if (vd->vdev_checkpoint_sm != NULL) {
ASSERT(spa_feature_is_active(vd->vdev_spa,
SPA_FEATURE_POOL_CHECKPOINT));
space_map_close(vd->vdev_checkpoint_sm);
/*
* Even though we close the space map, we need to set its
* pointer to NULL. The reason is that vdev_metaslab_fini()
* may be called multiple times for certain operations
* (i.e. when destroying a pool) so we need to ensure that
* this clause never executes twice. This logic is similar
* to the one used for the vdev_ms clause below.
*/
vd->vdev_checkpoint_sm = NULL;
}
if (vd->vdev_ms != NULL) {
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_passivate(mg);
if (vd->vdev_log_mg != NULL) {
ASSERT(!vd->vdev_islog);
metaslab_group_passivate(vd->vdev_log_mg);
}
uint64_t count = vd->vdev_ms_count;
for (uint64_t m = 0; m < count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp != NULL)
metaslab_fini(msp);
}
vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
vd->vdev_ms_count = 0;
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT0(mg->mg_histogram[i]);
if (vd->vdev_log_mg != NULL)
ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
}
}
ASSERT0(vd->vdev_ms_count);
ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}
typedef struct vdev_probe_stats {
boolean_t vps_readable;
boolean_t vps_writeable;
int vps_flags;
} vdev_probe_stats_t;
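/*
* Completion callback for the probe I/Os issued by vdev_probe(). A successful
* read is followed by a write of the same data; the final NULL zio gathers the
* results, updates vdev_cant_read/vdev_cant_write, and fails any waiting
* parents with ENXIO if the device is inaccessible.
*/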
static void
vdev_probe_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
vdev_t *vd = zio->io_vd;
vdev_probe_stats_t *vps = zio->io_private;
ASSERT(vd->vdev_probe_zio != NULL);
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_error == 0)
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
zio_t *pio;
zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
if (vdev_readable(vd) &&
(vdev_writeable(vd) || !spa_writeable(spa))) {
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
vdev_dbgmsg(vd, "failed probe");
(void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, NULL, 0);
zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
ASSERT(vd->vdev_probe_zio == zio);
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
}
/*
* Determine whether this device is accessible.
*
* Read and write to several known locations: the pad regions of each
* vdev label but the first, which we leave alone in case it contains
* a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps = NULL;
zio_t *pio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* Don't probe the probe.
*/
if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
return (NULL);
/*
* To prevent 'probe storms' when a device fails, we create
* just one probe i/o at a time. All zios that want to probe
* this vdev will become parents of the probe io.
*/
mutex_enter(&vd->vdev_probe_lock);
if ((pio = vd->vdev_probe_zio) == NULL) {
vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
- ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
- ZIO_FLAG_TRYHARD;
+ ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_TRYHARD;
if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
/*
* vdev_cant_read and vdev_cant_write can only
* transition from TRUE to FALSE when we have the
* SCL_ZIO lock as writer; otherwise they can only
* transition from FALSE to TRUE. This ensures that
* any zio looking at these values can assume that
* failures persist for the life of the I/O. That's
* important because when a device has intermittent
* connectivity problems, we want to ensure that
* they're ascribed to the device (ENXIO) and not
* the zio (EIO).
*
* Since we hold SCL_ZIO as writer here, clear both
* values so the probe can reevaluate from first
* principles.
*/
vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
}
vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
vdev_probe_done, vps,
vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
/*
* We can't change the vdev state in this context, so we
* kick off an async task to do it on our behalf.
*/
if (zio != NULL) {
vd->vdev_probe_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_PROBE);
}
}
if (zio != NULL)
zio_add_child(zio, pio);
mutex_exit(&vd->vdev_probe_lock);
if (vps == NULL) {
ASSERT(zio != NULL);
return (NULL);
}
for (int l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
if (zio == NULL)
return (pio);
zio_nowait(pio);
return (NULL);
}
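/*
* Taskq callbacks used to load and open child vdevs in parallel.
*/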
static void
vdev_load_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_load_error = vdev_load(vd);
}
static void
vdev_open_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_open_thread = curthread;
vd->vdev_open_error = vdev_open(vd);
vd->vdev_open_thread = NULL;
}
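/*
* Returns B_TRUE if vd or any of its children is backed by a ZFS volume.
*/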
static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
#endif
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Returns B_TRUE if the passed child should be opened.
*/
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
(void) vd;
return (B_TRUE);
}
/*
* Open the requested child vdevs. If any of the leaf vdevs are using
* a ZFS volume then do the opens in a single thread. This avoids a
* deadlock when the current thread is holding the spa_namespace_lock.
*/
static void
vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
{
int children = vd->vdev_children;
taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
vd->vdev_nonrot = B_TRUE;
for (int c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (open_func(cvd) == B_FALSE)
continue;
if (tq == NULL || vdev_uses_zvols(vd)) {
cvd->vdev_open_error = vdev_open(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_open_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
vd->vdev_nonrot &= cvd->vdev_nonrot;
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
}
/*
* Open all child vdevs.
*/
void
vdev_open_children(vdev_t *vd)
{
vdev_open_children_impl(vd, vdev_default_open_children_func);
}
/*
* Conditionally open a subset of child vdevs.
*/
void
vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
{
vdev_open_children_impl(vd, open_func);
}
/*
* Compute the raidz-deflation ratio. Note that we hard-code
* 128k (1 << 17) because it is the "typical" blocksize.
* Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change;
* otherwise it would inconsistently account for existing bp's.
*/
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
vd->vdev_deflate_ratio = (1 << 17) /
(vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
}
}
/*
* Choose the best of two ashifts, preferring one between logical ashift
* (absolute minimum) and administrator defined maximum, otherwise take
* the biggest of the two.
*/
uint64_t
vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b)
{
if (a > logical && a <= zfs_vdev_max_auto_ashift) {
if (b <= logical || b > zfs_vdev_max_auto_ashift)
return (a);
else
return (MAX(a, b));
} else if (b <= logical || b > zfs_vdev_max_auto_ashift)
return (MAX(a, b));
return (b);
}
/*
* Maximize performance by inflating the configured ashift for top level
* vdevs to be as close to the physical ashift as possible while maintaining
* administrator defined limits and ensuring it doesn't go below the
* logical ashift.
*/
static void
vdev_ashift_optimize(vdev_t *vd)
{
ASSERT(vd == vd->vdev_top);
if (vd->vdev_ashift < vd->vdev_physical_ashift &&
vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) {
vd->vdev_ashift = MIN(
MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
MAX(zfs_vdev_min_auto_ashift,
vd->vdev_physical_ashift));
} else {
/*
* If the logical and physical ashifts are the same, then
* we ensure that the top-level vdev's ashift is not smaller
* than our minimum ashift value. For the unusual case
* where logical ashift > physical ashift, we can't cap
* the calculated ashift based on max ashift as that
* would cause failures.
* We still check if we need to increase it to match
* the min ashift.
*/
vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
vd->vdev_ashift);
}
}
/*
* Prepare a virtual device for access.
*/
int
vdev_open(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
int error;
uint64_t osize = 0;
uint64_t max_osize = 0;
uint64_t asize, max_asize, psize;
uint64_t logical_ashift = 0;
uint64_t physical_ashift = 0;
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
vd->vdev_state == VDEV_STATE_CANT_OPEN ||
vd->vdev_state == VDEV_STATE_OFFLINE);
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_min_asize = vdev_get_min_asize(vd);
/*
* If this vdev is not removed, check its fault status. If it's
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
&logical_ashift, &physical_ashift);
/* Keep the device in removed state if unplugged */
if (error == ENOENT && vd->vdev_removed) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_REMOVED,
VDEV_AUX_NONE);
return (error);
}
/*
* Physical volume size should never be larger than its max size, unless
* the disk has shrunk while we were reading it or the device is buggy
* or damaged: either way it's not safe for use, bail out of the open.
*/
if (osize > max_osize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_OPEN_FAILED);
return (SET_ERROR(ENXIO));
}
/*
* Reset the vdev_reopening flag so that we actually close
* the vdev on error.
*/
vd->vdev_reopening = B_FALSE;
if (zio_injection_enabled && error == 0)
error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
if (error) {
if (vd->vdev_removed &&
vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
vd->vdev_removed = B_FALSE;
if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
vd->vdev_stat.vs_aux);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
vd->vdev_stat.vs_aux);
}
return (error);
}
vd->vdev_removed = B_FALSE;
/*
* Recheck the faulted flag now that we have confirmed that
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
}
/*
* For hole or missing vdevs we just return success.
*/
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
return (0);
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_NONE);
break;
}
}
osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
max_asize = max_osize - (VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE);
} else {
if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
max_asize = max_osize;
}
/*
* If the vdev was expanded, record this so that we can re-create the
* uberblock rings in labels {2,3}, during the next sync.
*/
if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
vd->vdev_copy_uberblocks = B_TRUE;
vd->vdev_psize = psize;
/*
* Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EINVAL));
}
/*
* We can always set the logical/physical ashift members since
* their values are only used to calculate the vdev_ashift when
* the device is first added to the config. These values should
* not be used for anything else since they may change whenever
* the device is reopened and we don't store them in the label.
*/
vd->vdev_physical_ashift =
MAX(physical_ashift, vd->vdev_physical_ashift);
vd->vdev_logical_ashift = MAX(logical_ashift,
vd->vdev_logical_ashift);
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
* For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
/*
* If the vdev_ashift was not overridden at creation time,
* then set it to the logical ashift and optimize the ashift.
*/
if (vd->vdev_ashift == 0) {
vd->vdev_ashift = vd->vdev_logical_ashift;
if (vd->vdev_logical_ashift > ASHIFT_MAX) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_ASHIFT_TOO_BIG);
return (SET_ERROR(EDOM));
}
if (vd->vdev_top == vd) {
vdev_ashift_optimize(vd);
}
}
if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
vd->vdev_ashift > ASHIFT_MAX)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_ASHIFT);
return (SET_ERROR(EDOM));
}
} else {
/*
* Make sure the alignment required hasn't increased.
*/
if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
vd->vdev_ops->vdev_op_leaf) {
(void) zfs_ereport_post(
FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
spa, vd, NULL, NULL, 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EDOM));
}
vd->vdev_max_asize = max_asize;
}
/*
* If all children are healthy, we update asize if either:
* The asize has increased, due to a device expansion caused by dynamic
* LUN growth or vdev replacement, and automatic expansion is enabled,
* making the additional space available.
*
* The asize has decreased, due to a device shrink usually caused by a
* vdev replace with a smaller device. This ensures that calculations
* based on max_asize and asize (e.g. esize) are always valid. It's safe
* to do this as we've already validated that asize is greater than
* vdev_min_asize.
*/
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
((asize > vd->vdev_asize &&
(vd->vdev_expanding || spa->spa_autoexpand)) ||
(asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
/*
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(error = zio_wait(vdev_probe(vd, NULL))) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
VDEV_AUX_ERR_EXCEEDED);
return (error);
}
/*
* Track the minimum allocation size.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
uint64_t min_alloc = vdev_get_min_alloc(vd);
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
}
/*
* If this is a leaf vdev, assess whether a resilver is needed.
* But don't do this if we are doing a reopen for a scrub, since
* this would just restart the scrub we are already doing.
*/
if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
return (0);
}
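/*
* Taskq callback used by vdev_validate() to validate children in parallel.
*/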
static void
vdev_validate_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_validate_thread = curthread;
vd->vdev_validate_error = vdev_validate(vd);
vd->vdev_validate_thread = NULL;
}
/*
* Called once the vdevs are all opened, this routine validates the label
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
vdev_validate(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
taskq_t *tq = NULL;
nvlist_t *label;
uint64_t guid = 0, aux_guid = 0, top_guid;
uint64_t state;
nvlist_t *nvl;
uint64_t txg;
int children = vd->vdev_children;
if (vdev_validate_skip)
return (0);
if (children > 0) {
tq = taskq_create("vdev_validate", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
for (uint64_t c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
vdev_validate_child(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < children; c++) {
int error = vd->vdev_child[c]->vdev_validate_error;
if (error != 0)
return (SET_ERROR(EBADF));
}
/*
* If the device has already failed, or was marked offline, don't do
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
return (0);
/*
* If we are performing an extreme rewind, we allow for a label that
* was modified at a point after the current txg.
* If config lock is not held do not check for the txg. spa_sync could
* be updating the vdev's label before updating spa_last_synced_txg.
*/
if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
txg = UINT64_MAX;
else
txg = spa_last_synced_txg(spa);
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
"txg %llu", (u_longlong_t)txg);
return (0);
}
/*
* Determine if this vdev has been split off into another
* pool. If so, then refuse to open it.
*/
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
&aux_guid) == 0 && aux_guid == spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_GUID);
return (0);
}
/*
* If config is not trusted then ignore the spa guid check. This is
* necessary because if the machine crashed during a re-guid the new
* guid might have been written to all of the vdev labels, but not the
* cached config. The check will be performed again once we have the
* trusted config from the MOS.
*/
if (spa->spa_trust_config && guid != spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
"match config (%llu != %llu)", (u_longlong_t)guid,
(u_longlong_t)spa_guid(spa));
return (0);
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
!= 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
&aux_guid) != 0)
aux_guid = 0;
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_GUID);
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
!= 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_TOP_GUID);
return (0);
}
/*
* If this vdev just became a top-level vdev because its sibling was
* detached, it will have adopted the parent's vdev guid -- but the
* label may or may not be on disk yet. Fortunately, either version
* of the label will have the same top guid, so if we're a top-level
* vdev, we can safely compare to that instead.
* However, if the config comes from a cachefile that failed to update
* after the detach, a top-level vdev will appear as a non top-level
* vdev in the config. Also relax the constraints if we perform an
* extreme rewind.
*
* If we split this vdev off instead, then we also check the
* original pool's guid. We don't want to consider the vdev
* corrupt if it is partway through a split operation.
*/
if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
boolean_t mismatch = B_FALSE;
if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
mismatch = B_TRUE;
} else {
if (vd->vdev_guid != top_guid &&
vd->vdev_top->vdev_guid != guid)
mismatch = B_TRUE;
}
if (mismatch) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: config guid "
"doesn't match label guid");
vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
(u_longlong_t)vd->vdev_guid,
(u_longlong_t)vd->vdev_top->vdev_guid);
vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
"aux_guid %llu", (u_longlong_t)guid,
(u_longlong_t)top_guid, (u_longlong_t)aux_guid);
return (0);
}
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_STATE);
return (0);
}
nvlist_free(label);
/*
* If this is a verbatim import, no need to check the
* state of the pool.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE) {
vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
"for spa %s", (u_longlong_t)state, spa->spa_name);
return (SET_ERROR(EBADF));
}
/*
* If we were able to open and validate a vdev that was
* previously marked permanently unavailable, clear that state
* now.
*/
if (vd->vdev_not_present)
vd->vdev_not_present = 0;
return (0);
}
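/*
* Copy the device path and enclosure sysfs path from svd to dvd, logging any
* change via zfs_dbgmsg().
*/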
static void
vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
{
char *old, *new;
if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
"from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
dvd->vdev_path, svd->vdev_path);
spa_strfree(dvd->vdev_path);
dvd->vdev_path = spa_strdup(svd->vdev_path);
}
} else if (svd->vdev_path != NULL) {
dvd->vdev_path = spa_strdup(svd->vdev_path);
zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
(u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
}
/*
* Our enclosure sysfs path may have changed between imports
*/
old = dvd->vdev_enc_sysfs_path;
new = svd->vdev_enc_sysfs_path;
if ((old != NULL && new == NULL) ||
(old == NULL && new != NULL) ||
((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
"changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
old, new);
if (dvd->vdev_enc_sysfs_path)
spa_strfree(dvd->vdev_enc_sysfs_path);
if (svd->vdev_enc_sysfs_path) {
dvd->vdev_enc_sysfs_path = spa_strdup(
svd->vdev_enc_sysfs_path);
} else {
dvd->vdev_enc_sysfs_path = NULL;
}
}
}
/*
* Recursively copy vdev paths from one vdev to another. Source and destination
* vdev trees must have the same geometry; otherwise an error is returned.
* Intended to copy paths from the userland config into the MOS config.
*/
int
vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
{
if ((svd->vdev_ops == &vdev_missing_ops) ||
(svd->vdev_ishole && dvd->vdev_ishole) ||
(dvd->vdev_ops == &vdev_indirect_ops))
return (0);
if (svd->vdev_ops != dvd->vdev_ops) {
vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_guid != dvd->vdev_guid) {
vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
"%llu)", (u_longlong_t)svd->vdev_guid,
(u_longlong_t)dvd->vdev_guid);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_children != dvd->vdev_children) {
vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
"%llu != %llu", (u_longlong_t)svd->vdev_children,
(u_longlong_t)dvd->vdev_children);
return (SET_ERROR(EINVAL));
}
for (uint64_t i = 0; i < svd->vdev_children; i++) {
int error = vdev_copy_path_strict(svd->vdev_child[i],
dvd->vdev_child[i]);
if (error != 0)
return (error);
}
if (svd->vdev_ops->vdev_op_leaf)
vdev_copy_path_impl(svd, dvd);
return (0);
}
static void
vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
{
ASSERT(stvd->vdev_top == stvd);
ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
for (uint64_t i = 0; i < dvd->vdev_children; i++) {
vdev_copy_path_search(stvd, dvd->vdev_child[i]);
}
if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
return;
/*
* The idea here is that while a vdev can shift positions within
* a top vdev (when replacing, attaching mirror, etc.) it cannot
* step outside of it.
*/
vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
return;
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_copy_path_impl(vd, dvd);
}
/*
* Recursively copy vdev paths from one root vdev to another. Source and
* destination vdev trees may differ in geometry. For each destination leaf
* vdev, search a vdev with the same guid and top vdev id in the source.
* Intended to copy paths from userland config into MOS config.
*/
void
vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
{
uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
ASSERT(srvd->vdev_ops == &vdev_root_ops);
ASSERT(drvd->vdev_ops == &vdev_root_ops);
for (uint64_t i = 0; i < children; i++) {
vdev_copy_path_search(srvd->vdev_child[i],
drvd->vdev_child[i]);
}
}
/*
* Close a virtual device.
*/
void
vdev_close(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
spa_t *spa __maybe_unused = vd->vdev_spa;
ASSERT(vd != NULL);
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/*
* If our parent is reopening, then we are as well, unless we are
* going offline.
*/
if (pvd != NULL && pvd->vdev_reopening)
vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
vd->vdev_ops->vdev_op_close(vd);
- vdev_cache_purge(vd);
-
/*
* We record the previous state before we close it, so that if we are
* doing a reopen(), we don't generate FMA ereports if we notice that
* it's still faulted.
*/
vd->vdev_prevstate = vd->vdev_state;
if (vd->vdev_offline)
vd->vdev_state = VDEV_STATE_OFFLINE;
else
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}
void
vdev_hold(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_is_root(spa));
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
return;
for (int c = 0; c < vd->vdev_children; c++)
vdev_hold(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
vd->vdev_ops->vdev_op_hold(vd);
}
void
vdev_rele(vdev_t *vd)
{
ASSERT(spa_is_root(vd->vdev_spa));
for (int c = 0; c < vd->vdev_children; c++)
vdev_rele(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
vd->vdev_ops->vdev_op_rele(vd);
}
/*
* Reopen all interior vdevs and any unopened leaves. We don't actually
* reopen leaf vdevs which had previously been opened as they might deadlock
* on the spa_config_lock. Instead we only obtain the leaf's physical size.
* If the leaf has never been opened then open it, as usual.
*/
void
vdev_reopen(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/* set the reopening flag unless we're taking the vdev offline */
vd->vdev_reopening = !vd->vdev_offline;
vdev_close(vd);
(void) vdev_open(vd);
/*
* Call vdev_validate() here to make sure we have the same device.
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
if (vd->vdev_aux) {
(void) vdev_validate_aux(vd);
if (vdev_readable(vd) && vdev_writeable(vd) &&
vd->vdev_aux == &spa->spa_l2cache) {
/*
* If the vdev is present we should evict all ARC
* buffers and pointers to log blocks and reclaim their
* space before restoring its contents to L2ARC.
*/
if (l2arc_vdev_present(vd)) {
l2arc_rebuild_vdev(vd, B_TRUE);
} else {
l2arc_add_vdev(spa, vd);
}
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
} else {
(void) vdev_validate(vd);
}
+ /*
+ * Recheck if resilver is still needed and cancel any
+ * scheduled resilver if resilver is unneeded.
+ */
+ if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) &&
+ spa->spa_async_tasks & SPA_ASYNC_RESILVER) {
+ mutex_enter(&spa->spa_async_lock);
+ spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER;
+ mutex_exit(&spa->spa_async_lock);
+ }
+
/*
* Reassess parent vdev's health.
*/
vdev_propagate_state(vd);
}
int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
int error;
/*
* Normally, partial opens (e.g. of a mirror) are allowed.
* For a create, however, we want to fail the request if
* there are any components we can't open.
*/
error = vdev_open(vd);
if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
vdev_close(vd);
return (error ? error : SET_ERROR(ENXIO));
}
/*
* Recursively load DTLs and initialize all labels.
*/
if ((error = vdev_dtl_load(vd)) != 0 ||
(error = vdev_label_init(vd, txg, isreplacing ?
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
}
return (0);
}
void
vdev_metaslab_set_size(vdev_t *vd)
{
uint64_t asize = vd->vdev_asize;
uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
uint64_t ms_shift;
/*
* There are two dimensions to the metaslab sizing calculation:
* the size of the metaslab and the count of metaslabs per vdev.
*
* The default values used below are a good balance between memory
* usage (larger metaslab size means more memory needed for loaded
* metaslabs; more metaslabs means more memory needed for the
* metaslab_t structs), metaslab load time (larger metaslabs take
* longer to load), and metaslab sync time (more metaslabs means
* more time spent syncing all of them).
*
* In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
* The range of the dimensions are as follows:
*
* 2^29 <= ms_size <= 2^34
* 16 <= ms_count <= 131,072
*
* On the lower end of vdev sizes, we aim for metaslab sizes of
* at least 512MB (2^29) to minimize fragmentation effects when
* testing with smaller devices. However, the count constraint
* of at least 16 metaslabs will override this minimum size goal.
*
* On the upper end of vdev sizes, we aim for a maximum metaslab
* size of 16GB. However, we will cap the total count to 2^17
* metaslabs to keep our memory footprint in check and let the
* metaslab size grow from there if that limit is hit.
*
* The net effect of applying the above constraints is summarized below.
*
* vdev size metaslab count
* --------------|-----------------
* < 8GB ~16
* 8GB - 100GB one per 512MB
* 100GB - 3TB ~200
* 3TB - 2PB one per 16GB
* > 2PB ~131,072
* --------------------------------
*
* Finally, note that all of the above calculate the initial
* number of metaslabs. Expanding a top-level vdev will result
* in additional metaslabs being allocated making it possible
* to exceed the zfs_vdev_ms_count_limit.
*/
if (ms_count < zfs_vdev_min_ms_count)
ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
else if (ms_count > zfs_vdev_default_ms_count)
ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
else
ms_shift = zfs_vdev_default_ms_shift;
if (ms_shift < SPA_MAXBLOCKSHIFT) {
ms_shift = SPA_MAXBLOCKSHIFT;
} else if (ms_shift > zfs_vdev_max_ms_shift) {
ms_shift = zfs_vdev_max_ms_shift;
/* cap the total count to constrain memory footprint */
if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
}
vd->vdev_ms_shift = ms_shift;
ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
}
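/*
 * Worked example of the sizing rules above, assuming the 512MB (2^29)
 * default metaslab size implied by the table: for a 100GB vdev
 * (asize of 100 * 2^30 bytes), asize >> 29 gives ms_count = 200, which is
 * neither below the minimum of 16 nor above the default target of 200, so
 * ms_shift stays at 29 and the vdev gets 200 metaslabs of 512MB each --
 * exactly the boundary between the "one per 512MB" and "~200" rows of the
 * table.
 */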
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
ASSERT(vd == vd->vdev_top);
/* indirect vdevs don't have metaslabs or dtls */
ASSERT(vdev_is_concrete(vd) || flags == 0);
ASSERT(ISP2(flags));
ASSERT(spa_writeable(vd->vdev_spa));
if (flags & VDD_METASLAB)
(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
if (flags & VDD_DTL)
(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
if (vd->vdev_ops->vdev_op_leaf)
vdev_dirty(vd->vdev_top, flags, vd, txg);
}
/*
* DTLs.
*
* A vdev's DTL (dirty time log) is the set of transaction groups for which
* the vdev has less than perfect replication. There are four kinds of DTL:
*
* DTL_MISSING: txgs for which the vdev has no valid copies of the data
*
* DTL_PARTIAL: txgs for which data is available, but not fully replicated
*
* DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
* scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
* txgs that was scrubbed.
*
* DTL_OUTAGE: txgs which cannot currently be read, whether due to
* persistent errors or just some device being offline.
* Unlike the other three, the DTL_OUTAGE map is not generally
* maintained; it's only computed when needed, typically to
* determine whether a device can be detached.
*
* For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
* either has the data or it doesn't.
*
* For interior vdevs such as mirror and RAID-Z the picture is more complex.
* A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
* if any child is less than fully replicated, then so is its parent.
* A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
* comprising only those txgs which appear in 'maxfaults' or more children;
* those are the txgs we don't have enough replication to read. For example,
* double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
* thus, its DTL_MISSING consists of the set of txgs that appear in more than
* two child DTL_MISSING maps.
*
* It should be clear from the above that to compute the DTLs and outage maps
* for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
* Therefore, that is all we keep on disk. When loading the pool, or after
* a configuration change, we generate all other DTLs from first principles.
*/
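/*
 * Concrete example of the interior-vdev rule above, per the minref
 * computation in vdev_dtl_reassess() below: for a raidz2 top-level vdev
 * (maxfaults == 2, minref == nparity + 1 == 3), a txg enters the parent's
 * DTL_MISSING only when three or more children report it missing -- one
 * more failure than the parity can absorb. For an n-way mirror,
 * minref == n, so DTL_MISSING contains only txgs missing from every child.
 */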
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_contains(rt, txg, size))
range_tree_add(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
/*
* While we are loading the pool, the DTLs have not been loaded yet.
* This isn't a problem but it can result in devices being tried
* which are known to not have the data, in which case the import
* relies on the checksum to ensure that we get the right data.
* Note that while importing we are only reading the MOS, which is
* always checksummed.
*/
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(rt))
dirty = range_tree_contains(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
return (dirty);
}
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
mutex_enter(&vd->vdev_dtl_lock);
empty = range_tree_is_empty(rt);
mutex_exit(&vd->vdev_dtl_lock);
return (empty);
}
/*
* Check if the txg falls within the range which must be
* resilvered. DVAs outside this range can always be skipped.
*/
boolean_t
vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
(void) dva, (void) psize;
/* Set by sequential resilver. */
if (phys_birth == TXG_UNKNOWN)
return (B_TRUE);
return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
}
/*
* Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
*/
boolean_t
vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
vd->vdev_ops->vdev_op_leaf)
return (B_TRUE);
return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
phys_birth));
}
/*
* Returns the lowest txg in the DTL range.
*/
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
}
/*
* Returns the highest txg in the DTL.
*/
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
}
/*
* Determine if a resilvering vdev should remove any DTL entries from
* its range. If the vdev was resilvering for the entire duration of the
* scan then it should excise that range from its DTLs. Otherwise, this
* vdev is considered partially resilvered and should leave its DTL
* entries intact. The comment in vdev_dtl_reassess() describes how we
* excise the DTLs.
*/
static boolean_t
vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
{
ASSERT0(vd->vdev_children);
if (vd->vdev_state < VDEV_STATE_DEGRADED)
return (B_FALSE);
if (vd->vdev_resilver_deferred)
return (B_FALSE);
if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE);
if (rebuild_done) {
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
/* Rebuild not initiated by attach */
if (vd->vdev_rebuild_txg == 0)
return (B_TRUE);
/*
* When a rebuild completes without error then all missing data
* up to the rebuild max txg has been reconstructed and the DTL
* is eligible for excision.
*/
if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
return (B_TRUE);
}
} else {
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
/* Resilver not initiated by attach */
if (vd->vdev_resilver_txg == 0)
return (B_TRUE);
/*
* When a resilver is initiated the scan will assign the
* scn_max_txg value to the highest txg value that exists
* in all DTLs. If this device's max DTL is not part of this
* scan (i.e. it is not in the range (scn_min_txg, scn_max_txg]),
* then it is not eligible for excision.
*/
if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Reassess DTLs after a config change or scrub completion. If txg == 0 no
* write operations will be issued to the pool.
*/
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done)
{
spa_t *spa = vd->vdev_spa;
avl_tree_t reftree;
int minref;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
for (int c = 0; c < vd->vdev_children; c++)
vdev_dtl_reassess(vd->vdev_child[c], txg,
scrub_txg, scrub_done, rebuild_done);
if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
return;
if (vd->vdev_ops->vdev_op_leaf) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
boolean_t check_excise = B_FALSE;
boolean_t wasempty = B_TRUE;
mutex_enter(&vd->vdev_dtl_lock);
/*
* If requested, pretend the scan or rebuild completed cleanly.
*/
if (zfs_scan_ignore_errors) {
if (scn != NULL)
scn->scn_phys.scn_errors = 0;
if (vr != NULL)
vr->vr_rebuild_phys.vrp_errors = 0;
}
if (scrub_txg != 0 &&
!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
wasempty = B_FALSE;
zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
"dtl:%llu/%llu errors:%llu",
(u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
(u_longlong_t)scrub_txg, spa->spa_scrub_started,
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd),
(u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
}
/*
* If we've completed a scrub/resilver or a rebuild cleanly
* then determine if this vdev should remove any DTLs. We
* only want to excise regions on vdevs that were available
* during the entire duration of this scan.
*/
if (rebuild_done &&
vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
check_excise = B_TRUE;
} else {
if (spa->spa_scrub_started ||
(scn != NULL && scn->scn_phys.scn_errors == 0)) {
check_excise = B_TRUE;
}
}
if (scrub_txg && check_excise &&
vdev_dtl_should_excise(vd, rebuild_done)) {
/*
* We completed a scrub, resilver or rebuild up to
* scrub_txg. If we did it without rebooting, then
* the scrub dtl will be valid, so excise the old
* region and fold in the scrub dtl. Otherwise,
* leave the dtl as-is if there was an error.
*
* There's a little trick here: to excise the beginning
* of the DTL_MISSING map, we put it into a reference
* tree and then add a segment with refcnt -1 that
* covers the range [0, scrub_txg). This means
* that each txg in that range has refcnt -1 or 0.
* We then add DTL_SCRUB with a refcnt of 2, so that
* entries in the range [0, scrub_txg) will have a
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
*/
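/*
 * To make the refcounts concrete: a txg below scrub_txg that was only in
 * DTL_MISSING ends up at 1 - 1 = 0 and is excised; one that is also in
 * DTL_SCRUB ends up at 1 - 1 + 2 = 2 and survives; a txg at or above
 * scrub_txg keeps its +1 from DTL_MISSING and survives. Generating the
 * new map with minref 1 therefore keeps exactly the txgs that remain
 * unrepaired.
 */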
space_reftree_create(&reftree);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_SCRUB], 2);
space_reftree_generate_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd));
} else if (!wasempty) {
zfs_dbgmsg("DTL_MISSING is now empty");
}
}
range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done)
range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
if (!vdev_readable(vd))
range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
else
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
/*
* If the vdev was resilvering or rebuilding and no longer
* has any DTLs then reset the appropriate flag and dirty
* the top level so that we persist the change.
*/
if (txg != 0 &&
range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
if (vd->vdev_rebuild_txg != 0) {
vd->vdev_rebuild_txg = 0;
vdev_config_dirty(vd->vdev_top);
} else if (vd->vdev_resilver_txg != 0) {
vd->vdev_resilver_txg = 0;
vdev_config_dirty(vd->vdev_top);
}
}
mutex_exit(&vd->vdev_dtl_lock);
if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
return;
}
mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB)
continue; /* leaf vdevs only */
if (t == DTL_PARTIAL)
minref = 1; /* i.e. non-zero */
else if (vdev_get_nparity(vd) != 0)
minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
else
minref = vd->vdev_children; /* any kind of mirror */
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}
/*
* Iterate over all the vdevs except spare, and post kobj events
*/
void
vdev_post_kobj_evt(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_kobj_evt_post &&
vd->vdev_kobj_flag == B_FALSE) {
vd->vdev_kobj_flag = B_TRUE;
vd->vdev_ops->vdev_op_kobj_evt_post(vd);
}
for (int c = 0; c < vd->vdev_children; c++)
vdev_post_kobj_evt(vd->vdev_child[c]);
}
/*
* Iterate over all the vdevs except spare, and clear kobj events
*/
void
vdev_clear_kobj_evt(vdev_t *vd)
{
vd->vdev_kobj_flag = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear_kobj_evt(vd->vdev_child[c]);
}
int
vdev_dtl_load(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rt;
int error = 0;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
ASSERT(vdev_is_concrete(vd));
/*
* If the dtl cannot be sync'd there is no need to open it.
*/
if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
return (0);
error = space_map_open(&vd->vdev_dtl_sm, mos,
vd->vdev_dtl_object, 0, -1ULL, 0);
if (error)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
if (error == 0) {
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add,
vd->vdev_dtl[DTL_MISSING]);
mutex_exit(&vd->vdev_dtl_lock);
}
range_tree_vacate(rt, NULL, NULL);
range_tree_destroy(rt);
return (error);
}
for (int c = 0; c < vd->vdev_children; c++) {
error = vdev_dtl_load(vd->vdev_child[c]);
if (error != 0)
break;
}
return (error);
}
static void
vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *string;
ASSERT(alloc_bias != VDEV_BIAS_NONE);
string =
(alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
(alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
(alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
ASSERT(string != NULL);
VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
1, strlen(string) + 1, string, tx));
if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
spa_activate_allocation_classes(spa, tx);
}
}
void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zapobj, tx));
}
uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
DMU_OT_NONE, 0, tx);
ASSERT(zap != 0);
VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zap, tx));
return (zap);
}
void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ops != &vdev_hole_ops &&
vd->vdev_ops != &vdev_missing_ops &&
vd->vdev_ops != &vdev_root_ops &&
!vd->vdev_top->vdev_removing) {
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
}
if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
vdev_zap_allocation_data(vd, tx);
}
}
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 &&
spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) {
if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2))
spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx);
vd->vdev_root_zap = vdev_create_link_zap(vd, tx);
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_construct_zaps(vd->vdev_child[i], tx);
}
}
static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rtsync;
dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
mutex_enter(&vd->vdev_dtl_lock);
space_map_free(vd->vdev_dtl_sm, tx);
space_map_close(vd->vdev_dtl_sm);
vd->vdev_dtl_sm = NULL;
mutex_exit(&vd->vdev_dtl_lock);
/*
* We only destroy the leaf ZAP for detached leaves or for
* removed log devices. Removed data devices handle leaf ZAP
* cleanup later, once cancellation is no longer possible.
*/
if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
vd->vdev_top->vdev_islog)) {
vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
vd->vdev_leaf_zap = 0;
}
dmu_tx_commit(tx);
return;
}
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
0, -1ULL, 0));
ASSERT(vd->vdev_dtl_sm != NULL);
}
rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(rtsync, NULL, NULL);
range_tree_destroy(rtsync);
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
"new object %llu", (u_longlong_t)txg, spa_name(spa),
(u_longlong_t)object,
(u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
dmu_tx_commit(tx);
}
/*
* Determine whether the specified vdev can be offlined/detached/removed
* without losing data.
*/
boolean_t
vdev_dtl_required(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint8_t cant_read = vd->vdev_cant_read;
boolean_t required;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == spa->spa_root_vdev || vd == tvd)
return (B_TRUE);
/*
* Temporarily mark the device as unreadable, and then determine
* whether this results in any DTL outages in the top-level vdev.
* If not, we can safely offline/detach/remove the device.
*/
vd->vdev_cant_read = B_TRUE;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
vd->vdev_cant_read = cant_read;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
if (!required && zio_injection_enabled) {
required = !!zio_handle_device_injection(vd, NULL,
SET_ERROR(ECHILD));
}
return (required);
}
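/*
 * This simulated-outage check is what makes vdev_offline_locked() below
 * refuse (with EBUSY) to offline the only healthy copy of any data, and
 * what causes vdev_fault() to back off to DEGRADED when the device holds
 * the only valid copy.
 */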
/*
* Determine if resilver is needed, and if so the txg range.
*/
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
boolean_t needed = B_FALSE;
uint64_t thismin = UINT64_MAX;
uint64_t thismax = 0;
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd);
thismax = vdev_dtl_max(vd);
needed = B_TRUE;
}
mutex_exit(&vd->vdev_dtl_lock);
} else {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
uint64_t cmin, cmax;
if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
thismin = MIN(thismin, cmin);
thismax = MAX(thismax, cmax);
needed = B_TRUE;
}
}
}
if (needed && minp) {
*minp = thismin;
*maxp = thismax;
}
return (needed);
}
/*
* Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
* will contain either the checkpoint spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
int
vdev_load(vdev_t *vd)
{
int children = vd->vdev_children;
int error = 0;
taskq_t *tq = NULL;
/*
* It's only worthwhile to use the taskq for the root vdev, because the
* slow part is metaslab_init, and that only happens for top-level
* vdevs.
*/
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
tq = taskq_create("vdev_load", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
/*
* Recursively load all children.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
cvd->vdev_load_error = vdev_load(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_load_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < vd->vdev_children; c++) {
int error = vd->vdev_child[c]->vdev_load_error;
if (error != 0)
return (error);
}
vdev_set_deflate_ratio(vd);
/*
* On spa_load path, grab the allocation bias from our zap
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
char bias_str[64];
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
bias_str);
if (error == 0) {
ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
} else if (error != ENOENT) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]",
(u_longlong_t)vd->vdev_top_zap, error);
return (error);
}
}
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
uint64_t failfast;
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
vdev_prop_to_name(VDEV_PROP_FAILFAST), sizeof (failfast),
1, &failfast);
if (error == 0) {
vd->vdev_failfast = failfast & 1;
} else if (error == ENOENT) {
vd->vdev_failfast = vdev_prop_default_numeric(
VDEV_PROP_FAILFAST);
} else {
vdev_dbgmsg(vd,
"vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]",
(u_longlong_t)vd->vdev_top_zap, error);
}
}
/*
* Load any rebuild state from the top-level vdev zap.
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
error = vdev_rebuild_load(vd);
if (error && error != ENOTSUP) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
"failed [error=%d]", error);
return (error);
}
}
if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) {
uint64_t zapobj;
if (vd->vdev_top_zap != 0)
zapobj = vd->vdev_top_zap;
else
zapobj = vd->vdev_leaf_zap;
error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N,
&vd->vdev_checksum_n);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_T,
&vd->vdev_checksum_t);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_IO_N,
&vd->vdev_io_n);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
error = vdev_prop_get_int(vd, VDEV_PROP_IO_T,
&vd->vdev_io_t);
if (error && error != ENOENT)
vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) "
"failed [error=%d]", (u_longlong_t)zapobj, error);
}
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
"asize=%llu", (u_longlong_t)vd->vdev_ashift,
(u_longlong_t)vd->vdev_asize);
return (SET_ERROR(ENXIO));
}
error = vdev_metaslab_init(vd, 0);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
"[error=%d]", error);
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (error);
}
uint64_t checkpoint_sm_obj;
error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
vd->vdev_ashift);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: space_map_open "
"failed for checkpoint spacemap (obj %llu) "
"[error=%d]",
(u_longlong_t)checkpoint_sm_obj, error);
return (error);
}
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since the checkpoint_sm contains free entries
* exclusively we can use space_map_allocated() to
* indicate the cumulative checkpointed space that
* has been freed.
*/
vd->vdev_stat.vs_checkpoint_space =
-space_map_allocated(vd->vdev_checkpoint_sm);
vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
vd->vdev_stat.vs_checkpoint_space;
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
"checkpoint space map object from vdev ZAP "
"[error=%d]", error);
return (error);
}
}
/*
* If this is a leaf vdev, load its DTL.
*/
if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
"[error=%d]", error);
return (error);
}
uint64_t obsolete_sm_object;
error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
"obsolete spacemap (obj %llu) [error=%d]",
(u_longlong_t)obsolete_sm_object, error);
return (error);
}
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
"space map object from vdev ZAP [error=%d]", error);
return (error);
}
return (0);
}
/*
* The special vdev case is used for hot spares and l2cache devices. Its
* sole purpose is to set the vdev state for the associated vdev. To do this,
* we make sure that we can open the underlying device, then try to read the
* label, and make sure that the label is sane and that it hasn't been
* repurposed to another pool.
*/
int
vdev_validate_aux(vdev_t *vd)
{
nvlist_t *label;
uint64_t guid, version;
uint64_t state;
if (!vdev_readable(vd))
return (0);
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (-1);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
!SPA_VERSION_IS_SUPPORTED(version) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
guid != vd->vdev_guid ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (-1);
}
/*
* We don't actually check the pool state here. If it's in fact in
* use by another pool, we update this fact on the fly when requested.
*/
nvlist_free(label);
return (0);
}
static void
vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(vd->vdev_spa);
if (vd->vdev_top_zap == 0)
return;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
if (err == ENOENT)
return;
VERIFY0(err);
VERIFY0(dmu_object_free(mos, object, tx));
VERIFY0(zap_remove(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
}
/*
* Free the objects used to store this vdev's spacemaps, and the array
* that points to them.
*/
void
vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ms_array == 0)
return;
objset_t *mos = vd->vdev_spa->spa_meta_objset;
uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
size_t array_bytes = array_count * sizeof (uint64_t);
uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
array_bytes, smobj_array, 0));
for (uint64_t i = 0; i < array_count; i++) {
uint64_t smobj = smobj_array[i];
if (smobj == 0)
continue;
space_map_free_obj(mos, smobj, tx);
}
kmem_free(smobj_array, array_bytes);
VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
vdev_destroy_ms_flush_data(vd, tx);
vd->vdev_ms_array = 0;
}
static void
vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
vdev_destroy_spacemaps(vd, tx);
if (vd->vdev_top_zap != 0) {
vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
vd->vdev_top_zap = 0;
}
dmu_tx_commit(tx);
}
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
ASSERT(vdev_is_concrete(vd));
while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
!= NULL)
metaslab_sync_done(msp, txg);
if (reassess) {
metaslab_sync_reassess(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_sync_reassess(vd->vdev_log_mg);
}
}
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *lvd;
metaslab_t *msp;
ASSERT3U(txg, ==, spa->spa_syncing_txg);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
vdev_indirect_sync_obsolete(vd, tx);
/*
* If the vdev is indirect, it can't have dirty
* metaslabs or DTLs.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
dmu_tx_commit(tx);
return;
}
}
ASSERT(vdev_is_concrete(vd));
if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
!vd->vdev_removing) {
ASSERT(vd == vd->vdev_top);
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
ASSERT(vd->vdev_ms_array != 0);
vdev_config_dirty(vd);
}
while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
metaslab_sync(msp, txg);
(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
}
while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
vdev_dtl_sync(lvd, txg);
/*
* If this is an empty log device being removed, destroy the
* metadata associated with it.
*/
if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
vdev_remove_empty_log(vd, txg);
(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
dmu_tx_commit(tx);
}
uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize));
}
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
*/
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd, *tvd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
tvd = vd->vdev_top;
/*
* If user did a 'zpool offline -f' then make the fault persist across
* reboots.
*/
if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
/*
* There are two kinds of forced faults: temporary and
* persistent. Temporary faults go away at pool import, while
* persistent faults stay set. Both types of faults can be
* cleared with a zpool clear.
*
* We tell if a vdev is persistently faulted by looking at the
* ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
* import then it's a persistent fault. Otherwise, it's
* temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
* by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
* tells vdev_config_generate() (which gets run later) to set
* ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
*/
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_tmpoffline = B_FALSE;
aux = VDEV_AUX_EXTERNAL;
} else {
vd->vdev_tmpoffline = B_TRUE;
}
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
* were faulted.
*/
vd->vdev_label_aux = aux;
/*
* Faulted state takes precedence over degraded.
*/
vd->vdev_delayed_close = B_FALSE;
vd->vdev_faulted = 1ULL;
vd->vdev_degraded = 0ULL;
vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
vd->vdev_degraded = 1ULL;
vd->vdev_faulted = 0ULL;
/*
* If we reopen the device and it's not dead, only then do we
* mark it degraded.
*/
vdev_reopen(tvd);
if (vdev_readable(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Mark the given vdev degraded. A degraded vdev is purely an indication to the
* user that something is wrong. The vdev continues to operate as normal as far
* as I/O is concerned.
*/
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
/*
* If the vdev is already faulted, then don't do anything.
*/
if (vd->vdev_faulted || vd->vdev_degraded)
return (spa_vdev_state_exit(spa, NULL, 0));
vd->vdev_degraded = 1ULL;
if (!vdev_is_dead(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
aux);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_remove_wanted(spa_t *spa, uint64_t guid)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
/*
* If the vdev is already removed, or is expanding (which can trigger
* repartition add/remove events), then don't do anything.
*/
if (vd->vdev_removed || vd->vdev_expanding)
return (spa_vdev_state_exit(spa, NULL, 0));
/*
* Confirm the vdev has been removed, otherwise don't do anything.
*/
if (vd->vdev_ops->vdev_op_leaf && !zio_wait(vdev_probe(vd, NULL)))
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EEXIST)));
vd->vdev_remove_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_REMOVE);
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Online the given vdev.
*
* If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
* spare device should be detached when the device finishes resilvering.
* Second, the online should be treated like a 'test' online case, so no FMA
* events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
boolean_t wasoffline;
vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
oldstate = vd->vdev_state;
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
/* XXX - L2ARC 1.0 does not support expansion */
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
spa->spa_autoexpand);
vd->vdev_expansion_time = gethrestime_sec();
}
vdev_reopen(tvd);
vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = B_FALSE;
}
if (newstate)
*newstate = vd->vdev_state;
if ((flags & ZFS_ONLINE_UNSPARE) &&
!vdev_is_dead(vd) && vd->vdev_parent &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
/* XXX - L2ARC 1.0 does not support expansion */
if (vd->vdev_aux)
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
/* Restart initializing if necessary */
mutex_enter(&vd->vdev_initialize_lock);
if (vdev_writeable(vd) &&
vd->vdev_initialize_thread == NULL &&
vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) vdev_initialize(vd);
}
mutex_exit(&vd->vdev_initialize_lock);
/*
* Restart trimming if necessary. We do not restart trimming for cache
* devices here. This is triggered by l2arc_rebuild_vdev()
* asynchronously for the whole device or in l2arc_evict() as it evicts
* space for upcoming writes.
*/
mutex_enter(&vd->vdev_trim_lock);
if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
vd->vdev_trim_thread == NULL &&
vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
(void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
if (wasoffline ||
(oldstate < VDEV_STATE_DEGRADED &&
vd->vdev_state >= VDEV_STATE_DEGRADED)) {
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
/*
* Asynchronously detach spare vdev if resilver or
* rebuild is not required
*/
if (vd->vdev_unspare &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool) &&
!vdev_rebuild_active(tvd))
spa_async_request(spa, SPA_ASYNC_DETACH_SPARE);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
vdev_t *vd, *tvd;
int error = 0;
uint64_t generation;
metaslab_group_t *mg;
top:
spa_vdev_state_enter(spa, SCL_ALLOC);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
tvd = vd->vdev_top;
mg = tvd->vdev_mg;
generation = spa->spa_config_generation + 1;
/*
* If the device isn't already offline, try to offline it.
*/
if (!vd->vdev_offline) {
/*
* If this device has the only valid copy of some data,
* don't allow it to be offlined. Log devices are always
* expendable.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_dtl_required(vd))
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
/*
* If the top-level is a slog and it has had allocations
* then proceed. We check that the vdev's metaslab group
* is not NULL since it's possible that we may have just
* added this vdev but not yet initialized its metaslabs.
*/
if (tvd->vdev_islog && mg != NULL) {
/*
* Prevent any future allocations.
*/
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
error = spa_reset_logs(spa);
/*
* If the log device was successfully reset but has
* checkpointed data, do not offline it.
*/
if (error == 0 &&
tvd->vdev_checkpoint_sm != NULL) {
ASSERT3U(space_map_allocated(
tvd->vdev_checkpoint_sm), !=, 0);
error = ZFS_ERR_CHECKPOINT_EXISTS;
}
spa_vdev_state_enter(spa, SCL_ALLOC);
/*
* Check to see if the config has changed.
*/
if (error || generation != spa->spa_config_generation) {
metaslab_group_activate(mg);
if (error)
return (spa_vdev_state_exit(spa,
vd, error));
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
* Offline this device and reopen its top-level vdev.
* If the top-level vdev is a log device then just offline
* it. Otherwise, if this action results in the top-level
* vdev becoming unusable, undo it and fail the request.
*/
vd->vdev_offline = B_TRUE;
vdev_reopen(tvd);
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_is_dead(tvd)) {
vd->vdev_offline = B_FALSE;
vdev_reopen(tvd);
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
}
/*
* Add the device back into the metaslab rotor so that
* once we online the device it's open for business.
*/
if (tvd->vdev_islog && mg != NULL)
metaslab_group_activate(mg);
}
vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
int error;
mutex_enter(&spa->spa_vdev_top_lock);
error = vdev_offline_locked(spa, guid, flags);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
*/
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == NULL)
vd = rvd;
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vd->vdev_stat.vs_slow_ios = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
/*
* It makes no sense to "clear" an indirect or removed vdev.
*/
if (!vdev_is_concrete(vd) || vd->vdev_removed)
return;
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
* also mark the vdev config dirty, so that the new faulted state is
* written out to disk.
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
/*
* When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we want to still post the ereport again.
*/
vd->vdev_forcefault = B_TRUE;
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
vd->vdev_forcefault = B_FALSE;
if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
/* If a resilver isn't required, check if vdevs can be culled */
if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* When clearing a FMA-diagnosed fault, we always want to
* unspare the device, as we assume that the original spare was
* done in response to the FMA fault.
*/
if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
/* Clear recent error events cache (i.e. duplicate events tracking) */
zfs_ereport_clear(spa, vd);
}
boolean_t
vdev_is_dead(vdev_t *vd)
{
/*
* Holes and missing devices are always considered "dead".
* This simplifies the code since we don't have to check for
* these types of devices in the various code paths.
* Instead we rely on the fact that we skip over dead devices
* before issuing I/O to them.
*/
return (vd->vdev_state < VDEV_STATE_DEGRADED ||
vd->vdev_ops == &vdev_hole_ops ||
vd->vdev_ops == &vdev_missing_ops);
}
boolean_t
vdev_readable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}
boolean_t
vdev_writeable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
vdev_is_concrete(vd));
}
boolean_t
vdev_allocatable(vdev_t *vd)
{
uint64_t state = vd->vdev_state;
/*
* We currently allow allocations from vdevs which may be in the
* process of reopening (i.e. VDEV_STATE_CLOSED). If the device
* fails to reopen then we'll catch it later when we're holding
* the proper locks. Note that we have to get the vdev state
* in a local variable because although it changes atomically,
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
!vd->vdev_cant_write && vdev_is_concrete(vd) &&
vd->vdev_mg->mg_initialized);
}
boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
ASSERT(zio->io_vd == vd);
if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
return (B_FALSE);
if (zio->io_type == ZIO_TYPE_READ)
return (!vd->vdev_cant_read);
if (zio->io_type == ZIO_TYPE_WRITE)
return (!vd->vdev_cant_write);
return (B_TRUE);
}
static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
/*
* Exclude the dRAID spare when aggregating to avoid double counting
* the ops and bytes. These IOs are counted by the physical leaves.
*/
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return;
for (int t = 0; t < VS_ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
cvs->vs_scan_removing = cvd->vdev_removing;
}
/*
* Get extended stats
*/
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
(void) cvd;
int t, b;
for (t = 0; t < ZIO_TYPES; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
vsx->vsx_total_histo[t][b] +=
cvsx->vsx_total_histo[t][b];
}
}
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
vsx->vsx_queue_histo[t][b] +=
cvsx->vsx_queue_histo[t][b];
}
vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
}
}
boolean_t
vdev_is_spacemap_addressable(vdev_t *vd)
{
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
return (B_TRUE);
/*
* If double-word space map entries are not enabled we assume
* 47 bits of the space map entry are dedicated to the entry's
* offset (see SM_OFFSET_BITS in space_map.h). We then use that
* to calculate the maximum address that can be described by a
* space map entry for the given device.
*/
uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
if (shift >= 63) /* detect potential overflow */
return (B_TRUE);
return (vd->vdev_asize < (1ULL << shift));
}
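/*
 * For scale: with the 47-bit offset described above, a device with a
 * 512-byte sector size (ashift == 9) is single-word addressable as long
 * as its asize is below 2^56 bytes (64 PiB); larger ashifts raise that
 * bound further. Pools with SPA_FEATURE_SPACEMAP_V2 active skip the
 * check entirely.
 */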
/*
* Get statistics for the given vdev.
*/
static void
vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
int t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (!vd->vdev_ops->vdev_op_leaf) {
if (vs) {
memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
}
if (vsx)
memset(vsx, 0, sizeof (*vsx));
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
vdev_get_stats_ex_impl(cvd, cvs, cvsx);
if (vs)
vdev_get_child_stat(cvd, vs, cvs);
if (vsx)
vdev_get_child_stat_ex(cvd, vsx, cvsx);
}
} else {
/*
* We're a leaf. Just copy our ZIO active queue stats in. The
* other leaf stats are updated in vdev_stat_update().
*/
if (!vsx)
return;
memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
vsx->vsx_active_queue[t] =
vd->vdev_queue.vq_class[t].vqc_active;
vsx->vsx_pend_queue[t] = avl_numnodes(
&vd->vdev_queue.vq_class[t].vqc_queued_tree);
}
}
}
void
vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
vdev_t *tvd = vd->vdev_top;
mutex_enter(&vd->vdev_stat_lock);
if (vs) {
memcpy(vs, &vd->vdev_stat, sizeof (*vs));
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_min_asize(vd);
if (vd->vdev_ops->vdev_op_leaf) {
vs->vs_pspace = vd->vdev_psize;
vs->vs_rsize += VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE;
/*
* Report initializing progress. Since we don't
* have the initializing locks held, this is only
* an estimate (although a fairly accurate one).
*/
vs->vs_initialize_bytes_done =
vd->vdev_initialize_bytes_done;
vs->vs_initialize_bytes_est =
vd->vdev_initialize_bytes_est;
vs->vs_initialize_state = vd->vdev_initialize_state;
vs->vs_initialize_action_time =
vd->vdev_initialize_action_time;
/*
* Report manual TRIM progress. Since we don't have
* the manual TRIM locks held, this is only an
* estimate (although a fairly accurate one).
*/
vs->vs_trim_notsup = !vd->vdev_has_trim;
vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
vs->vs_trim_state = vd->vdev_trim_state;
vs->vs_trim_action_time = vd->vdev_trim_action_time;
/* Set when there is a deferred resilver. */
vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
}
/*
* Report expandable space on top-level, non-auxiliary devices
* only. The expandable space is reported in terms of metaslab
* sized units since that determines how much space the pool
* can expand.
*/
if (vd->vdev_aux == NULL && tvd != NULL) {
vs->vs_esize = P2ALIGN(
vd->vdev_max_asize - vd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
vs->vs_configured_ashift = vd->vdev_top != NULL
? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
vs->vs_logical_ashift = vd->vdev_logical_ashift;
if (vd->vdev_physical_ashift <= ASHIFT_MAX)
vs->vs_physical_ashift = vd->vdev_physical_ashift;
else
vs->vs_physical_ashift = 0;
/*
* Report fragmentation and rebuild progress for top-level,
* non-auxiliary, concrete devices.
*/
if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
vdev_is_concrete(vd)) {
/*
* The vdev fragmentation rating doesn't take into
* account the embedded slog metaslab (vdev_log_mg).
* Since it's only one metaslab, it would have a tiny
* impact on the overall fragmentation.
*/
vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
vd->vdev_mg->mg_fragmentation : 0;
}
vs->vs_noalloc = MAX(vd->vdev_noalloc,
tvd ? tvd->vdev_noalloc : 0);
}
vdev_get_stats_ex_impl(vd, vs, vsx);
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
return (vdev_get_stats_ex(vd, vs, NULL));
}
void
vdev_clear_stats(vdev_t *vd)
{
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_space = 0;
vd->vdev_stat.vs_dspace = 0;
vd->vdev_stat.vs_alloc = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_scan_stat_init(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
for (int c = 0; c < vd->vdev_children; c++)
vdev_scan_stat_init(vd->vdev_child[c]);
mutex_enter(&vd->vdev_stat_lock);
vs->vs_scan_processed = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
spa_t *spa = zio->io_spa;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
vdev_t *pvd;
uint64_t txg = zio->io_txg;
/* Suppress ASAN false positive */
#ifdef __SANITIZE_ADDRESS__
vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL;
vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL;
#else
vdev_stat_t *vs = &vd->vdev_stat;
vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
#endif
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
/*
* If this i/o is a gang leader, it didn't do any actual work.
*/
if (zio->io_gang_tree)
return;
if (zio->io_error == 0) {
/*
* If this is a root i/o, don't count it -- we've already
* counted the top-level vdevs, and vdev_get_stats() will
* aggregate them when asked. This reduces contention on
* the root vdev_stat_lock and implicitly handles blocks
* that compress away to holes, for which there is no i/o.
* (Holes never create vdev children, so all the counters
* remain zero, which is what we want.)
*
* Note: this only applies to successful i/o (io_error == 0)
* because unlike i/o counts, errors are not additive.
* When reading a ditto block, for example, failure of
* one top-level vdev does not imply a root-level error.
*/
if (vd == rvd)
return;
ASSERT(vd == zio->io_vd);
if (flags & ZIO_FLAG_IO_BYPASS)
return;
mutex_enter(&vd->vdev_stat_lock);
if (flags & ZIO_FLAG_IO_REPAIR) {
/*
* Repair is the result of a resilver issued by the
* scan thread (spa_sync).
*/
if (flags & ZIO_FLAG_SCAN_THREAD) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scn_phys = &scn->scn_phys;
uint64_t *processed = &scn_phys->scn_processed;
if (vd->vdev_ops->vdev_op_leaf)
atomic_add_64(processed, psize);
vs->vs_scan_processed += psize;
}
/*
* Repair is the result of a rebuild issued by the
* rebuild thread (vdev_rebuild_thread). To avoid
* double counting repaired bytes the virtual dRAID
* spare vdev is excluded from the processed bytes.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
vdev_t *tvd = vd->vdev_top;
vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
atomic_add_64(rebuilt, psize);
}
vs->vs_rebuild_processed += psize;
}
if (flags & ZIO_FLAG_SELF_HEAL)
vs->vs_self_healed += psize;
}
/*
* The bytes/ops/histograms are recorded at the leaf level and
* aggregated into the higher level vdevs in vdev_get_stats().
*/
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
zio_type_t vs_type = type;
zio_priority_t priority = zio->io_priority;
/*
* TRIM ops and bytes are reported to user space as
* ZIO_TYPE_IOCTL. This is done to preserve the
* vdev_stat_t structure layout for user space.
*/
if (type == ZIO_TYPE_TRIM)
vs_type = ZIO_TYPE_IOCTL;
/*
* Solely for the purposes of 'zpool iostat -lqrw'
* reporting, use the priority to categorize the I/O.
* Only the following are reported to user space:
*
* ZIO_PRIORITY_SYNC_READ,
* ZIO_PRIORITY_SYNC_WRITE,
* ZIO_PRIORITY_ASYNC_READ,
* ZIO_PRIORITY_ASYNC_WRITE,
* ZIO_PRIORITY_SCRUB,
* ZIO_PRIORITY_TRIM,
* ZIO_PRIORITY_REBUILD.
*/
if (priority == ZIO_PRIORITY_INITIALIZING) {
ASSERT3U(type, ==, ZIO_TYPE_WRITE);
priority = ZIO_PRIORITY_ASYNC_WRITE;
} else if (priority == ZIO_PRIORITY_REMOVAL) {
priority = ((type == ZIO_TYPE_WRITE) ?
ZIO_PRIORITY_ASYNC_WRITE :
ZIO_PRIORITY_ASYNC_READ);
}
vs->vs_ops[vs_type]++;
vs->vs_bytes[vs_type] += psize;
if (flags & ZIO_FLAG_DELEGATED) {
vsx->vsx_agg_histo[priority]
[RQ_HISTO(zio->io_size)]++;
} else {
vsx->vsx_ind_histo[priority]
[RQ_HISTO(zio->io_size)]++;
}
if (zio->io_delta && zio->io_delay) {
vsx->vsx_queue_histo[priority]
[L_HISTO(zio->io_delta - zio->io_delay)]++;
vsx->vsx_disk_histo[type]
[L_HISTO(zio->io_delay)]++;
vsx->vsx_total_histo[type]
[L_HISTO(zio->io_delta)]++;
}
}
mutex_exit(&vd->vdev_stat_lock);
return;
}
if (flags & ZIO_FLAG_SPECULATIVE)
return;
/*
* If this is an I/O error that is going to be retried, then ignore the
* error. Otherwise, the user may interpret B_FAILFAST I/O errors as
* hard errors, when in reality they can happen for any number of
* innocuous reasons (bus resets, MPxIO link failure, etc).
*/
if (zio->io_error == EIO &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY))
return;
/*
* Intent log writes won't propagate their error to the root
* I/O so don't mark these types of failures as pool-level
* errors.
*/
if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
return;
if (type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
/*
* This is either a normal write (not a repair), or it's
* a repair induced by the scrub thread, or it's a repair
* made by zil_claim() during spa_load() in the first txg.
* In the normal case, we commit the DTL change in the same
* txg as the block was born. In the scrub-induced repair
* case, we know that scrubs run in first-pass syncing context,
* so we commit the DTL change in spa_syncing_txg(spa).
* In the zil_claim() case, we commit in spa_first_txg(spa).
*
* We currently do not make DTL entries for failed spontaneous
* self-healing writes triggered by normal (non-scrubbing)
* reads, because we have no transactional context in which to
* do so -- and it's not clear that it'd be desirable anyway.
*/
if (vd->vdev_ops->vdev_op_leaf) {
uint64_t commit_txg = txg;
if (flags & ZIO_FLAG_SCAN_THREAD) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
ASSERT(spa_sync_pass(spa) == 1);
vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
commit_txg = spa_syncing_txg(spa);
} else if (spa->spa_claiming) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
commit_txg = spa_first_txg(spa);
}
ASSERT(commit_txg >= spa_syncing_txg(spa));
if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
return;
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
}
if (vd != rvd)
vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
}
}
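The histogram updates near the end of the success path index into power-of-two buckets via the RQ_HISTO()/L_HISTO() macros, which are defined earlier in vdev.c (not in this hunk) and, to the best of my reading, map a value to min(floor(log2(v)), buckets - 1). The standalone sketch below illustrates that bucketing idea only; the bucket count is hypothetical and nothing here touches the real vdev_stat_ex_t layout.

/* Standalone sketch of power-of-two latency bucketing (illustration only). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BUCKETS 37   /* hypothetical bucket count for illustration */

static unsigned
histo_bucket(uint64_t v)
{
	unsigned b = 0;
	while (v >>= 1)   /* floor(log2(v)) for v > 0; v == 0 lands in bucket 0 */
		b++;
	return (b < BUCKETS - 1 ? b : BUCKETS - 1);
}

int
main(void)
{
	uint64_t latencies_ns[] = { 1500, 250000, 4000000, 90000000 };
	unsigned histo[BUCKETS] = { 0 };

	for (size_t i = 0; i < sizeof (latencies_ns) / sizeof (latencies_ns[0]); i++)
		histo[histo_bucket(latencies_ns[i])]++;
	for (unsigned i = 0; i < BUCKETS; i++)
		if (histo[i] != 0)
			printf("bucket %u: %u\n", i, histo[i]);
	return (0);
}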
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
}
/*
* Update the in-core space usage stats for this vdev, its metaslab class,
* and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
(void) defer_delta;
int64_t dspace_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(vd == vd->vdev_top);
/*
* Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
* factor. We must calculate this here and not at the root vdev
* because the root vdev's psize-to-asize is simply the max of its
* children's, thus not accurate enough for us.
*/
dspace_delta = vdev_deflated_space(vd, space_delta);
mutex_enter(&vd->vdev_stat_lock);
/* ensure we won't underflow */
if (alloc_delta < 0) {
ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
}
vd->vdev_stat.vs_alloc += alloc_delta;
vd->vdev_stat.vs_space += space_delta;
vd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&vd->vdev_stat_lock);
/* every class but log contributes to root space stats */
if (vd->vdev_mg != NULL && !vd->vdev_islog) {
ASSERT(!vd->vdev_isl2cache);
mutex_enter(&rvd->vdev_stat_lock);
rvd->vdev_stat.vs_alloc += alloc_delta;
rvd->vdev_stat.vs_space += space_delta;
rvd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&rvd->vdev_stat_lock);
}
/* Note: metaslab_class_space_update moved to metaslab_space_update */
}
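The deflation arithmetic above is compact: an asize delta is converted to SPA_MINBLOCKSIZE units and scaled by the per-vdev deflate ratio before being folded into vs_dspace. The standalone sketch below mirrors only that arithmetic; the ratio value is made up for illustration and this is not the kernel code path.

/* Standalone illustration of the vdev_deflated_space() arithmetic. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SPA_MINBLOCKSHIFT 9                       /* 512-byte units, as in OpenZFS */
#define SPA_MINBLOCKSIZE  (1ULL << SPA_MINBLOCKSHIFT)

static int64_t
deflated_space(int64_t space, int64_t deflate_ratio)
{
	/* space must be a multiple of SPA_MINBLOCKSIZE, mirroring the ASSERT */
	assert((space & (int64_t)(SPA_MINBLOCKSIZE - 1)) == 0);
	return ((space >> SPA_MINBLOCKSHIFT) * deflate_ratio);
}

int
main(void)
{
	int64_t ratio = 384;            /* illustrative deflate ratio only */
	int64_t asize_delta = 1 << 20;  /* 1 MiB newly allocated on the vdev */

	printf("deflated delta: %lld\n",
	    (long long)deflated_space(asize_delta, ratio));
	return (0);
}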
/*
* Mark a top-level vdev's config as dirty, placing it on the dirty list
* so that it will be written out next time the vdev configuration is synced.
* If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
*/
void
vdev_config_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int c;
ASSERT(spa_writeable(spa));
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
*/
if (vd->vdev_aux != NULL) {
spa_aux_vdev_t *sav = vd->vdev_aux;
nvlist_t **aux;
uint_t naux;
for (c = 0; c < sav->sav_count; c++) {
if (sav->sav_vdevs[c] == vd)
break;
}
if (c == sav->sav_count) {
/*
* We're being removed. There's nothing more to do.
*/
ASSERT(sav->sav_sync == B_TRUE);
return;
}
sav->sav_sync = B_TRUE;
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
}
ASSERT(c < naux);
/*
* Setting the nvlist in the middle of the array is a little
* sketchy, but it will work.
*/
nvlist_free(aux[c]);
aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
return;
}
/*
* The dirty list is protected by the SCL_CONFIG lock. The caller
* must either hold SCL_CONFIG as writer, or must be the sync thread
* (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
if (vd == rvd) {
for (c = 0; c < rvd->vdev_children; c++)
vdev_config_dirty(rvd->vdev_child[c]);
} else {
ASSERT(vd == vd->vdev_top);
if (!list_link_active(&vd->vdev_config_dirty_node) &&
vdev_is_concrete(vd)) {
list_insert_head(&spa->spa_config_dirty_list, vd);
}
}
}
void
vdev_config_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
ASSERT(list_link_active(&vd->vdev_config_dirty_node));
list_remove(&spa->spa_config_dirty_list, vd);
}
/*
* Mark a top-level vdev's state as dirty, so that the next pass of
* spa_sync() can convert this into vdev_config_dirty(). We distinguish
* the state changes from larger config changes because they require
* much less locking, and are often needed for administrative actions.
*/
void
vdev_state_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_writeable(spa));
ASSERT(vd == vd->vdev_top);
/*
* The state list is protected by the SCL_STATE lock. The caller
* must either hold SCL_STATE as writer, or must be the sync thread
* (which holds SCL_STATE as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
if (!list_link_active(&vd->vdev_state_dirty_node) &&
vdev_is_concrete(vd))
list_insert_head(&spa->spa_state_dirty_list, vd);
}
void
vdev_state_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
ASSERT(list_link_active(&vd->vdev_state_dirty_node));
list_remove(&spa->spa_state_dirty_list, vd);
}
/*
* Propagate vdev state up from children to parent.
*/
void
vdev_propagate_state(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int degraded = 0, faulted = 0;
int corrupted = 0;
vdev_t *child;
if (vd->vdev_children > 0) {
for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
/*
* Don't factor holes or indirect vdevs into the
* decision.
*/
if (!vdev_is_concrete(child))
continue;
if (!vdev_readable(child) ||
(!vdev_writeable(child) && spa_writeable(spa))) {
/*
* Root special: if there is a top-level log
* device, treat the root vdev as if it were
* degraded.
*/
if (child->vdev_islog && vd == rvd)
degraded++;
else
faulted++;
} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
degraded++;
}
if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
corrupted++;
}
vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
/*
* Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
*/
if (corrupted && vd == rvd &&
rvd->vdev_state == VDEV_STATE_CANT_OPEN)
vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
}
if (vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Set a vdev's state. If this is during an open, we don't update the parent
* state, because we're in the process of opening children depth-first.
* Otherwise, we propagate the change to the parent.
*
* If this routine places a device in a faulted state, an appropriate ereport is
* generated.
*/
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
uint64_t save_state;
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
/*
* Since vdev_offline() code path is already in an offline
* state we can miss a statechange event to OFFLINE. Check
* the previous state to catch this condition.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(state == VDEV_STATE_OFFLINE) &&
(vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
/* post an offline state change */
zfs_post_state_change(spa, vd, vd->vdev_prevstate);
}
vd->vdev_stat.vs_aux = aux;
return;
}
save_state = vd->vdev_state;
vd->vdev_state = state;
vd->vdev_stat.vs_aux = aux;
/*
* If we are setting the vdev state to anything but an open state, then
* always close the underlying device unless the device has requested
* a delayed close (i.e. we're about to remove or fault the device).
* Otherwise, we keep accessible but invalid devices open forever.
* We don't call vdev_close() itself, because that implies some extra
* checks (offline, etc) that we don't want here. This is limited to
* leaf devices, because otherwise closing the device will affect other
* children.
*/
if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
/*
* If the previous state is set to VDEV_STATE_REMOVED, then this
* device was previously marked removed and someone attempted to
* reopen it. If this failed due to a nonexistent device, then
* keep the device in the REMOVED state. We also allow this if
* it is one of our special test online cases, which is only
* attempting to online the device and shouldn't generate an FMA
* fault.
*/
vd->vdev_state = VDEV_STATE_REMOVED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
} else if (state == VDEV_STATE_REMOVED) {
vd->vdev_removed = B_TRUE;
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* If we fail to open a vdev during an import or recovery, we
* mark it as "not available", which signifies that it was
* never there to begin with. Failure to open such a device
* is not considered an error.
*/
if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_not_present = 1;
/*
* Post the appropriate ereport. If the 'prevstate' field is
* set to something other than VDEV_STATE_UNKNOWN, it indicates
* that this is part of a vdev_reopen(). In this case, we don't
* want to post the ereport if the device was already in the
* CANT_OPEN state beforehand.
*
* If the 'checkremove' flag is set, then this is an attempt to
* online the device in response to an insertion event. If we
* hit this case, then we have detected an insertion event for a
* faulted or offline device that wasn't in the removed state.
* In this scenario, we don't post an ereport because we are
* about to replace the device, or attempt an online with
* vdev_forcefault, which will generate the fault for us.
*/
if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
!vd->vdev_not_present && !vd->vdev_checkremove &&
vd != spa->spa_root_vdev) {
const char *class;
switch (aux) {
case VDEV_AUX_OPEN_FAILED:
class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
break;
case VDEV_AUX_CORRUPT_DATA:
class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
break;
case VDEV_AUX_NO_REPLICAS:
class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
break;
case VDEV_AUX_BAD_GUID_SUM:
class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
break;
case VDEV_AUX_TOO_SMALL:
class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
break;
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
case VDEV_AUX_BAD_ASHIFT:
class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
(void) zfs_ereport_post(class, spa, vd, NULL, NULL,
save_state);
}
/* Erase any notion of persistent removed state */
vd->vdev_removed = B_FALSE;
} else {
vd->vdev_removed = B_FALSE;
}
/*
* Notify ZED of any significant state-change on a leaf vdev.
*/
if (vd->vdev_ops->vdev_op_leaf) {
/* preserve original state from a vdev_reopen() */
if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
(vd->vdev_prevstate != vd->vdev_state) &&
(save_state <= VDEV_STATE_CLOSED))
save_state = vd->vdev_prevstate;
/* filter out state change due to initial vdev_open */
if (save_state > VDEV_STATE_CLOSED)
zfs_post_state_change(spa, vd, save_state);
}
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
boolean_t
vdev_children_are_offline(vdev_t *vd)
{
ASSERT(!vd->vdev_ops->vdev_op_leaf);
for (uint64_t i = 0; i < vd->vdev_children; i++) {
if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
* a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
if (!vd->vdev_ops->vdev_op_leaf) {
const char *vdev_type = vd->vdev_ops->vdev_op_type;
if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
return (B_FALSE);
}
for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
vdev_is_concrete(vdev_t *vd)
{
vdev_ops_t *ops = vd->vdev_ops;
if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
ops == &vdev_missing_ops || ops == &vdev_root_ops) {
return (B_FALSE);
} else {
return (B_TRUE);
}
}
/*
* Determine if a log device has valid content. If the vdev was
* removed or faulted in the MOS config then we know that
* the content on the log device has already been written to the pool.
*/
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed)
return (B_TRUE);
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Expand a vdev if possible.
*/
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
ASSERT(vd->vdev_top == vd);
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(vdev_is_concrete(vd));
vdev_set_deflate_ratio(vd);
if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
VERIFY(vdev_metaslab_init(vd, txg) == 0);
vdev_config_dirty(vd);
}
}
/*
* Split a vdev.
*/
void
vdev_split(vdev_t *vd)
{
vdev_t *cvd, *pvd = vd->vdev_parent;
VERIFY3U(pvd->vdev_children, >, 1);
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
ASSERT3P(pvd->vdev_child, !=, NULL);
cvd = pvd->vdev_child[0];
if (pvd->vdev_children == 1) {
vdev_remove_parent(cvd);
cvd->vdev_splitting = B_TRUE;
}
vdev_propagate_state(cvd);
}
void
vdev_deadman(vdev_t *vd, const char *tag)
{
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_deadman(cvd, tag);
}
if (vd->vdev_ops->vdev_op_leaf) {
vdev_queue_t *vq = &vd->vdev_queue;
mutex_enter(&vq->vq_lock);
if (avl_numnodes(&vq->vq_active_tree) > 0) {
spa_t *spa = vd->vdev_spa;
zio_t *fio;
uint64_t delta;
zfs_dbgmsg("slow vdev: %s has %lu active IOs",
vd->vdev_path, avl_numnodes(&vq->vq_active_tree));
/*
* Look at the head of all the pending queues;
* if any I/O has been outstanding for longer than
* spa_deadman_synctime, invoke the deadman logic.
*/
fio = avl_first(&vq->vq_active_tree);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa))
zio_deadman(fio, tag);
}
mutex_exit(&vq->vq_lock);
}
}
void
vdev_defer_resilver(vdev_t *vd)
{
ASSERT(vd->vdev_ops->vdev_op_leaf);
vd->vdev_resilver_deferred = B_TRUE;
vd->vdev_spa->spa_resilver_deferred = B_TRUE;
}
/*
* Clears the resilver deferred flag on all leaf devs under vd. Returns
* B_TRUE if we have devices that need to be resilvered and are available to
* accept resilver I/Os.
*/
boolean_t
vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
{
boolean_t resilver_needed = B_FALSE;
spa_t *spa = vd->vdev_spa;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
}
if (vd == spa->spa_root_vdev &&
spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
vdev_config_dirty(vd);
spa->spa_resilver_deferred = B_FALSE;
return (resilver_needed);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (resilver_needed);
vd->vdev_resilver_deferred = B_FALSE;
return (!vdev_is_dead(vd) && !vd->vdev_offline &&
vdev_resilver_needed(vd, NULL, NULL));
}
boolean_t
vdev_xlate_is_empty(range_seg64_t *rs)
{
return (rs->rs_start == rs->rs_end);
}
/*
* Translate a logical range to the first contiguous physical range for the
* specified vdev_t. This function is initially called with a leaf vdev and
* will walk each parent vdev until it reaches a top-level vdev. Once the
* top-level is reached the physical range is initialized and the recursive
* function begins to unwind. As it unwinds it calls the parent's vdev
* specific translation function to do the real conversion.
*/
void
vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
/*
* Walk up the vdev tree
*/
if (vd != vd->vdev_top) {
vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
remain_rs);
} else {
/*
* We've reached the top-level vdev, initialize the physical
* range to the logical range and set an empty remaining
* range then start to unwind.
*/
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
remain_rs->rs_start = logical_rs->rs_start;
remain_rs->rs_end = logical_rs->rs_start;
return;
}
vdev_t *pvd = vd->vdev_parent;
ASSERT3P(pvd, !=, NULL);
ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
/*
* As this recursive function unwinds, translate the logical
* range into its physical and any remaining components by calling
* the vdev specific translate function.
*/
range_seg64_t intermediate = { 0 };
pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
physical_rs->rs_start = intermediate.rs_start;
physical_rs->rs_end = intermediate.rs_end;
}
void
vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg)
{
range_seg64_t iter_rs = *logical_rs;
range_seg64_t physical_rs;
range_seg64_t remain_rs;
while (!vdev_xlate_is_empty(&iter_rs)) {
vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
/*
* With raidz and dRAID, it's possible that the logical range
* does not live on this leaf vdev. Only when there is a non-
* zero physical size call the provided function.
*/
if (!vdev_xlate_is_empty(&physical_rs))
func(arg, &physical_rs);
iter_rs = remain_rs;
}
}
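The walk above repeatedly translates the head of the remaining logical range and hands each non-empty physical chunk to the callback until nothing remains. The toy analogue below reproduces just that loop shape with stand-in types and an identity mapping; it does not use the vdev_t machinery.

/* Toy analogue of the vdev_xlate_walk() loop (stand-in types, identity map). */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t start, end; } range_t;   /* stand-in for range_seg64_t */

/* Toy translator: handles at most 'chunk' bytes per call and reports the rest. */
static void
toy_xlate(const range_t *logical, range_t *physical, range_t *remain, uint64_t chunk)
{
	uint64_t len = logical->end - logical->start;
	uint64_t take = len < chunk ? len : chunk;

	physical->start = logical->start;          /* identity mapping for the sketch */
	physical->end = logical->start + take;
	remain->start = logical->start + take;
	remain->end = logical->end;
}

static void
print_range(void *arg, range_t *rs)
{
	(void) arg;
	printf("physical [%llu, %llu)\n",
	    (unsigned long long)rs->start, (unsigned long long)rs->end);
}

int
main(void)
{
	range_t iter = { 0, 300 }, phys, remain;

	while (iter.start != iter.end) {   /* same emptiness test as vdev_xlate_is_empty() */
		toy_xlate(&iter, &phys, &remain, 128);
		if (phys.start != phys.end)
			print_range(NULL, &phys);
		iter = remain;
	}
	return (0);
}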
static char *
vdev_name(vdev_t *vd, char *buf, int buflen)
{
if (vd->vdev_path == NULL) {
if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) {
strlcpy(buf, vd->vdev_spa->spa_name, buflen);
} else if (!vd->vdev_ops->vdev_op_leaf) {
snprintf(buf, buflen, "%s-%llu",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id);
}
} else {
strlcpy(buf, vd->vdev_path, buflen);
}
return (buf);
}
/*
* Look at the vdev tree and determine whether any devices are currently being
* replaced.
*/
boolean_t
vdev_replace_in_progress(vdev_t *vdev)
{
ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
if (vdev->vdev_ops == &vdev_replacing_ops)
return (B_TRUE);
/*
* A 'spare' vdev indicates that we have a replace in progress, unless
* it has exactly two children, and the second, the hot spare, has
* finished being resilvered.
*/
if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
!vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
return (B_TRUE);
for (int i = 0; i < vdev->vdev_children; i++) {
if (vdev_replace_in_progress(vdev->vdev_child[i]))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
vdev_prop_add_list(nvlist_t *nvl, const char *propname, const char *strval,
uint64_t intval, zprop_source_t src)
{
nvlist_t *propval;
propval = fnvlist_alloc();
fnvlist_add_uint64(propval, ZPROP_SOURCE, src);
if (strval != NULL)
fnvlist_add_string(propval, ZPROP_VALUE, strval);
else
fnvlist_add_uint64(propval, ZPROP_VALUE, intval);
fnvlist_add_nvlist(nvl, propname, propval);
nvlist_free(propval);
}
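Each property therefore ends up as a nested nvlist keyed by the property name, carrying a ZPROP_SOURCE and a ZPROP_VALUE pair. A hedged in-kernel usage fragment (property names and values are illustrative, and the caller owns outnvl) might look like:

/* Usage sketch for vdev_prop_add_list(); names and values are illustrative. */
nvlist_t *outnvl = fnvlist_alloc();

/* numeric property: "capacity" -> { ZPROP_SOURCE, ZPROP_VALUE = 42 } */
vdev_prop_add_list(outnvl, "capacity", NULL, 42, ZPROP_SRC_NONE);

/* string property: "path" -> { ZPROP_SOURCE, ZPROP_VALUE = "/dev/da0" } */
vdev_prop_add_list(outnvl, "path", "/dev/da0", 0, ZPROP_SRC_NONE);

/* ... consume outnvl ... */
nvlist_free(outnvl);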
static void
vdev_props_set_sync(void *arg, dmu_tx_t *tx)
{
vdev_t *vd;
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
nvlist_t *nvprops;
vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV);
nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS);
vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
/* this vdev could get removed while waiting for this sync task */
if (vd == NULL)
return;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
uint64_t intval, objid = 0;
const char *strval;
vdev_prop_t prop;
const char *propname = nvpair_name(elem);
zprop_type_t proptype;
/*
* Set vdev property values in the vdev props mos object.
*/
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
/*
* XXX: implement vdev_props_set_check()
*/
panic("vdev not root/top/leaf");
}
switch (prop = vdev_name_to_prop(propname)) {
case VDEV_PROP_USERPROP:
if (vdev_prop_user(propname)) {
strval = fnvpair_value_string(elem);
if (strlen(strval) == 0) {
/* remove the property if value == "" */
(void) zap_remove(mos, objid, propname,
tx);
} else {
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
}
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
}
break;
default:
/* normalize the property name */
propname = vdev_prop_to_name(prop);
proptype = vdev_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos, objid, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%s",
(u_longlong_t)vdev_guid, nvpair_name(elem),
strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(vdev_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos, objid, propname,
sizeof (uint64_t), 1, &intval, tx));
spa_history_log_internal(spa, "vdev set", tx,
"vdev_guid=%llu: %s=%lld",
(u_longlong_t)vdev_guid,
nvpair_name(elem), (longlong_t)intval);
} else {
panic("invalid vdev property type %u",
nvpair_type(elem));
}
}
}
mutex_exit(&spa->spa_props_lock);
}
int
vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
nvpair_t *elem = NULL;
uint64_t vdev_guid;
nvlist_t *nvprops;
int error = 0;
ASSERT(vd != NULL);
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS,
&nvprops) != 0)
return (SET_ERROR(EINVAL));
if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
return (SET_ERROR(EINVAL));
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
const char *propname = nvpair_name(elem);
vdev_prop_t prop = vdev_name_to_prop(propname);
uint64_t intval = 0;
const char *strval = NULL;
if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) {
error = EINVAL;
goto end;
}
if (vdev_prop_readonly(prop)) {
error = EROFS;
goto end;
}
/* Special Processing */
switch (prop) {
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL) {
error = EROFS;
break;
}
if (nvpair_value_string(elem, &strval) != 0) {
error = EINVAL;
break;
}
/* New path must start with /dev/ */
if (strncmp(strval, "/dev/", 5)) {
error = EINVAL;
break;
}
error = spa_vdev_setpath(spa, vdev_guid, strval);
break;
case VDEV_PROP_ALLOCATING:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
if (intval != vd->vdev_noalloc)
break;
if (intval == 0)
error = spa_vdev_noalloc(spa, vdev_guid);
else
error = spa_vdev_alloc(spa, vdev_guid);
break;
case VDEV_PROP_FAILFAST:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_failfast = intval & 1;
break;
case VDEV_PROP_CHECKSUM_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_checksum_n = intval;
break;
case VDEV_PROP_CHECKSUM_T:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_checksum_t = intval;
break;
case VDEV_PROP_IO_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_io_n = intval;
break;
case VDEV_PROP_IO_T:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
break;
}
vd->vdev_io_t = intval;
break;
default:
/* Most processing is done in vdev_props_set_sync */
break;
}
end:
if (error != 0) {
intval = error;
vdev_prop_add_list(outnvl, propname, strval, intval, 0);
return (error);
}
}
return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
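As the lookups at the top of vdev_prop_set() show, the innvl it consumes carries the target vdev GUID under ZPOOL_VDEV_PROPS_SET_VDEV and a nested property nvlist under ZPOOL_VDEV_PROPS_SET_PROPS. A minimal caller-side fragment, with vdev_guid assumed to be a known GUID and the property chosen purely for illustration, could be:

/* Sketch of the innvl shape consumed by vdev_prop_set(); values are made up. */
nvlist_t *props = fnvlist_alloc();
fnvlist_add_uint64(props, "failfast", 1);               /* illustrative property */

nvlist_t *innvl = fnvlist_alloc();
fnvlist_add_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);  /* assumed GUID */
fnvlist_add_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);

/* ... hand innvl to the vdev_prop_set() path, then free ... */
nvlist_free(props);
nvlist_free(innvl);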
int
vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
int err = 0;
uint64_t objid;
uint64_t vdev_guid;
nvpair_t *elem = NULL;
nvlist_t *nvprops = NULL;
uint64_t intval = 0;
char *strval = NULL;
const char *propname = NULL;
vdev_prop_t prop;
ASSERT(vd != NULL);
ASSERT(mos != NULL);
if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV,
&vdev_guid) != 0)
return (SET_ERROR(EINVAL));
nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops);
if (vd->vdev_root_zap != 0) {
objid = vd->vdev_root_zap;
} else if (vd->vdev_top_zap != 0) {
objid = vd->vdev_top_zap;
} else if (vd->vdev_leaf_zap != 0) {
objid = vd->vdev_leaf_zap;
} else {
return (SET_ERROR(EINVAL));
}
ASSERT(objid != 0);
mutex_enter(&spa->spa_props_lock);
if (nvprops != NULL) {
char namebuf[64] = { 0 };
while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
intval = 0;
strval = NULL;
propname = nvpair_name(elem);
prop = vdev_name_to_prop(propname);
zprop_source_t src = ZPROP_SRC_DEFAULT;
uint64_t integer_size, num_integers;
switch (prop) {
/* Special Read-only Properties */
case VDEV_PROP_NAME:
strval = vdev_name(vd, namebuf,
sizeof (namebuf));
if (strval == NULL)
continue;
vdev_prop_add_list(outnvl, propname, strval, 0,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CAPACITY:
/* percent used */
intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
(vd->vdev_stat.vs_alloc * 100 /
vd->vdev_stat.vs_dspace);
vdev_prop_add_list(outnvl, propname, NULL,
intval, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_STATE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_state, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_GUID:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_guid, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_asize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PSIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_psize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ASHIFT:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_ashift, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_SIZE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_dspace -
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ALLOCATED:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_EXPANDSZ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRAGMENTATION:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_fragmentation,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARITY:
vdev_prop_add_list(outnvl, propname, NULL,
vdev_get_nparity(vd), ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PATH:
if (vd->vdev_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_DEVID:
if (vd->vdev_devid == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_devid, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PHYS_PATH:
if (vd->vdev_physpath == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_physpath, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_ENC_PATH:
if (vd->vdev_enc_sysfs_path == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_FRU:
if (vd->vdev_fru == NULL)
continue;
vdev_prop_add_list(outnvl, propname,
vd->vdev_fru, 0, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_PARENT:
if (vd->vdev_parent != NULL) {
strval = vdev_name(vd->vdev_parent,
namebuf, sizeof (namebuf));
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
}
continue;
case VDEV_PROP_CHILDREN:
if (vd->vdev_children > 0)
strval = kmem_zalloc(ZAP_MAXVALUELEN,
KM_SLEEP);
for (uint64_t i = 0; i < vd->vdev_children;
i++) {
const char *vname;
vname = vdev_name(vd->vdev_child[i],
namebuf, sizeof (namebuf));
if (vname == NULL)
vname = "(unknown)";
if (strlen(strval) > 0)
strlcat(strval, ",",
ZAP_MAXVALUELEN);
strlcat(strval, vname, ZAP_MAXVALUELEN);
}
if (strval != NULL) {
vdev_prop_add_list(outnvl, propname,
strval, 0, ZPROP_SRC_NONE);
kmem_free(strval, ZAP_MAXVALUELEN);
}
continue;
case VDEV_PROP_NUMCHILDREN:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_children, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_READ_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_read_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_WRITE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_write_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_CHECKSUM_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_checksum_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_INITIALIZE_ERRORS:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_initialize_errors,
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_OPS_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_IOCTL. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_ops[ZIO_TYPE_IOCTL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_NULL:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_READ:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_WRITE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_FREE:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_CLAIM:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_BYTES_TRIM:
/*
* TRIM ops and bytes are reported to user
* space as ZIO_TYPE_IOCTL. This is done to
* preserve the vdev_stat_t structure layout
* for user space.
*/
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_stat.vs_bytes[ZIO_TYPE_IOCTL],
ZPROP_SRC_NONE);
continue;
case VDEV_PROP_REMOVING:
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_removing, ZPROP_SRC_NONE);
continue;
/* Numeric Properties */
case VDEV_PROP_ALLOCATING:
/* Leaf vdevs cannot have this property */
if (vd->vdev_mg == NULL &&
vd->vdev_top != NULL) {
src = ZPROP_SRC_NONE;
intval = ZPROP_BOOLEAN_NA;
} else {
err = vdev_prop_get_int(vd, prop,
&intval);
if (err && err != ENOENT)
break;
if (intval ==
vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
else
src = ZPROP_SRC_LOCAL;
}
vdev_prop_add_list(outnvl, propname, NULL,
intval, src);
break;
case VDEV_PROP_FAILFAST:
src = ZPROP_SRC_LOCAL;
strval = NULL;
err = zap_lookup(mos, objid, nvpair_name(elem),
sizeof (uint64_t), 1, &intval);
if (err == ENOENT) {
intval = vdev_prop_default_numeric(
prop);
err = 0;
} else if (err) {
break;
}
if (intval == vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
vdev_prop_add_list(outnvl, propname, strval,
intval, src);
break;
case VDEV_PROP_CHECKSUM_N:
case VDEV_PROP_CHECKSUM_T:
case VDEV_PROP_IO_N:
case VDEV_PROP_IO_T:
err = vdev_prop_get_int(vd, prop, &intval);
if (err && err != ENOENT)
break;
if (intval == vdev_prop_default_numeric(prop))
src = ZPROP_SRC_DEFAULT;
else
src = ZPROP_SRC_LOCAL;
vdev_prop_add_list(outnvl, propname, NULL,
intval, src);
break;
/* Text Properties */
case VDEV_PROP_COMMENT:
/* Exists in the ZAP below */
/* FALLTHRU */
case VDEV_PROP_USERPROP:
/* User Properties */
src = ZPROP_SRC_LOCAL;
err = zap_length(mos, objid, nvpair_name(elem),
&integer_size, &num_integers);
if (err)
break;
switch (integer_size) {
case 8:
/* User properties cannot be integers */
err = EINVAL;
break;
case 1:
/* string property */
strval = kmem_alloc(num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid,
nvpair_name(elem), 1,
num_integers, strval);
if (err) {
kmem_free(strval,
num_integers);
break;
}
vdev_prop_add_list(outnvl, propname,
strval, 0, src);
kmem_free(strval, num_integers);
break;
}
break;
default:
err = ENOENT;
break;
}
if (err)
break;
}
} else {
/*
* Get all properties from the MOS vdev property object.
*/
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, mos, objid);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
intval = 0;
strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
propname = za.za_name;
switch (za.za_integer_length) {
case 8:
/* We do not allow integer user properties */
/* This is likely an internal value */
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers,
KM_SLEEP);
err = zap_lookup(mos, objid, za.za_name, 1,
za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
vdev_prop_add_list(outnvl, propname, strval, 0,
src);
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
}
mutex_exit(&spa->spa_props_lock);
if (err && err != ENOENT) {
return (err);
}
return (0);
}
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
"Target number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
"Default lower limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_ms_shift, UINT, ZMOD_RW,
"Default upper limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
"Minimum number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
"Practical upper limit of total metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
"Rate limit slow IO (delay) events to this many per second");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
"Rate limit checksum events to this many checksum errors per second "
"(do not set below ZED threshold).");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
"Ignore errors during resilver/scrub");
ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
"Bypass vdev_validate()");
ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
"Disable cache flushes");
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
"Minimum number of metaslabs required to dedicate one for log blocks");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
param_set_min_auto_ashift, param_get_uint, ZMOD_RW,
"Minimum ashift used when creating new top-level vdevs");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
param_set_max_auto_ashift, param_get_uint, ZMOD_RW,
"Maximum ashift used when optimizing for logical -> physical sector "
"size on new top-level vdevs");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_cache.c b/sys/contrib/openzfs/module/zfs/vdev_cache.c
deleted file mode 100644
index f0a17600d58e..000000000000
--- a/sys/contrib/openzfs/module/zfs/vdev_cache.c
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or https://opensource.org/licenses/CDDL-1.0.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * Copyright (c) 2013, 2016 by Delphix. All rights reserved.
- */
-
-#include <sys/zfs_context.h>
-#include <sys/spa.h>
-#include <sys/vdev_impl.h>
-#include <sys/zio.h>
-#include <sys/kstat.h>
-#include <sys/abd.h>
-
-/*
- * Virtual device read-ahead caching.
- *
- * This file implements a simple LRU read-ahead cache. When the DMU reads
- * a given block, it will often want other, nearby blocks soon thereafter.
- * We take advantage of this by reading a larger disk region and caching
- * the result. In the best case, this can turn 128 back-to-back 512-byte
- * reads into a single 64k read followed by 127 cache hits; this reduces
- * latency dramatically. In the worst case, it can turn an isolated 512-byte
- * read into a 64k read, which doesn't affect latency all that much but is
- * terribly wasteful of bandwidth. A more intelligent version of the cache
- * could keep track of access patterns and not do read-ahead unless it sees
- * at least two temporally close I/Os to the same region. Currently, only
- * metadata I/O is inflated. A further enhancement could take advantage of
- * more semantic information about the I/O. And it could use something
- * faster than an AVL tree; that was chosen solely for convenience.
- *
- * There are five cache operations: allocate, fill, read, write, evict.
- *
- * (1) Allocate. This reserves a cache entry for the specified region.
- * We separate the allocate and fill operations so that multiple threads
- * don't generate I/O for the same cache miss.
- *
- * (2) Fill. When the I/O for a cache miss completes, the fill routine
- * places the data in the previously allocated cache entry.
- *
- * (3) Read. Read data from the cache.
- *
- * (4) Write. Update cache contents after write completion.
- *
- * (5) Evict. When allocating a new entry, we evict the oldest (LRU) entry
- * if the total cache size exceeds zfs_vdev_cache_size.
- */
-
-/*
- * These tunables are for performance analysis.
- */
-/*
- * All i/os smaller than zfs_vdev_cache_max will be turned into
- * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
- * track buffer). At most zfs_vdev_cache_size bytes will be kept in each
- * vdev's vdev_cache.
- *
- * TODO: Note that with the current ZFS code, it turns out that the
- * vdev cache is not helpful, and in some cases actually harmful. It
- * is better if we disable this. Once some time has passed, we should
- * actually remove this to simplify the code. For now we just disable
- * it by setting the zfs_vdev_cache_size to zero. Note that Solaris 11
- * has made these same changes.
- */
-static uint_t zfs_vdev_cache_max = 1 << 14; /* 16KB */
-static uint_t zfs_vdev_cache_size = 0;
-static uint_t zfs_vdev_cache_bshift = 16;
-
-#define VCBS (1 << zfs_vdev_cache_bshift) /* 64KB */
-
-static kstat_t *vdc_ksp = NULL;
-
-typedef struct vdc_stats {
- kstat_named_t vdc_stat_delegations;
- kstat_named_t vdc_stat_hits;
- kstat_named_t vdc_stat_misses;
-} vdc_stats_t;
-
-static vdc_stats_t vdc_stats = {
- { "delegations", KSTAT_DATA_UINT64 },
- { "hits", KSTAT_DATA_UINT64 },
- { "misses", KSTAT_DATA_UINT64 }
-};
-
-#define VDCSTAT_BUMP(stat) atomic_inc_64(&vdc_stats.stat.value.ui64);
-
-static inline int
-vdev_cache_offset_compare(const void *a1, const void *a2)
-{
- const vdev_cache_entry_t *ve1 = (const vdev_cache_entry_t *)a1;
- const vdev_cache_entry_t *ve2 = (const vdev_cache_entry_t *)a2;
-
- return (TREE_CMP(ve1->ve_offset, ve2->ve_offset));
-}
-
-static int
-vdev_cache_lastused_compare(const void *a1, const void *a2)
-{
- const vdev_cache_entry_t *ve1 = (const vdev_cache_entry_t *)a1;
- const vdev_cache_entry_t *ve2 = (const vdev_cache_entry_t *)a2;
-
- int cmp = TREE_CMP(ve1->ve_lastused, ve2->ve_lastused);
- if (likely(cmp))
- return (cmp);
-
- /*
- * Among equally old entries, sort by offset to ensure uniqueness.
- */
- return (vdev_cache_offset_compare(a1, a2));
-}
-
-/*
- * Evict the specified entry from the cache.
- */
-static void
-vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
-{
- ASSERT(MUTEX_HELD(&vc->vc_lock));
- ASSERT3P(ve->ve_fill_io, ==, NULL);
- ASSERT3P(ve->ve_abd, !=, NULL);
-
- avl_remove(&vc->vc_lastused_tree, ve);
- avl_remove(&vc->vc_offset_tree, ve);
- abd_free(ve->ve_abd);
- kmem_free(ve, sizeof (vdev_cache_entry_t));
-}
-
-/*
- * Allocate an entry in the cache. At the point we don't have the data,
- * we're just creating a placeholder so that multiple threads don't all
- * go off and read the same blocks.
- */
-static vdev_cache_entry_t *
-vdev_cache_allocate(zio_t *zio)
-{
- vdev_cache_t *vc = &zio->io_vd->vdev_cache;
- uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
- vdev_cache_entry_t *ve;
-
- ASSERT(MUTEX_HELD(&vc->vc_lock));
-
- if (zfs_vdev_cache_size == 0)
- return (NULL);
-
- /*
- * If adding a new entry would exceed the cache size,
- * evict the oldest entry (LRU).
- */
- if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
- zfs_vdev_cache_size) {
- ve = avl_first(&vc->vc_lastused_tree);
- if (ve->ve_fill_io != NULL)
- return (NULL);
- ASSERT3U(ve->ve_hits, !=, 0);
- vdev_cache_evict(vc, ve);
- }
-
- ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
- ve->ve_offset = offset;
- ve->ve_lastused = ddi_get_lbolt();
- ve->ve_abd = abd_alloc_for_io(VCBS, B_TRUE);
-
- avl_add(&vc->vc_offset_tree, ve);
- avl_add(&vc->vc_lastused_tree, ve);
-
- return (ve);
-}
-
-static void
-vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
-{
- uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
-
- ASSERT(MUTEX_HELD(&vc->vc_lock));
- ASSERT3P(ve->ve_fill_io, ==, NULL);
-
- if (ve->ve_lastused != ddi_get_lbolt()) {
- avl_remove(&vc->vc_lastused_tree, ve);
- ve->ve_lastused = ddi_get_lbolt();
- avl_add(&vc->vc_lastused_tree, ve);
- }
-
- ve->ve_hits++;
- abd_copy_off(zio->io_abd, ve->ve_abd, 0, cache_phase, zio->io_size);
-}
-
-/*
- * Fill a previously allocated cache entry with data.
- */
-static void
-vdev_cache_fill(zio_t *fio)
-{
- vdev_t *vd = fio->io_vd;
- vdev_cache_t *vc = &vd->vdev_cache;
- vdev_cache_entry_t *ve = fio->io_private;
- zio_t *pio;
-
- ASSERT3U(fio->io_size, ==, VCBS);
-
- /*
- * Add data to the cache.
- */
- mutex_enter(&vc->vc_lock);
-
- ASSERT3P(ve->ve_fill_io, ==, fio);
- ASSERT3U(ve->ve_offset, ==, fio->io_offset);
- ASSERT3P(ve->ve_abd, ==, fio->io_abd);
-
- ve->ve_fill_io = NULL;
-
- /*
- * Even if this cache line was invalidated by a missed write update,
- * any reads that were queued up before the missed update are still
- * valid, so we can satisfy them from this line before we evict it.
- */
- zio_link_t *zl = NULL;
- while ((pio = zio_walk_parents(fio, &zl)) != NULL)
- vdev_cache_hit(vc, ve, pio);
-
- if (fio->io_error || ve->ve_missed_update)
- vdev_cache_evict(vc, ve);
-
- mutex_exit(&vc->vc_lock);
-}
-
-/*
- * Read data from the cache. Returns B_TRUE cache hit, B_FALSE on miss.
- */
-boolean_t
-vdev_cache_read(zio_t *zio)
-{
- vdev_cache_t *vc = &zio->io_vd->vdev_cache;
- vdev_cache_entry_t *ve, ve_search;
- uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
- zio_t *fio;
- uint64_t cache_phase __maybe_unused = P2PHASE(zio->io_offset, VCBS);
-
- ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
-
- if (zfs_vdev_cache_size == 0)
- return (B_FALSE);
-
- if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
- return (B_FALSE);
-
- if (zio->io_size > zfs_vdev_cache_max)
- return (B_FALSE);
-
- /*
- * If the I/O straddles two or more cache blocks, don't cache it.
- */
- if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
- return (B_FALSE);
-
- ASSERT3U(cache_phase + zio->io_size, <=, VCBS);
-
- mutex_enter(&vc->vc_lock);
-
- ve_search.ve_offset = cache_offset;
- ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);
-
- if (ve != NULL) {
- if (ve->ve_missed_update) {
- mutex_exit(&vc->vc_lock);
- return (B_FALSE);
- }
-
- if ((fio = ve->ve_fill_io) != NULL) {
- zio_vdev_io_bypass(zio);
- zio_add_child(zio, fio);
- mutex_exit(&vc->vc_lock);
- VDCSTAT_BUMP(vdc_stat_delegations);
- return (B_TRUE);
- }
-
- vdev_cache_hit(vc, ve, zio);
- zio_vdev_io_bypass(zio);
-
- mutex_exit(&vc->vc_lock);
- VDCSTAT_BUMP(vdc_stat_hits);
- return (B_TRUE);
- }
-
- ve = vdev_cache_allocate(zio);
-
- if (ve == NULL) {
- mutex_exit(&vc->vc_lock);
- return (B_FALSE);
- }
-
- fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
- ve->ve_abd, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_NOW,
- ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);
-
- ve->ve_fill_io = fio;
- zio_vdev_io_bypass(zio);
- zio_add_child(zio, fio);
-
- mutex_exit(&vc->vc_lock);
- zio_nowait(fio);
- VDCSTAT_BUMP(vdc_stat_misses);
-
- return (B_TRUE);
-}
-
-/*
- * Update cache contents upon write completion.
- */
-void
-vdev_cache_write(zio_t *zio)
-{
- vdev_cache_t *vc = &zio->io_vd->vdev_cache;
- vdev_cache_entry_t *ve, ve_search;
- uint64_t io_start = zio->io_offset;
- uint64_t io_end = io_start + zio->io_size;
- uint64_t min_offset = P2ALIGN(io_start, VCBS);
- uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
- avl_index_t where;
-
- ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
-
- mutex_enter(&vc->vc_lock);
-
- ve_search.ve_offset = min_offset;
- ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);
-
- if (ve == NULL)
- ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);
-
- while (ve != NULL && ve->ve_offset < max_offset) {
- uint64_t start = MAX(ve->ve_offset, io_start);
- uint64_t end = MIN(ve->ve_offset + VCBS, io_end);
-
- if (ve->ve_fill_io != NULL) {
- ve->ve_missed_update = 1;
- } else {
- abd_copy_off(ve->ve_abd, zio->io_abd,
- start - ve->ve_offset, start - io_start,
- end - start);
- }
- ve = AVL_NEXT(&vc->vc_offset_tree, ve);
- }
- mutex_exit(&vc->vc_lock);
-}
-
-void
-vdev_cache_purge(vdev_t *vd)
-{
- vdev_cache_t *vc = &vd->vdev_cache;
- vdev_cache_entry_t *ve;
-
- mutex_enter(&vc->vc_lock);
- while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
- vdev_cache_evict(vc, ve);
- mutex_exit(&vc->vc_lock);
-}
-
-void
-vdev_cache_init(vdev_t *vd)
-{
- vdev_cache_t *vc = &vd->vdev_cache;
-
- mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);
-
- avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
- sizeof (vdev_cache_entry_t),
- offsetof(struct vdev_cache_entry, ve_offset_node));
-
- avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
- sizeof (vdev_cache_entry_t),
- offsetof(struct vdev_cache_entry, ve_lastused_node));
-}
-
-void
-vdev_cache_fini(vdev_t *vd)
-{
- vdev_cache_t *vc = &vd->vdev_cache;
-
- vdev_cache_purge(vd);
-
- avl_destroy(&vc->vc_offset_tree);
- avl_destroy(&vc->vc_lastused_tree);
-
- mutex_destroy(&vc->vc_lock);
-}
-
-void
-vdev_cache_stat_init(void)
-{
- vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
- KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
- KSTAT_FLAG_VIRTUAL);
- if (vdc_ksp != NULL) {
- vdc_ksp->ks_data = &vdc_stats;
- kstat_install(vdc_ksp);
- }
-}
-
-void
-vdev_cache_stat_fini(void)
-{
- if (vdc_ksp != NULL) {
- kstat_delete(vdc_ksp);
- vdc_ksp = NULL;
- }
-}
-
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_max, UINT, ZMOD_RW,
- "Inflate reads small than max");
-
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_size, UINT, ZMOD_RD,
- "Total size of the per-disk cache");
-
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_bshift, UINT, ZMOD_RW,
- "Shift size to inflate reads too");
diff --git a/sys/contrib/openzfs/module/zfs/vdev_indirect.c b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
index a16ad2f4e7cf..89667585345d 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_indirect.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
@@ -1,1912 +1,1911 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014, 2017 by Delphix. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2014, 2020 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/abd.h>
#include <sys/zthr.h>
/*
* An indirect vdev corresponds to a vdev that has been removed. Since
* we cannot rewrite block pointers of snapshots, etc., we keep a
* mapping from old location on the removed device to the new location
* on another device in the pool and use this mapping whenever we need
* to access the DVA. Unfortunately, this mapping did not respect
* logical block boundaries when it was first created, and so a DVA on
* this indirect vdev may be "split" into multiple sections that each
* map to a different location. As a consequence, not all DVAs can be
* translated to an equivalent new DVA. Instead we must provide a
* "vdev_remap" operation that executes a callback on each contiguous
* segment of the new location. This function is used in multiple ways:
*
* - I/Os to this vdev use the callback to determine where the
* data is now located, and issue child I/Os for each segment's new
* location.
*
* - frees and claims to this vdev use the callback to free or claim
* each mapped segment. (Note that we don't actually need to claim
* log blocks on indirect vdevs, because we don't allocate to
* removing vdevs. However, zdb uses zio_claim() for its leak
* detection.)
*/
/*
* "Big theory statement" for how we mark blocks obsolete.
*
* When a block on an indirect vdev is freed or remapped, a section of
* that vdev's mapping may no longer be referenced (aka "obsolete"). We
* keep track of how much of each mapping entry is obsolete. When
* an entry becomes completely obsolete, we can remove it, thus reducing
* the memory used by the mapping. The complete picture of obsolescence
* is given by the following data structures, described below:
* - the entry-specific obsolete count
* - the vdev-specific obsolete spacemap
* - the pool-specific obsolete bpobj
*
* == On disk data structures used ==
*
* We track the obsolete space for the pool using several objects. Each
* of these objects is created on demand and freed when no longer
* needed, and is assumed to be empty if it does not exist.
* SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
*
* - Each vic_mapping_object (associated with an indirect vdev) can
* have a vimp_counts_object. This is an array of uint32_t's
* with the same number of entries as the vic_mapping_object. When
* the mapping is condensed, entries from the vic_obsolete_sm_object
* (see below) are folded into the counts. Therefore, each
* obsolete_counts entry tells us the number of bytes in the
* corresponding mapping entry that were not referenced when the
* mapping was last condensed.
*
* - Each indirect or removing vdev can have a vic_obsolete_sm_object.
* This is a space map containing an alloc entry for every DVA that
* has been obsoleted since the last time this indirect vdev was
* condensed. We use this object in order to improve performance
* when marking a DVA as obsolete. Instead of modifying an arbitrary
* offset of the vimp_counts_object, we only need to append an entry
* to the end of this object. When a DVA becomes obsolete, it is
* added to the obsolete space map. This happens when the DVA is
* freed, remapped and not referenced by a snapshot, or the last
* snapshot referencing it is destroyed.
*
* - Each dataset can have a ds_remap_deadlist object. This is a
* deadlist object containing all blocks that were remapped in this
* dataset but referenced in a previous snapshot. Blocks can *only*
* appear on this list if they were remapped (dsl_dataset_block_remapped);
* blocks that were killed in a head dataset are put on the normal
* ds_deadlist and marked obsolete when they are freed.
*
* - The pool can have a dp_obsolete_bpobj. This is a list of blocks
* in the pool that need to be marked obsolete. When a snapshot is
* destroyed, we move some of the ds_remap_deadlist to the obsolete
* bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
* asynchronously process the obsolete bpobj, moving its entries to
* the specific vdevs' obsolete space maps.
*
* == Summary of how we mark blocks as obsolete ==
*
* - When freeing a block: if any DVA is on an indirect vdev, append to
* vic_obsolete_sm_object.
* - When remapping a block, add dva to ds_remap_deadlist (if prev snap
* references; otherwise append to vic_obsolete_sm_object).
* - When freeing a snapshot: move parts of ds_remap_deadlist to
* dp_obsolete_bpobj (same algorithm as ds_deadlist).
* - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
* individual vdev's vic_obsolete_sm_object.
*/
/*
* "Big theory statement" for how we condense indirect vdevs.
*
* Condensing an indirect vdev's mapping is the process of determining
* the precise counts of obsolete space for each mapping entry (by
* integrating the obsolete spacemap into the obsolete counts) and
* writing out a new mapping that contains only referenced entries.
*
* We condense a vdev when we expect the mapping to shrink (see
* vdev_indirect_should_condense()), but only perform one condense at a
* time to limit the memory usage. In addition, we use a separate
* open-context thread (spa_condense_indirect_thread) to incrementally
* create the new mapping object in a way that minimizes the impact on
* the rest of the system.
*
* == Generating a new mapping ==
*
* To generate a new mapping, we follow these steps:
*
* 1. Save the old obsolete space map and create a new mapping object
* (see spa_condense_indirect_start_sync()). This initializes the
* spa_condensing_indirect_phys with the "previous obsolete space map",
* which is now read only. Newly obsolete DVAs will be added to a
* new (initially empty) obsolete space map, and will not be
* considered as part of this condense operation.
*
* 2. Construct in memory the precise counts of obsolete space for each
* mapping entry, by incorporating the obsolete space map into the
* counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
*
* 3. Iterate through each mapping entry, writing to the new mapping any
* entries that are not completely obsolete (i.e. which don't have
* obsolete count == mapping length). (See
* spa_condense_indirect_generate_new_mapping().)
*
* 4. Destroy the old mapping object and switch over to the new one
* (spa_condense_indirect_complete_sync).
*
* == Restarting from failure ==
*
* To restart the condense when we import/open the pool, we must start
* at the 2nd step above: reconstruct the precise counts in memory,
* based on the space map + counts. Then in the 3rd step, we start
* iterating where we left off: at vimp_max_offset of the new mapping
* object.
*/
static int zfs_condense_indirect_vdevs_enable = B_TRUE;
/*
* Condense if at least this percent of the bytes in the mapping is
* obsolete. With the default of 25%, the amount of space mapped
* will be reduced to 1% of its original size after at most 16
* condenses. Higher values will condense less often (causing less
* i/o); lower values will reduce the mapping size more quickly.
*/
static uint_t zfs_condense_indirect_obsolete_pct = 25;
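/*
 * A hedged, standalone sketch (not part of this change) checking the
 * arithmetic behind the figure above: if each condense removes at least the
 * 25% of mapped bytes that triggered it, at most 75% of the mapping survives
 * each pass, and 0.75^16 is roughly 0.01, i.e. about 1% of the original size
 * after 16 condenses.
 */
#if 0	/* illustrative example only; compile separately with -lm */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	/* Fraction of the mapping that can survive 16 condenses. */
	double remaining = pow(1.0 - 0.25, 16);

	printf("fraction remaining after 16 condenses: %.4f (~1%%)\n",
	    remaining);
	return (0);
}
#endif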
/*
* Condense if the obsolete space map takes up more than this amount of
* space on disk (logically). This limits the amount of disk space
* consumed by the obsolete space map; the default of 1GB is small enough
* that we typically don't mind "wasting" it.
*/
static uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
/*
* Don't bother condensing if the mapping uses less than this amount of
* memory. The default of 128KB is considered a "trivial" amount of
* memory and not worth reducing.
*/
static uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;
/*
* This is used by the test suite so that it can ensure that certain
* actions happen while in the middle of a condense (which might otherwise
* complete too quickly). If used to reduce the performance impact of
* condensing in production, a maximum value of 1 should be sufficient.
*/
static uint_t zfs_condense_indirect_commit_entry_delay_ms = 0;
/*
* If an indirect split block contains more than this many possible unique
* combinations when being reconstructed, consider it too computationally
* expensive to check them all. Instead, try at most 100 randomly-selected
* combinations each time the block is accessed. This allows all segment
* copies to participate fairly in the reconstruction when all combinations
* cannot be checked and prevents repeated use of one bad copy.
*/
uint_t zfs_reconstruct_indirect_combinations_max = 4096;
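/*
 * Worked example (an illustration only, not a statement about any particular
 * pool): the number of candidate reconstructions is the product of the unique
 * copies per split, so six splits that each map to a 2-way mirror give
 * 2^6 = 64 combinations and are checked exhaustively, while twenty such
 * splits give 2^20 (about one million), which exceeds the default of 4096
 * above and falls back to random sampling.
 */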
/*
* Enable to simulate damaged segments and validate reconstruction. This
* is intentionally not exposed as a module parameter.
*/
unsigned long zfs_reconstruct_indirect_damage_fraction = 0;
/*
* The indirect_child_t represents the vdev that we will read from, when we
* need to read all copies of the data (e.g. for scrub or reconstruction).
* For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
* ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
* ic_vdev is a child of the mirror.
*/
typedef struct indirect_child {
abd_t *ic_data;
vdev_t *ic_vdev;
/*
* ic_duplicate is NULL when the ic_data contents are unique; when it
* is determined to be a duplicate, it references the primary child.

*/
struct indirect_child *ic_duplicate;
list_node_t ic_node; /* node on is_unique_child */
int ic_error; /* set when a child does not contain the data */
} indirect_child_t;
/*
* The indirect_split_t represents one mapped segment of an i/o to the
* indirect vdev. For non-split (contiguously-mapped) blocks, there will be
* only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
* For split blocks, there will be several of these.
*/
typedef struct indirect_split {
list_node_t is_node; /* link on iv_splits */
/*
* is_split_offset is the offset into the i/o.
* This is the sum of the previous splits' is_size's.
*/
uint64_t is_split_offset;
vdev_t *is_vdev; /* top-level vdev */
uint64_t is_target_offset; /* offset on is_vdev */
uint64_t is_size;
int is_children; /* number of entries in is_child[] */
int is_unique_children; /* number of entries in is_unique_child */
list_t is_unique_child;
/*
* is_good_child is the child that we are currently using to
* attempt reconstruction.
*/
indirect_child_t *is_good_child;
indirect_child_t is_child[];
} indirect_split_t;
/*
* The indirect_vsd_t is associated with each i/o to the indirect vdev.
* It is the "Vdev-Specific Data" in the zio_t's io_vsd.
*/
typedef struct indirect_vsd {
boolean_t iv_split_block;
boolean_t iv_reconstruct;
uint64_t iv_unique_combinations;
uint64_t iv_attempts;
uint64_t iv_attempts_max;
list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;
static void
vdev_indirect_map_free(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
indirect_split_t *is;
- while ((is = list_head(&iv->iv_splits)) != NULL) {
+ while ((is = list_remove_head(&iv->iv_splits)) != NULL) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic->ic_data != NULL)
abd_free(ic->ic_data);
}
- list_remove(&iv->iv_splits, is);
indirect_child_t *ic;
- while ((ic = list_head(&is->is_unique_child)) != NULL)
- list_remove(&is->is_unique_child, ic);
+ while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
+ ;
list_destroy(&is->is_unique_child);
kmem_free(is,
offsetof(indirect_split_t, is_child[is->is_children]));
}
kmem_free(iv, sizeof (*iv));
}
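/*
 * The hunk above depends on list_remove_head() combining the former
 * list_head()/list_remove() pair into one call that unlinks and returns the
 * head, or NULL once the list is empty. A hedged sketch of the equivalence
 * (illustration only, not compiled here):
 */
#if 0
	/* Before: peek at the head, then unlink it explicitly. */
	while ((is = list_head(&iv->iv_splits)) != NULL) {
		list_remove(&iv->iv_splits, is);
		/* ... tear down 'is' ... */
	}

	/* After: unlink and return the head in a single step. */
	while ((is = list_remove_head(&iv->iv_splits)) != NULL) {
		/* ... tear down 'is' ... */
	}
#endif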
static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
.vsd_free = vdev_indirect_map_free,
};
/*
* Mark the given offset and size as being obsolete.
*/
void
vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(size > 0);
VERIFY(vdev_indirect_mapping_entry_for_offset(
vd->vdev_indirect_mapping, offset) != NULL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
mutex_enter(&vd->vdev_obsolete_lock);
range_tree_add(vd->vdev_obsolete_segments, offset, size);
mutex_exit(&vd->vdev_obsolete_lock);
vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
}
}
/*
* Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
* wrapper is provided because the DMU does not know about vdev_t's and
* cannot directly call vdev_indirect_mark_obsolete.
*/
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
ASSERT(dmu_tx_is_syncing(tx));
/* The DMU can only remap indirect vdevs. */
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
vdev_indirect_mark_obsolete(vd, offset, size);
}
static spa_condensing_indirect_t *
spa_condensing_indirect_create(spa_t *spa)
{
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
objset_t *mos = spa->spa_meta_objset;
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&sci->sci_new_mapping_entries[i],
sizeof (vdev_indirect_mapping_entry_t),
offsetof(vdev_indirect_mapping_entry_t, vime_node));
}
sci->sci_new_mapping =
vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
return (sci);
}
static void
spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
{
for (int i = 0; i < TXG_SIZE; i++)
list_destroy(&sci->sci_new_mapping_entries[i]);
if (sci->sci_new_mapping != NULL)
vdev_indirect_mapping_close(sci->sci_new_mapping);
kmem_free(sci, sizeof (*sci));
}
boolean_t
vdev_indirect_should_condense(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
if (!zfs_condense_indirect_vdevs_enable)
return (B_FALSE);
/*
* We can only condense one indirect vdev at a time.
*/
if (spa->spa_condensing_indirect != NULL)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
/*
* The mapping object size must not change while we are
* condensing, so we can only condense indirect vdevs
* (not vdevs that are still in the middle of being removed).
*/
if (vd->vdev_ops != &vdev_indirect_ops)
return (B_FALSE);
/*
* If nothing new has been marked obsolete, there is no
* point in condensing.
*/
uint64_t obsolete_sm_obj __maybe_unused;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
if (vd->vdev_obsolete_sm == NULL) {
ASSERT0(obsolete_sm_obj);
return (B_FALSE);
}
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));
uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
uint64_t mapping_size = vdev_indirect_mapping_size(vim);
uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
ASSERT3U(bytes_obsolete, <=, bytes_mapped);
/*
* If a high percentage of the bytes that are mapped have become
* obsolete, condense (unless the mapping is already small enough).
* This has a good chance of reducing the amount of memory used
* by the mapping.
*/
if (bytes_obsolete * 100 / bytes_mapped >=
zfs_condense_indirect_obsolete_pct &&
mapping_size > zfs_condense_min_mapping_bytes) {
zfs_dbgmsg("should condense vdev %llu because obsolete "
"spacemap covers %d%% of %lluMB mapping",
(u_longlong_t)vd->vdev_id,
(int)(bytes_obsolete * 100 / bytes_mapped),
(u_longlong_t)bytes_mapped / 1024 / 1024);
return (B_TRUE);
}
/*
* If the obsolete space map takes up too much space on disk,
* condense in order to free up this disk space.
*/
if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
zfs_dbgmsg("should condense vdev %llu because obsolete sm "
"length %lluMB >= max size %lluMB",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)obsolete_sm_size / 1024 / 1024,
(u_longlong_t)zfs_condense_max_obsolete_bytes /
1024 / 1024);
return (B_TRUE);
}
return (B_FALSE);
}
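/*
 * Worked example of the two triggers above (illustrative numbers only): with
 * 1 GiB mapped and 300 MiB obsolete, roughly 29% >= the 25% default, so the
 * first test fires as long as the in-core mapping is larger than
 * zfs_condense_min_mapping_bytes (128 KiB). Independently, an obsolete space
 * map whose on-disk length reaches zfs_condense_max_obsolete_bytes (1 GiB)
 * triggers a condense even if the obsolete percentage is low.
 */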
/*
* This sync task completes (finishes) a condense, deleting the old
* mapping and replacing it with the new one.
*/
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
objset_t *mos = spa->spa_meta_objset;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
uint64_t new_count =
vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
}
ASSERT(vic->vic_mapping_object != 0);
ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
ASSERT(scip->scip_next_mapping_object != 0);
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
/*
* Reset vdev_indirect_mapping to refer to the new object.
*/
rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vd->vdev_indirect_mapping = sci->sci_new_mapping;
rw_exit(&vd->vdev_indirect_rwlock);
sci->sci_new_mapping = NULL;
vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
vic->vic_mapping_object = scip->scip_next_mapping_object;
scip->scip_next_mapping_object = 0;
space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
scip->scip_prev_obsolete_sm_object = 0;
scip->scip_vdev = 0;
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, tx));
spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = NULL;
zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
"new mapping object %llu has %llu entries "
"(was %llu entries)",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)vic->vic_mapping_object,
(u_longlong_t)new_count, (u_longlong_t)old_count);
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* This sync task appends entries to the new mapping object.
*/
static void
spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa __maybe_unused = dmu_tx_pool(tx)->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
&sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
}
/*
* Open-context function to add one entry to the new mapping. The new
* entry will be remembered and written from syncing context.
*/
static void
spa_condense_indirect_commit_entry(spa_t *spa,
vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
/*
* If we are the first entry committed this txg, kick off the sync
* task to write to the MOS on our behalf.
*/
if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
dsl_sync_task_nowait(dmu_tx_pool(tx),
spa_condense_indirect_commit_sync, sci, tx);
}
vdev_indirect_mapping_entry_t *vime =
kmem_alloc(sizeof (*vime), KM_SLEEP);
vime->vime_mapping = *vimep;
vime->vime_obsolete_count = count;
list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
dmu_tx_commit(tx);
}
static void
spa_condense_indirect_generate_new_mapping(vdev_t *vd,
uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
{
spa_t *spa = vd->vdev_spa;
uint64_t mapi = start_index;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
uint64_t old_num_entries =
vdev_indirect_mapping_num_entries(old_mapping);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
zfs_dbgmsg("starting condense of vdev %llu from index %llu",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)mapi);
while (mapi < old_num_entries) {
if (zthr_iscancelled(zthr)) {
zfs_dbgmsg("pausing condense of vdev %llu "
"at index %llu", (u_longlong_t)vd->vdev_id,
(u_longlong_t)mapi);
break;
}
vdev_indirect_mapping_entry_phys_t *entry =
&old_mapping->vim_entries[mapi];
uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
ASSERT3U(obsolete_counts[mapi], <=, entry_size);
if (obsolete_counts[mapi] < entry_size) {
spa_condense_indirect_commit_entry(spa, entry,
obsolete_counts[mapi]);
/*
* This delay may be requested for testing, debugging,
* or performance reasons.
*/
hrtime_t now = gethrtime();
hrtime_t sleep_until = now + MSEC2NSEC(
zfs_condense_indirect_commit_entry_delay_ms);
zfs_sleep_until(sleep_until);
}
mapi++;
}
}
static boolean_t
spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
{
(void) zthr;
spa_t *spa = arg;
return (spa->spa_condensing_indirect != NULL);
}
static void
spa_condense_indirect_thread(void *arg, zthr_t *zthr)
{
spa_t *spa = arg;
vdev_t *vd;
ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint32_t *counts;
uint64_t start_index;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
space_map_t *prev_obsolete_sm = NULL;
ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
ASSERT(scip->scip_next_mapping_object != 0);
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
for (int i = 0; i < TXG_SIZE; i++) {
/*
* The list must start out empty in order for the
* _commit_sync() sync task to be properly registered
* on the first call to _commit_entry(); so it's wise
* to double check and ensure we actually are starting
* with empty lists.
*/
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
}
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
if (prev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
counts, prev_obsolete_sm);
}
space_map_close(prev_obsolete_sm);
/*
* Generate new mapping. Determine what index to continue from
* based on the max offset that we've already written in the
* new mapping.
*/
uint64_t max_offset =
vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
if (max_offset == 0) {
/* We haven't written anything to the new mapping yet. */
start_index = 0;
} else {
/*
* Pick up from where we left off. _entry_for_offset()
* returns a pointer into the vim_entries array. If
* max_offset is greater than any of the mappings
* contained in the table NULL will be returned and
* that indicates we've exhausted our iteration of the
* old_mapping.
*/
vdev_indirect_mapping_entry_phys_t *entry =
vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
max_offset);
if (entry == NULL) {
/*
* We've already written the whole new mapping.
* This special value will cause us to skip the
* generate_new_mapping step and just do the sync
* task to complete the condense.
*/
start_index = UINT64_MAX;
} else {
start_index = entry - old_mapping->vim_entries;
ASSERT3U(start_index, <,
vdev_indirect_mapping_num_entries(old_mapping));
}
}
spa_condense_indirect_generate_new_mapping(vd, counts,
start_index, zthr);
vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
/*
* If the zthr has received a cancellation signal while running
* in generate_new_mapping() or at any point after that, then bail
* early. We don't want to complete the condense if the spa is
* shutting down.
*/
if (zthr_iscancelled(zthr))
return;
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
spa_condense_indirect_complete_sync, sci, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
/*
* Sync task to begin the condensing process.
*/
void
spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
ASSERT0(scip->scip_next_mapping_object);
ASSERT0(scip->scip_prev_obsolete_sm_object);
ASSERT0(scip->scip_vdev);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
uint64_t obsolete_sm_obj;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
ASSERT3U(obsolete_sm_obj, !=, 0);
scip->scip_vdev = vd->vdev_id;
scip->scip_next_mapping_object =
vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
/*
* We don't need to allocate a new space map object, since
* vdev_indirect_sync_obsolete will allocate one when needed.
*/
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (*scip) / sizeof (uint64_t), scip, tx));
ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
"posm=%llu nm=%llu",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)scip->scip_prev_obsolete_sm_object,
(u_longlong_t)scip->scip_next_mapping_object);
zthr_wakeup(spa->spa_condense_zthr);
}
/*
* Sync to the given vdev's obsolete space map any segments that are no longer
* referenced as of the given txg.
*
* If the obsolete space map doesn't exist yet, create and open it.
*/
void
vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
ASSERT3U(vic->vic_mapping_object, !=, 0);
ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object == 0) {
obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
zfs_vdev_standard_sm_blksz, tx);
ASSERT(vd->vdev_top_zap != 0);
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
ASSERT3U(obsolete_sm_object, !=, 0);
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
spa->spa_meta_objset, obsolete_sm_object,
0, vd->vdev_asize, 0));
}
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
space_map_write(vd->vdev_obsolete_sm,
vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
}
int
spa_condense_init(spa_t *spa)
{
int error = zap_lookup(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
&spa->spa_condensing_indirect_phys);
if (error == 0) {
if (spa_writeable(spa)) {
spa->spa_condensing_indirect =
spa_condensing_indirect_create(spa);
}
return (0);
} else if (error == ENOENT) {
return (0);
} else {
return (error);
}
}
void
spa_condense_fini(spa_t *spa)
{
if (spa->spa_condensing_indirect != NULL) {
spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = NULL;
}
}
void
spa_start_indirect_condensing_thread(spa_t *spa)
{
ASSERT3P(spa->spa_condense_zthr, ==, NULL);
spa->spa_condense_zthr = zthr_create("z_indirect_condense",
spa_condense_indirect_thread_check,
spa_condense_indirect_thread, spa, minclsyspri);
}
/*
* Gets the obsolete spacemap object from the vdev's ZAP. On success sm_obj
* will contain either the obsolete spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
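/*
 * Hedged usage sketch (not part of this change), mirroring how callers
 * earlier in this file consume the result: a zero *sm_obj means "no obsolete
 * space map yet" rather than an error.
 */
#if 0
	uint64_t obsolete_sm_obj;

	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
	if (obsolete_sm_obj == 0) {
		/* Nothing has been marked obsolete since the last condense. */
	}
#endif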
/*
* Gets whether the obsolete counts in the vdev's ZAP are precise.
* On success are_precise will be set to reflect if the counts are precise.
* All other errors are returned to the caller.
*/
int
vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*are_precise = B_FALSE;
return (0);
}
uint64_t val = 0;
int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
if (error == 0) {
*are_precise = (val != 0);
} else if (error == ENOENT) {
*are_precise = B_FALSE;
error = 0;
}
return (error);
}
static void
vdev_indirect_close(vdev_t *vd)
{
(void) vd;
}
static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
*psize = *max_psize = vd->vdev_asize +
VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
*logical_ashift = vd->vdev_ashift;
*physical_ashift = vd->vdev_physical_ashift;
return (0);
}
typedef struct remap_segment {
vdev_t *rs_vd;
uint64_t rs_offset;
uint64_t rs_asize;
uint64_t rs_split_offset;
list_node_t rs_node;
} remap_segment_t;
static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
rs->rs_vd = vd;
rs->rs_offset = offset;
rs->rs_asize = asize;
rs->rs_split_offset = split_offset;
return (rs);
}
/*
* Given an indirect vdev and an extent on that vdev, it duplicates the
* physical entries of the indirect mapping that correspond to the extent
* to a new array and returns a pointer to it. In addition, copied_entries
* is populated with the number of mapping entries that were duplicated.
*
* Note that the function assumes that the caller holds vdev_indirect_rwlock.
* This ensures that the mapping won't change due to condensing as we
* copy over its contents.
*
* Finally, since we are doing an allocation, it is up to the caller to
* free the array allocated in this function.
*/
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
uint64_t asize, uint64_t *copied_entries)
{
vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t entries = 0;
ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
vdev_indirect_mapping_entry_phys_t *first_mapping =
vdev_indirect_mapping_entry_for_offset(vim, offset);
ASSERT3P(first_mapping, !=, NULL);
vdev_indirect_mapping_entry_phys_t *m = first_mapping;
while (asize > 0) {
uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
uint64_t inner_size = MIN(asize, size - inner_offset);
offset += inner_size;
asize -= inner_size;
entries++;
m++;
}
size_t copy_length = entries * sizeof (*first_mapping);
duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
memcpy(duplicate_mappings, first_mapping, copy_length);
*copied_entries = entries;
return (duplicate_mappings);
}
/*
* Goes through the relevant indirect mappings until it hits a concrete vdev
* and issues the callback. On the way to the concrete vdev, if any other
* indirect vdevs are encountered, then the callback will also be called on
* each of those indirect vdevs. For example, if the segment is mapped to
* segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
* mapped to segment B on concrete vdev 2, then the callback will be called on
* both vdev 1 and vdev 2.
*
* While the callback passed to vdev_indirect_remap() is called on every vdev
* the function encounters, certain callbacks only care about concrete vdevs.
* These types of callbacks should return immediately and explicitly when they
* are called on an indirect vdev.
*
* Because there is a possibility that a DVA section in the indirect device
* has been split into multiple sections in our mapping, we keep track
* of the relevant contiguous segments of the new location (remap_segment_t)
* in a stack. This way we can call the callback for each of the new sections
* created by a single section of the indirect device. Note though, that in
* this scenario the callbacks in each split block won't occur in-order in
* terms of offset, so callers should not make any assumptions about that.
*
* For callbacks that don't handle split blocks and immediately return when
* they encounter them (as is the case for remap_blkptr_cb), the caller can
* assume that its callback will be applied from the first indirect vdev
* encountered to the last one and then the concrete vdev, in that order.
*/
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
{
list_t stack;
spa_t *spa = vd->vdev_spa;
list_create(&stack, sizeof (remap_segment_t),
offsetof(remap_segment_t, rs_node));
for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
rs != NULL; rs = list_remove_head(&stack)) {
vdev_t *v = rs->rs_vd;
uint64_t num_entries = 0;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
ASSERT(rs->rs_asize > 0);
/*
* Note: As this function can be called from open context
* (e.g. zio_read()), we need the following rwlock to
* prevent the mapping from being changed by condensing.
*
* So we grab the lock and we make a copy of the entries
* that are relevant to the extent that we are working on.
* Once that is done, we drop the lock and iterate over
* our copy of the mapping. Once we are done with
* the remap segment and we free it, we also free our copy
* of the indirect mapping entries that are relevant to it.
*
* This way we don't need to wait until the function is
* finished with a segment, to condense it. In addition, we
* don't need a recursive rwlock for the case that a call to
* vdev_indirect_remap() needs to call itself (through the
* codepath of its callback) for the same vdev in the middle
* of its execution.
*/
rw_enter(&v->vdev_indirect_rwlock, RW_READER);
ASSERT3P(v->vdev_indirect_mapping, !=, NULL);
vdev_indirect_mapping_entry_phys_t *mapping =
vdev_indirect_mapping_duplicate_adjacent_entries(v,
rs->rs_offset, rs->rs_asize, &num_entries);
ASSERT3P(mapping, !=, NULL);
ASSERT3U(num_entries, >, 0);
rw_exit(&v->vdev_indirect_rwlock);
for (uint64_t i = 0; i < num_entries; i++) {
/*
* Note: the vdev_indirect_mapping can not change
* while we are running. It only changes while the
* removal is in progress, and then only from syncing
* context. While a removal is in progress, this
* function is only called for frees, which also only
* happen from syncing context.
*/
vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
ASSERT3P(m, !=, NULL);
ASSERT3U(rs->rs_asize, >, 0);
uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
ASSERT3U(rs->rs_offset, >=,
DVA_MAPPING_GET_SRC_OFFSET(m));
ASSERT3U(rs->rs_offset, <,
DVA_MAPPING_GET_SRC_OFFSET(m) + size);
ASSERT3U(dst_vdev, !=, v->vdev_id);
uint64_t inner_offset = rs->rs_offset -
DVA_MAPPING_GET_SRC_OFFSET(m);
uint64_t inner_size =
MIN(rs->rs_asize, size - inner_offset);
vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
ASSERT3P(dst_v, !=, NULL);
if (dst_v->vdev_ops == &vdev_indirect_ops) {
list_insert_head(&stack,
rs_alloc(dst_v, dst_offset + inner_offset,
inner_size, rs->rs_split_offset));
}
if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
/*
* Note: This clause exists solely for
* testing purposes. We use it to ensure that
* split blocks work and that the callbacks
* using them yield the same result if issued
* in reverse order.
*/
uint64_t inner_half = inner_size / 2;
func(rs->rs_split_offset + inner_half, dst_v,
dst_offset + inner_offset + inner_half,
inner_half, arg);
func(rs->rs_split_offset, dst_v,
dst_offset + inner_offset,
inner_half, arg);
} else {
func(rs->rs_split_offset, dst_v,
dst_offset + inner_offset,
inner_size, arg);
}
rs->rs_offset += inner_size;
rs->rs_asize -= inner_size;
rs->rs_split_offset += inner_size;
}
VERIFY0(rs->rs_asize);
kmem_free(mapping, num_entries * sizeof (*mapping));
kmem_free(rs, sizeof (remap_segment_t));
}
list_destroy(&stack);
}
static void
vdev_indirect_child_io_done(zio_t *zio)
{
zio_t *pio = zio->io_private;
mutex_enter(&pio->io_lock);
pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
mutex_exit(&pio->io_lock);
abd_free(zio->io_abd);
}
/*
* This is a callback for vdev_indirect_remap() which allocates an
* indirect_split_t for each split segment and adds it to iv_splits.
*/
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
zio_t *zio = arg;
indirect_vsd_t *iv = zio->io_vsd;
ASSERT3P(vd, !=, NULL);
if (vd->vdev_ops == &vdev_indirect_ops)
return;
int n = 1;
if (vd->vdev_ops == &vdev_mirror_ops)
n = vd->vdev_children;
indirect_split_t *is =
kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
is->is_children = n;
is->is_size = size;
is->is_split_offset = split_offset;
is->is_target_offset = offset;
is->is_vdev = vd;
list_create(&is->is_unique_child, sizeof (indirect_child_t),
offsetof(indirect_child_t, ic_node));
/*
* Note that we only consider multiple copies of the data for
* *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
* though they use the same ops as mirror, because there's only one
* "good" copy under the replacing/spare.
*/
if (vd->vdev_ops == &vdev_mirror_ops) {
for (int i = 0; i < n; i++) {
is->is_child[i].ic_vdev = vd->vdev_child[i];
list_link_init(&is->is_child[i].ic_node);
}
} else {
is->is_child[0].ic_vdev = vd;
}
list_insert_tail(&iv->iv_splits, is);
}
static void
vdev_indirect_read_split_done(zio_t *zio)
{
indirect_child_t *ic = zio->io_private;
if (zio->io_error != 0) {
/*
* Clear ic_data to indicate that we do not have data for this
* child.
*/
abd_free(ic->ic_data);
ic->ic_data = NULL;
}
}
/*
* Issue reads for all copies (mirror children) of all splits.
*/
static void
vdev_indirect_read_all(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic = &is->is_child[i];
if (!vdev_readable(ic->ic_vdev))
continue;
/*
* If a child is missing the data, set ic_error. Used
* in vdev_indirect_repair(). We perform the read
* nevertheless which provides the opportunity to
* reconstruct the split block if at all possible.
*/
if (vdev_dtl_contains(ic->ic_vdev, DTL_MISSING,
zio->io_txg, 1))
ic->ic_error = SET_ERROR(ESTALE);
ic->ic_data = abd_alloc_sametype(zio->io_abd,
is->is_size);
ic->ic_duplicate = NULL;
zio_nowait(zio_vdev_child_io(zio, NULL,
ic->ic_vdev, is->is_target_offset, ic->ic_data,
is->is_size, zio->io_type, zio->io_priority, 0,
vdev_indirect_read_split_done, ic));
}
}
iv->iv_reconstruct = B_TRUE;
}
static void
vdev_indirect_io_start(zio_t *zio)
{
spa_t *spa __maybe_unused = zio->io_spa;
indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
list_create(&iv->iv_splits,
sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
zio->io_vsd = iv;
zio->io_vsd_ops = &vdev_indirect_vsd_ops;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (zio->io_type != ZIO_TYPE_READ) {
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
/*
* Note: this code can handle other kinds of writes,
* but we don't expect them.
*/
ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
}
vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
vdev_indirect_gather_splits, zio);
indirect_split_t *first = list_head(&iv->iv_splits);
ASSERT3P(first, !=, NULL);
if (first->is_size == zio->io_size) {
/*
* This is not a split block; we are pointing to the entire
* data, which will checksum the same as the original data.
* Pass the BP down so that the child i/o can verify the
* checksum, and try a different location if available
* (e.g. on a mirror).
*
* While this special case could be handled the same as the
* general (split block) case, doing it this way ensures
* that the vast majority of blocks on indirect vdevs
* (which are not split) are handled identically to blocks
* on non-indirect vdevs. This allows us to be less strict
* about performance in the general (but rare) case.
*/
ASSERT0(first->is_split_offset);
ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
first->is_vdev, first->is_target_offset,
abd_get_offset(zio->io_abd, 0),
zio->io_size, zio->io_type, zio->io_priority, 0,
vdev_indirect_child_io_done, zio));
} else {
iv->iv_split_block = B_TRUE;
if (zio->io_type == ZIO_TYPE_READ &&
zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
/*
* Read all copies. Note that for simplicity,
* we don't bother consulting the DTL in the
* resilver case.
*/
vdev_indirect_read_all(zio);
} else {
/*
* If this is a read zio, we read one copy of each
* split segment, from the top-level vdev. Since
* we don't know the checksum of each split
* individually, the child zio can't ensure that
* we get the right data. E.g. if it's a mirror,
* it will just read from a random (healthy) leaf
* vdev. We have to verify the checksum in
* vdev_indirect_io_done().
*
* For write zios, the vdev code will ensure we write
* to all children.
*/
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
zio_nowait(zio_vdev_child_io(zio, NULL,
is->is_vdev, is->is_target_offset,
abd_get_offset_size(zio->io_abd,
is->is_split_offset, is->is_size),
is->is_size, zio->io_type,
zio->io_priority, 0,
vdev_indirect_child_io_done, zio));
}
}
}
zio_execute(zio);
}
/*
* Report a checksum error for a child.
*/
static void
vdev_indirect_checksum_error(zio_t *zio,
indirect_split_t *is, indirect_child_t *ic)
{
vdev_t *vd = ic->ic_vdev;
if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
return;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
zio_bad_cksum_t zbc = {{{ 0 }}};
abd_t *bad_abd = ic->ic_data;
abd_t *good_abd = is->is_good_child->ic_data;
(void) zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc);
}
/*
* Issue repair i/os for any incorrect copies. We do this by comparing
* each split segment's correct data (is_good_child's ic_data) with each
* other copy of the data. If they differ, then we overwrite the bad data
* with the good copy. The DTL is checked in vdev_indirect_read_all() and
* if a vdev is missing a copy of the data we set ic_error and the read is
* performed. This provides the opportunity to reconstruct the split block
* if at all possible. ic_error is checked here and if set it suppresses
* incrementing the checksum counter. Aside from this, DTLs are not checked,
* which simplifies this code and also issues the optimal number of writes
* (based on which copies actually read bad data, as opposed to which we
* think might be wrong). For the same reason, we always use
* ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
*/
static void
vdev_indirect_repair(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (!spa_writeable(zio->io_spa))
return;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic == is->is_good_child)
continue;
if (ic->ic_data == NULL)
continue;
if (ic->ic_duplicate == is->is_good_child)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL,
ic->ic_vdev, is->is_target_offset,
is->is_good_child->ic_data, is->is_size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
NULL, NULL));
/*
* If ic_error is set the current child does not have
* a copy of the data, so suppress incrementing the
* checksum counter.
*/
if (ic->ic_error == ESTALE)
continue;
vdev_indirect_checksum_error(zio, is, ic);
}
}
}
/*
* Report checksum errors on all children that we read from.
*/
static void
vdev_indirect_all_checksum_errors(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
return;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic->ic_data == NULL)
continue;
vdev_t *vd = ic->ic_vdev;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
(void) zfs_ereport_post_checksum(zio->io_spa, vd,
NULL, zio, is->is_target_offset, is->is_size,
NULL, NULL, NULL);
}
}
}
/*
* Copy data from all the splits to a main zio, then validate the checksum.
* If the checksum is successfully validated, return success.
*/
static int
vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
{
zio_bad_cksum_t zbc;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
ASSERT3P(is->is_good_child->ic_data, !=, NULL);
ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
is->is_split_offset, 0, is->is_size);
}
return (zio_checksum_error(zio, &zbc));
}
/*
* There are relatively few possible combinations making it feasible to
* deterministically check them all. We do this by setting the good_child
* to the next unique split version. If we reach the end of the list then
* "carry over" to the next unique split version (like counting in base
* is_unique_children, but each digit can have a different base).
*/
static int
vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
{
boolean_t more = B_TRUE;
iv->iv_attempts = 0;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is))
is->is_good_child = list_head(&is->is_unique_child);
while (more == B_TRUE) {
iv->iv_attempts++;
more = B_FALSE;
if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
return (0);
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_good_child = list_next(&is->is_unique_child,
is->is_good_child);
if (is->is_good_child != NULL) {
more = B_TRUE;
break;
}
is->is_good_child = list_head(&is->is_unique_child);
}
}
ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);
return (SET_ERROR(ECKSUM));
}
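/*
 * A minimal standalone sketch (not part of this change) of the "counting
 * with a different base per digit" idea used above: digit i runs over
 * base[i] values and carries into digit i + 1 when it wraps, so every
 * combination is visited exactly once.
 */
#if 0	/* illustrative example only; compile separately */
#include <stdio.h>

int
main(void)
{
	int base[3] = { 2, 3, 2 };	/* e.g. unique copies per split */
	int digit[3] = { 0, 0, 0 };
	int combos = 0;
	int more = 1;

	while (more) {
		combos++;	/* "check" digit[0], digit[1], digit[2] here */
		more = 0;
		for (int i = 0; i < 3; i++) {
			if (++digit[i] < base[i]) {
				more = 1;
				break;
			}
			digit[i] = 0;	/* wrapped: carry into next digit */
		}
	}
	printf("visited %d combinations (2*3*2 = 12)\n", combos);
	return (0);
}
#endif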
/*
* There are too many combinations to try all of them in a reasonable amount
* of time. So try a fixed number of random combinations from the unique
* split versions, after which we'll consider the block unrecoverable.
*/
static int
vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
{
iv->iv_attempts = 0;
while (iv->iv_attempts < iv->iv_attempts_max) {
iv->iv_attempts++;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
indirect_child_t *ic = list_head(&is->is_unique_child);
int children = is->is_unique_children;
for (int i = random_in_range(children); i > 0; i--)
ic = list_next(&is->is_unique_child, ic);
ASSERT3P(ic, !=, NULL);
is->is_good_child = ic;
}
if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
return (0);
}
return (SET_ERROR(ECKSUM));
}
/*
* This is a validation function for reconstruction. It randomly selects
* a good combination, if one can be found, and then it intentionally
* damages all other segment copies by zeroing them. This forces the
* reconstruction algorithm to locate the one remaining known good copy.
*/
static int
vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
{
int error;
/* Presume all the copies are unique for initial selection. */
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_unique_children = 0;
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic = &is->is_child[i];
if (ic->ic_data != NULL) {
is->is_unique_children++;
list_insert_tail(&is->is_unique_child, ic);
}
}
if (list_is_empty(&is->is_unique_child)) {
error = SET_ERROR(EIO);
goto out;
}
}
/*
* Set each is_good_child to a randomly-selected child which
* is known to contain validated data.
*/
error = vdev_indirect_splits_enumerate_randomly(iv, zio);
if (error)
goto out;
/*
* Damage all but the known good copy by zeroing it. This will
* result in two or fewer unique copies per indirect_child_t.
* Both may need to be checked in order to reconstruct the block.
* Set iv->iv_attempts_max such that all unique combinations will be
* enumerated, but limit the damage to at most 12 indirect splits.
*/
iv->iv_attempts_max = 1;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic == is->is_good_child)
continue;
if (ic->ic_data == NULL)
continue;
abd_zero(ic->ic_data, abd_get_size(ic->ic_data));
}
iv->iv_attempts_max *= 2;
if (iv->iv_attempts_max >= (1ULL << 12)) {
iv->iv_attempts_max = UINT64_MAX;
break;
}
}
out:
/* Empty the unique children lists so they can be reconstructed. */
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
indirect_child_t *ic;
- while ((ic = list_head(&is->is_unique_child)) != NULL)
- list_remove(&is->is_unique_child, ic);
+ while ((ic = list_remove_head(&is->is_unique_child)) != NULL)
+ ;
is->is_unique_children = 0;
}
return (error);
}
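/*
 * Illustrative arithmetic for the cap above: each damaged split is left with
 * at most two unique versions (the good copy plus the zeroed ones), so k
 * damaged splits yield at most 2^k combinations, and iv_attempts_max is
 * doubled to match. Once that reaches 2^12 = 4096 the loop stops damaging
 * further splits and lifts the attempt limit, so the exhaustive enumeration
 * path is still taken.
 */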
/*
* This function is called when we have read all copies of the data and need
* to try to find a combination of copies that gives us the right checksum.
*
* If we pointed to any mirror vdevs, this effectively does the job of the
* mirror. The mirror vdev code can't do its own job because we don't know
* the checksum of each split segment individually.
*
* We have to try every unique combination of copies of split segments, until
* we find one that checksums correctly. Duplicate segment copies are first
* identified and later skipped during reconstruction. This optimization
* reduces the search space and ensures that of the remaining combinations
* at most one is correct.
*
* When the total number of combinations is small they can all be checked.
* For example, if we have 3 segments in the split, and each points to a
* 2-way mirror with unique copies, we will have the following pieces of data:
*
* | mirror child
* split | [0] [1]
* ======|=====================
* A | data_A_0 data_A_1
* B | data_B_0 data_B_1
* C | data_C_0 data_C_1
*
* We will try the following (mirror children)^(number of splits) (2^3=8)
* combinations, which is similar to bitwise-little-endian counting in
* binary. In general each "digit" corresponds to a split segment, and the
* base of each digit is is_children, which can be different for each
* digit.
*
* "low bit" "high bit"
* v v
* data_A_0 data_B_0 data_C_0
* data_A_1 data_B_0 data_C_0
* data_A_0 data_B_1 data_C_0
* data_A_1 data_B_1 data_C_0
* data_A_0 data_B_0 data_C_1
* data_A_1 data_B_0 data_C_1
* data_A_0 data_B_1 data_C_1
* data_A_1 data_B_1 data_C_1
*
* Note that the split segments may be on the same or different top-level
* vdevs. In either case, we may need to try lots of combinations (see
* zfs_reconstruct_indirect_combinations_max). This ensures that if a mirror
* has small silent errors on all of its children, we can still reconstruct
* the correct data, as long as those errors are at sufficiently-separated
* offsets (specifically, separated by the largest block size - default of
* 128KB, but up to 16MB).
*/
static void
vdev_indirect_reconstruct_io_done(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
boolean_t known_good = B_FALSE;
int error;
iv->iv_unique_combinations = 1;
iv->iv_attempts_max = UINT64_MAX;
if (zfs_reconstruct_indirect_combinations_max > 0)
iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;
/*
* If nonzero, every 1/x blocks will be damaged, in order to validate
* reconstruction when there are split segments with damaged copies.
* Known_good will be TRUE when reconstruction is known to be possible.
*/
if (zfs_reconstruct_indirect_damage_fraction != 0 &&
random_in_range(zfs_reconstruct_indirect_damage_fraction) == 0)
known_good = (vdev_indirect_splits_damage(iv, zio) == 0);
/*
* Determine the unique children for a split segment and add them
* to the is_unique_child list. By restricting reconstruction
* to these children, only unique combinations will be considered.
* This can vastly reduce the search space when there are a large
* number of indirect splits.
*/
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_unique_children = 0;
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic_i = &is->is_child[i];
if (ic_i->ic_data == NULL ||
ic_i->ic_duplicate != NULL)
continue;
for (int j = i + 1; j < is->is_children; j++) {
indirect_child_t *ic_j = &is->is_child[j];
if (ic_j->ic_data == NULL ||
ic_j->ic_duplicate != NULL)
continue;
if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0)
ic_j->ic_duplicate = ic_i;
}
is->is_unique_children++;
list_insert_tail(&is->is_unique_child, ic_i);
}
/* Reconstruction is impossible, no valid children */
EQUIV(list_is_empty(&is->is_unique_child),
is->is_unique_children == 0);
if (list_is_empty(&is->is_unique_child)) {
zio->io_error = EIO;
vdev_indirect_all_checksum_errors(zio);
zio_checksum_verified(zio);
return;
}
iv->iv_unique_combinations *= is->is_unique_children;
}
if (iv->iv_unique_combinations <= iv->iv_attempts_max)
error = vdev_indirect_splits_enumerate_all(iv, zio);
else
error = vdev_indirect_splits_enumerate_randomly(iv, zio);
if (error != 0) {
/* All attempted combinations failed. */
ASSERT3B(known_good, ==, B_FALSE);
zio->io_error = error;
vdev_indirect_all_checksum_errors(zio);
} else {
/*
* The checksum has been successfully validated. Issue
* repair I/Os to any copies of splits which don't match
* the validated version.
*/
ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
vdev_indirect_repair(zio);
zio_checksum_verified(zio);
}
}
static void
vdev_indirect_io_done(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (iv->iv_reconstruct) {
/*
* We have read all copies of the data (e.g. from mirrors),
* either because this was a scrub/resilver, or because the
* one-copy read didn't checksum correctly.
*/
vdev_indirect_reconstruct_io_done(zio);
return;
}
if (!iv->iv_split_block) {
/*
* This was not a split block, so we passed the BP down,
* and the checksum was handled by the (one) child zio.
*/
return;
}
zio_bad_cksum_t zbc;
int ret = zio_checksum_error(zio, &zbc);
if (ret == 0) {
zio_checksum_verified(zio);
return;
}
/*
* The checksum didn't match. Read all copies of all splits, and
* then we will try to reconstruct. The next time
* vdev_indirect_io_done() is called, iv_reconstruct will be set.
*/
vdev_indirect_read_all(zio);
zio_vdev_io_redone(zio);
}
vdev_ops_t vdev_indirect_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_indirect_open,
.vdev_op_close = vdev_indirect_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_indirect_io_start,
.vdev_op_io_done = vdev_indirect_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = vdev_indirect_remap,
.vdev_op_xlate = NULL,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_INDIRECT, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* leaf vdev */
};
EXPORT_SYMBOL(spa_condense_fini);
EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
EXPORT_SYMBOL(spa_condense_indirect_start_sync);
EXPORT_SYMBOL(spa_condense_init);
EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_should_condense);
EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
EXPORT_SYMBOL(vdev_obsolete_sm_object);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, UINT,
ZMOD_RW,
"Minimum obsolete percent of bytes in the mapping "
"to attempt condensing");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, min_mapping_bytes, U64, ZMOD_RW,
"Don't bother condensing if the mapping uses less than this amount of "
"memory");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, U64,
ZMOD_RW,
"Minimum size obsolete spacemap to attempt condensing");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
UINT, ZMOD_RW,
"Used by tests to ensure certain actions happen in the middle of a "
"condense. A maximum value of 1 should be sufficient.");
ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
UINT, ZMOD_RW,
"Maximum number of combinations when reconstructing split segments");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_queue.c b/sys/contrib/openzfs/module/zfs/vdev_queue.c
index 1a75d68abd9e..abb7d0662b8c 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_queue.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_queue.c
@@ -1,1124 +1,1123 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/abd.h>
/*
* ZFS I/O Scheduler
* ---------------
*
* ZFS issues I/O operations to leaf vdevs to satisfy and complete zios. The
* I/O scheduler determines when and in what order those operations are
* issued. The I/O scheduler divides operations into I/O classes
* prioritized in the following order: sync read, sync write, async read,
* async write, scrub/resilver, removal, initializing, TRIM, and rebuild.
* Each queue defines the minimum and
* maximum number of concurrent operations that may be issued to the device.
* In addition, the device has an aggregate maximum. Note that the sum of the
* per-queue minimums must not exceed the aggregate maximum. If the
* sum of the per-queue maximums exceeds the aggregate maximum, then the
* number of active i/os may reach zfs_vdev_max_active, in which case no
* further i/os will be issued regardless of whether all per-queue
* minimums have been met.
*
* For many physical devices, throughput increases with the number of
* concurrent operations, but latency typically suffers. Further, physical
* devices typically have a limit at which more concurrent operations have no
* effect on throughput or can actually cause it to decrease.
*
* The scheduler selects the next operation to issue by first looking for an
* I/O class whose minimum has not been satisfied. Once all are satisfied and
* the aggregate maximum has not been hit, the scheduler looks for classes
* whose maximum has not been satisfied. Iteration through the I/O classes is
* done in the order specified above. No further operations are issued if the
* aggregate maximum number of concurrent operations has been hit or if there
* are no operations queued for an I/O class that has not hit its maximum.
* Every time an i/o is queued or an operation completes, the I/O scheduler
* looks for new operations to issue.
*
* All I/O classes have a fixed maximum number of outstanding operations
* except for the async write class. Asynchronous writes represent the data
* that is committed to stable storage during the syncing stage for
* transaction groups (see txg.c). Transaction groups enter the syncing state
* periodically so the number of queued async writes will quickly burst up and
* then bleed down to zero. Rather than servicing them as quickly as possible,
* the I/O scheduler changes the maximum number of active async write i/os
* according to the amount of dirty data in the pool (see dsl_pool.c). Since
* both throughput and latency typically increase with the number of
* concurrent operations issued to physical devices, reducing the burstiness
* in the number of concurrent operations also stabilizes the response time of
* operations from other -- and in particular synchronous -- queues. In broad
* strokes, the I/O scheduler will issue more concurrent operations from the
* async write queue as there's more dirty data in the pool.
*
* Async Writes
*
* The number of concurrent operations issued for the async write I/O class
* follows a piece-wise linear function defined by a few adjustable points.
*
* | o---------| <-- zfs_vdev_async_write_max_active
* ^ | /^ |
* | | / | |
* active | / | |
* I/O | / | |
* count | / | |
* | / | |
* |------------o | | <-- zfs_vdev_async_write_min_active
* 0|____________^______|_________|
* 0% | | 100% of zfs_dirty_data_max
* | |
* | `-- zfs_vdev_async_write_active_max_dirty_percent
* `--------- zfs_vdev_async_write_active_min_dirty_percent
*
* Until the amount of dirty data exceeds a minimum percentage of the dirty
* data allowed in the pool, the I/O scheduler will limit the number of
* concurrent operations to the minimum. As that threshold is crossed, the
* number of concurrent operations issued increases linearly to the maximum at
* the specified maximum percentage of the dirty data allowed in the pool.
*
* Ideally, the amount of dirty data on a busy pool will stay in the sloped
* part of the function between zfs_vdev_async_write_active_min_dirty_percent
* and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
* maximum percentage, this indicates that the rate of incoming data is
* greater than the rate that the backend storage can handle. In this case, we
* must further throttle incoming writes (see dmu_tx_delay() for details).
*/
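/*
 * Editor's illustrative example (not part of the original source; assumes the
 * default tunables defined below): with zfs_vdev_async_write_min_active = 2,
 * zfs_vdev_async_write_max_active = 10, and the dirty-data thresholds at 30%
 * and 60% of zfs_dirty_data_max, a pool that is 45% dirty sits halfway up the
 * sloped region, so the scheduler allows roughly
 *
 *	writes = 2 + (45 - 30) * (10 - 2) / (60 - 30) = 6
 *
 * concurrently-active async write I/Os per vdev (see
 * vdev_queue_max_async_writes() below for the exact integer arithmetic).
 */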
/*
* The maximum number of i/os active to each device. Ideally, this will be >=
* the sum of each queue's max_active.
*/
uint_t zfs_vdev_max_active = 1000;
/*
* Per-queue limits on the number of i/os active to each device. If the
* number of active i/os is < zfs_vdev_max_active, then the min_active comes
* into play. We will send min_active from each queue round-robin, and then
* send from queues in the order defined by zio_priority_t up to max_active.
* Some queues have additional mechanisms to limit number of active I/Os in
* addition to min_active and max_active, see below.
*
* In general, smaller max_active's will lead to lower latency of synchronous
* operations. Larger max_active's may lead to higher overall throughput,
* depending on underlying storage.
*
* The ratio of the queues' max_actives determines the balance of performance
* between reads, writes, and scrubs. E.g., increasing
* zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
* more quickly, but cause reads and writes to have higher latency and lower
* throughput.
*/
static uint_t zfs_vdev_sync_read_min_active = 10;
static uint_t zfs_vdev_sync_read_max_active = 10;
static uint_t zfs_vdev_sync_write_min_active = 10;
static uint_t zfs_vdev_sync_write_max_active = 10;
static uint_t zfs_vdev_async_read_min_active = 1;
/* */ uint_t zfs_vdev_async_read_max_active = 3;
static uint_t zfs_vdev_async_write_min_active = 2;
/* */ uint_t zfs_vdev_async_write_max_active = 10;
static uint_t zfs_vdev_scrub_min_active = 1;
static uint_t zfs_vdev_scrub_max_active = 3;
static uint_t zfs_vdev_removal_min_active = 1;
static uint_t zfs_vdev_removal_max_active = 2;
static uint_t zfs_vdev_initializing_min_active = 1;
static uint_t zfs_vdev_initializing_max_active = 1;
static uint_t zfs_vdev_trim_min_active = 1;
static uint_t zfs_vdev_trim_max_active = 2;
static uint_t zfs_vdev_rebuild_min_active = 1;
static uint_t zfs_vdev_rebuild_max_active = 3;
/*
* When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
* dirty data, use zfs_vdev_async_write_min_active. When it has more than
* zfs_vdev_async_write_active_max_dirty_percent, use
* zfs_vdev_async_write_max_active. The value is linearly interpolated
* between min and max.
*/
uint_t zfs_vdev_async_write_active_min_dirty_percent = 30;
uint_t zfs_vdev_async_write_active_max_dirty_percent = 60;
/*
* For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
* the number of concurrently-active I/O's is limited to *_min_active, unless
* the vdev is "idle". When there are no interactive I/Os active (sync or
* async), and zfs_vdev_nia_delay I/Os have completed since the last
* interactive I/O, then the vdev is considered to be "idle", and the number
* of concurrently-active non-interactive I/O's is increased to *_max_active.
*/
static uint_t zfs_vdev_nia_delay = 5;
/*
* Some HDDs tend to prioritize sequential I/O so high that concurrent
* random I/O latency reaches several seconds. On some HDDs it happens
* even if sequential I/Os are submitted one at a time, and so setting
* *_max_active to 1 does not help. To prevent non-interactive I/Os, like
* scrub, from monopolizing the device no more than zfs_vdev_nia_credit
* I/Os can be sent while there are outstanding incomplete interactive
* I/Os. This enforced wait ensures the HDD services the interactive I/O
* within a reasonable amount of time.
*/
static uint_t zfs_vdev_nia_credit = 5;
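/*
 * Editor's illustrative summary (not part of the original source), based on
 * the accounting in vdev_queue_pending_add()/_remove() below: while any
 * interactive (sync or async) I/O is outstanding, a scrub may keep at most
 * MIN(vq_nia_credit, zfs_vdev_scrub_min_active) I/Os in flight, and each
 * scrub I/O issued during that time consumes a credit.  Once the interactive
 * I/Os drain, every completed non-interactive I/O adds a credit, and after
 * zfs_vdev_nia_delay (5) of them the scrub may ramp up to
 * zfs_vdev_scrub_max_active (3).
 */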
/*
* To reduce IOPS, we aggregate small adjacent I/Os into one large I/O.
* For read I/Os, we also aggregate across small adjacency gaps; for writes
* we include spans of optional I/Os to aid aggregation at the disk even when
* they aren't able to help us aggregate at this level.
*/
static uint_t zfs_vdev_aggregation_limit = 1 << 20;
static uint_t zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE;
static uint_t zfs_vdev_read_gap_limit = 32 << 10;
static uint_t zfs_vdev_write_gap_limit = 4 << 10;
/*
* Define the queue depth percentage for each top-level vdev. This percentage
* is used in conjunction with zfs_vdev_async_write_max_active to determine
* how many allocations a specific top-level vdev should handle. Once the
* queue depth reaches zfs_vdev_queue_depth_pct *
* zfs_vdev_async_write_max_active / 100, the allocator will stop allocating
* blocks on that top-level device. The default kernel setting is 1000%,
* which will yield 100 allocations per device. For userland testing, the
* default setting is 300%, which equates to 30 allocations per device.
*/
#ifdef _KERNEL
uint_t zfs_vdev_queue_depth_pct = 1000;
#else
uint_t zfs_vdev_queue_depth_pct = 300;
#endif
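/*
 * Editor's illustrative example (not part of the original source): with the
 * kernel default zfs_vdev_queue_depth_pct = 1000 and
 * zfs_vdev_async_write_max_active = 10, the allocator stops queueing new
 * allocations to a top-level vdev once its queue depth reaches
 * 1000 * 10 / 100 = 100 outstanding allocations.
 */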
/*
* When performing allocations for a given metaslab, we want to make sure that
* there are enough IOs to aggregate together to improve throughput. We want to
* ensure that there are at least 128k worth of IOs that can be aggregated, and
* we assume that the average allocation size is 4k, so we need the queue depth
* to be 32 per allocator to get good aggregation of sequential writes.
*/
uint_t zfs_vdev_def_queue_depth = 32;
/*
* Allow TRIM I/Os to be aggregated. This should normally not be needed since
* TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted
* by the TRIM code in vdev_trim.c.
*/
static uint_t zfs_vdev_aggregate_trim = 0;
static int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
const zio_t *z1 = (const zio_t *)x1;
const zio_t *z2 = (const zio_t *)x2;
int cmp = TREE_CMP(z1->io_offset, z2->io_offset);
if (likely(cmp))
return (cmp);
return (TREE_PCMP(z1, z2));
}
static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
return (&vq->vq_class[p].vqc_queued_tree);
}
static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE || t == ZIO_TYPE_TRIM);
if (t == ZIO_TYPE_READ)
return (&vq->vq_read_offset_tree);
else if (t == ZIO_TYPE_WRITE)
return (&vq->vq_write_offset_tree);
else
return (&vq->vq_trim_offset_tree);
}
static int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
const zio_t *z1 = (const zio_t *)x1;
const zio_t *z2 = (const zio_t *)x2;
int cmp = TREE_CMP(z1->io_timestamp, z2->io_timestamp);
if (likely(cmp))
return (cmp);
return (TREE_PCMP(z1, z2));
}
static uint_t
vdev_queue_class_min_active(vdev_queue_t *vq, zio_priority_t p)
{
switch (p) {
case ZIO_PRIORITY_SYNC_READ:
return (zfs_vdev_sync_read_min_active);
case ZIO_PRIORITY_SYNC_WRITE:
return (zfs_vdev_sync_write_min_active);
case ZIO_PRIORITY_ASYNC_READ:
return (zfs_vdev_async_read_min_active);
case ZIO_PRIORITY_ASYNC_WRITE:
return (zfs_vdev_async_write_min_active);
case ZIO_PRIORITY_SCRUB:
return (vq->vq_ia_active == 0 ? zfs_vdev_scrub_min_active :
MIN(vq->vq_nia_credit, zfs_vdev_scrub_min_active));
case ZIO_PRIORITY_REMOVAL:
return (vq->vq_ia_active == 0 ? zfs_vdev_removal_min_active :
MIN(vq->vq_nia_credit, zfs_vdev_removal_min_active));
case ZIO_PRIORITY_INITIALIZING:
return (vq->vq_ia_active == 0 ? zfs_vdev_initializing_min_active :
MIN(vq->vq_nia_credit, zfs_vdev_initializing_min_active));
case ZIO_PRIORITY_TRIM:
return (zfs_vdev_trim_min_active);
case ZIO_PRIORITY_REBUILD:
return (vq->vq_ia_active == 0 ? zfs_vdev_rebuild_min_active :
MIN(vq->vq_nia_credit, zfs_vdev_rebuild_min_active));
default:
panic("invalid priority %u", p);
return (0);
}
}
static uint_t
vdev_queue_max_async_writes(spa_t *spa)
{
uint_t writes;
uint64_t dirty = 0;
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
uint64_t max_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_max_dirty_percent / 100;
/*
* Async writes may occur before the assignment of the spa's
* dsl_pool_t if a self-healing zio is issued prior to the
* completion of dmu_objset_open_impl().
*/
if (dp == NULL)
return (zfs_vdev_async_write_max_active);
/*
* Sync tasks correspond to interactive user actions. To reduce the
* execution time of those actions, we push data out as fast as possible.
*/
dirty = dp->dp_dirty_total;
if (dirty > max_bytes || spa_has_pending_synctask(spa))
return (zfs_vdev_async_write_max_active);
if (dirty < min_bytes)
return (zfs_vdev_async_write_min_active);
/*
* linear interpolation:
* slope = (max_writes - min_writes) / (max_bytes - min_bytes)
* move right by min_bytes
* move up by min_writes
*/
writes = (dirty - min_bytes) *
(zfs_vdev_async_write_max_active -
zfs_vdev_async_write_min_active) /
(max_bytes - min_bytes) +
zfs_vdev_async_write_min_active;
ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
return (writes);
}
static uint_t
vdev_queue_class_max_active(spa_t *spa, vdev_queue_t *vq, zio_priority_t p)
{
switch (p) {
case ZIO_PRIORITY_SYNC_READ:
return (zfs_vdev_sync_read_max_active);
case ZIO_PRIORITY_SYNC_WRITE:
return (zfs_vdev_sync_write_max_active);
case ZIO_PRIORITY_ASYNC_READ:
return (zfs_vdev_async_read_max_active);
case ZIO_PRIORITY_ASYNC_WRITE:
return (vdev_queue_max_async_writes(spa));
case ZIO_PRIORITY_SCRUB:
if (vq->vq_ia_active > 0) {
return (MIN(vq->vq_nia_credit,
zfs_vdev_scrub_min_active));
} else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
return (MAX(1, zfs_vdev_scrub_min_active));
return (zfs_vdev_scrub_max_active);
case ZIO_PRIORITY_REMOVAL:
if (vq->vq_ia_active > 0) {
return (MIN(vq->vq_nia_credit,
zfs_vdev_removal_min_active));
} else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
return (MAX(1, zfs_vdev_removal_min_active));
return (zfs_vdev_removal_max_active);
case ZIO_PRIORITY_INITIALIZING:
if (vq->vq_ia_active > 0) {
return (MIN(vq->vq_nia_credit,
zfs_vdev_initializing_min_active));
} else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
return (MAX(1, zfs_vdev_initializing_min_active));
return (zfs_vdev_initializing_max_active);
case ZIO_PRIORITY_TRIM:
return (zfs_vdev_trim_max_active);
case ZIO_PRIORITY_REBUILD:
if (vq->vq_ia_active > 0) {
return (MIN(vq->vq_nia_credit,
zfs_vdev_rebuild_min_active));
} else if (vq->vq_nia_credit < zfs_vdev_nia_delay)
return (MAX(1, zfs_vdev_rebuild_min_active));
return (zfs_vdev_rebuild_max_active);
default:
panic("invalid priority %u", p);
return (0);
}
}
/*
* Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
* there is no eligible class.
*/
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
spa_t *spa = vq->vq_vdev->vdev_spa;
zio_priority_t p, n;
if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
return (ZIO_PRIORITY_NUM_QUEUEABLE);
/*
* Find a queue that has not reached its minimum # outstanding i/os.
* Do round-robin to reduce starvation due to zfs_vdev_max_active
* and vq_nia_credit limits.
*/
for (n = 0; n < ZIO_PRIORITY_NUM_QUEUEABLE; n++) {
p = (vq->vq_last_prio + n + 1) % ZIO_PRIORITY_NUM_QUEUEABLE;
if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
vq->vq_class[p].vqc_active <
vdev_queue_class_min_active(vq, p)) {
vq->vq_last_prio = p;
return (p);
}
}
/*
* If we haven't found a queue, look for one that hasn't reached its
* maximum # outstanding i/os.
*/
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
vq->vq_class[p].vqc_active <
vdev_queue_class_max_active(spa, vq, p)) {
vq->vq_last_prio = p;
return (p);
}
}
/* No eligible queued i/os */
return (ZIO_PRIORITY_NUM_QUEUEABLE);
}
void
vdev_queue_init(vdev_t *vd)
{
vdev_queue_t *vq = &vd->vdev_queue;
zio_priority_t p;
mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
vq->vq_vdev = vd;
taskq_init_ent(&vd->vdev_queue.vq_io_search.io_tqent);
avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
sizeof (zio_t), offsetof(struct zio, io_queue_node));
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM),
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
int (*compfn) (const void *, const void *);
/*
* The synchronous/trim i/o queues are dispatched in FIFO rather
* than LBA order. This provides more consistent latency for
* these i/os.
*/
if (p == ZIO_PRIORITY_SYNC_READ ||
p == ZIO_PRIORITY_SYNC_WRITE ||
p == ZIO_PRIORITY_TRIM) {
compfn = vdev_queue_timestamp_compare;
} else {
compfn = vdev_queue_offset_compare;
}
avl_create(vdev_queue_class_tree(vq, p), compfn,
sizeof (zio_t), offsetof(struct zio, io_queue_node));
}
vq->vq_last_offset = 0;
}
void
vdev_queue_fini(vdev_t *vd)
{
vdev_queue_t *vq = &vd->vdev_queue;
for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
avl_destroy(vdev_queue_class_tree(vq, p));
avl_destroy(&vq->vq_active_tree);
avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM));
mutex_destroy(&vq->vq_lock);
}
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);
}
static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);
}
static boolean_t
vdev_queue_is_interactive(zio_priority_t p)
{
switch (p) {
case ZIO_PRIORITY_SCRUB:
case ZIO_PRIORITY_REMOVAL:
case ZIO_PRIORITY_INITIALIZING:
case ZIO_PRIORITY_REBUILD:
return (B_FALSE);
default:
return (B_TRUE);
}
}
static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
ASSERT(MUTEX_HELD(&vq->vq_lock));
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
vq->vq_class[zio->io_priority].vqc_active++;
if (vdev_queue_is_interactive(zio->io_priority)) {
if (++vq->vq_ia_active == 1)
vq->vq_nia_credit = 1;
} else if (vq->vq_ia_active > 0) {
vq->vq_nia_credit--;
}
avl_add(&vq->vq_active_tree, zio);
}
static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
ASSERT(MUTEX_HELD(&vq->vq_lock));
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
vq->vq_class[zio->io_priority].vqc_active--;
if (vdev_queue_is_interactive(zio->io_priority)) {
if (--vq->vq_ia_active == 0)
vq->vq_nia_credit = 0;
else
vq->vq_nia_credit = zfs_vdev_nia_credit;
} else if (vq->vq_ia_active == 0)
vq->vq_nia_credit++;
avl_remove(&vq->vq_active_tree, zio);
}
static void
vdev_queue_agg_io_done(zio_t *aio)
{
abd_free(aio->io_abd);
}
/*
* Compute the range spanned by two i/os, which is the endpoint of the last
* (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
* Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
* thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
*/
#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
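/*
 * Editor's illustrative example (not part of the original source): for fio at
 * offset 0 with size 8K and lio at offset 12K with size 4K,
 * IO_SPAN(fio, lio) = 12K + 4K - 0 = 16K and
 * IO_GAP(fio, lio) = -IO_SPAN(lio, fio) = -(0 + 8K - 12K) = 4K,
 * i.e. the two I/Os are 4K apart and span 16K if aggregated.
 */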
/*
* ZIOs with sufficiently adjacent io_offsets will be aggregated. We do this
* by creating a gang ABD from the adjacent ZIOs' io_abds. By using
* a gang ABD we avoid doing memory copies to and from the parent
* and child ZIOs. The gang ABD also accounts for gaps between adjacent
* io_offsets by simply getting the zero ABD for writes or allocating
* a new ABD for reads and placing them in the gang ABD as well.
*/
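/*
 * Editor's illustrative example (not part of the original source): aggregating
 * two 4K writes at offsets 0 and 8K, with the 4K gap between them covered by
 * an optional data-less (NODATA) child I/O, yields a single 12K parent I/O
 * whose gang ABD chains the first child's ABD, a 4K zero ABD for the gap, and
 * the second child's ABD; no data is copied into a contiguous staging buffer.
 */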
static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
zio_t *first, *last, *aio, *dio, *mandatory, *nio;
uint64_t maxgap = 0;
uint64_t size;
uint64_t limit;
int maxblocksize;
boolean_t stretch = B_FALSE;
avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
zio_flag_t flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
uint64_t next_offset;
abd_t *abd;
maxblocksize = spa_maxblocksize(vq->vq_vdev->vdev_spa);
if (vq->vq_vdev->vdev_nonrot)
limit = zfs_vdev_aggregation_limit_non_rotating;
else
limit = zfs_vdev_aggregation_limit;
limit = MIN(limit, maxblocksize);
if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE || limit == 0)
return (NULL);
/*
* While TRIM commands could be aggregated based on offset, this
* behavior is disabled until it's determined to be beneficial.
*/
if (zio->io_type == ZIO_TYPE_TRIM && !zfs_vdev_aggregate_trim)
return (NULL);
/*
* I/Os to distributed spares are directly dispatched to the dRAID
* leaf vdevs for aggregation. See the comment at the end of the
* zio_vdev_io_start() function.
*/
ASSERT(vq->vq_vdev->vdev_ops != &vdev_draid_spare_ops);
first = last = zio;
if (zio->io_type == ZIO_TYPE_READ)
maxgap = zfs_vdev_read_gap_limit;
/*
* We can aggregate I/Os that are sufficiently adjacent and of
* the same flavor, as expressed by the AGG_INHERIT flags.
* The latter requirement is necessary so that certain
* attributes of the I/O, such as whether it's a normal I/O
* or a scrub/resilver, can be preserved in the aggregate.
* We can include optional I/Os, but don't allow them
* to begin a range as they add no benefit in that situation.
*/
/*
* We keep track of the last non-optional I/O.
*/
mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;
/*
* Walk backwards through sufficiently contiguous I/Os
* recording the last non-optional I/O.
*/
while ((dio = AVL_PREV(t, first)) != NULL &&
(dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
IO_SPAN(dio, last) <= limit &&
IO_GAP(dio, first) <= maxgap &&
dio->io_type == zio->io_type) {
first = dio;
if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
mandatory = first;
}
/*
* Skip any initial optional I/Os.
*/
while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
first = AVL_NEXT(t, first);
ASSERT(first != NULL);
}
/*
* Walk forward through sufficiently contiguous I/Os.
* The aggregation limit does not apply to optional i/os, so that
* we can issue contiguous writes even if they are larger than the
* aggregation limit.
*/
while ((dio = AVL_NEXT(t, last)) != NULL &&
(dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
(IO_SPAN(first, dio) <= limit ||
(dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
IO_SPAN(first, dio) <= maxblocksize &&
IO_GAP(last, dio) <= maxgap &&
dio->io_type == zio->io_type) {
last = dio;
if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
mandatory = last;
}
/*
* Now that we've established the range of the I/O aggregation
* we must decide what to do with trailing optional I/Os.
* For reads, there's nothing to do. While we are unable to
* aggregate further, it's possible that a trailing optional
* I/O would allow the underlying device to aggregate with
* subsequent I/Os. We must therefore determine if the next
* non-optional I/O is close enough to make aggregation
* worthwhile.
*/
if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
zio_t *nio = last;
while ((dio = AVL_NEXT(t, nio)) != NULL &&
IO_GAP(nio, dio) == 0 &&
IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
nio = dio;
if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
stretch = B_TRUE;
break;
}
}
}
if (stretch) {
/*
* We are going to include an optional io in our aggregated
* span, thus closing the write gap. Only mandatory i/os can
* start aggregated spans, so make sure that the next i/o
* after our span is mandatory.
*/
dio = AVL_NEXT(t, last);
ASSERT3P(dio, !=, NULL);
dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
} else {
/* do not include the optional i/o */
while (last != mandatory && last != first) {
ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
last = AVL_PREV(t, last);
ASSERT(last != NULL);
}
}
if (first == last)
return (NULL);
size = IO_SPAN(first, last);
ASSERT3U(size, <=, maxblocksize);
abd = abd_alloc_gang();
if (abd == NULL)
return (NULL);
aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
abd, size, first->io_type, zio->io_priority,
- flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
- vdev_queue_agg_io_done, NULL);
+ flags | ZIO_FLAG_DONT_QUEUE, vdev_queue_agg_io_done, NULL);
aio->io_timestamp = first->io_timestamp;
nio = first;
next_offset = first->io_offset;
do {
dio = nio;
nio = AVL_NEXT(t, dio);
ASSERT3P(dio, !=, NULL);
zio_add_child(dio, aio);
vdev_queue_io_remove(vq, dio);
if (dio->io_offset != next_offset) {
/* allocate a buffer for a read gap */
ASSERT3U(dio->io_type, ==, ZIO_TYPE_READ);
ASSERT3U(dio->io_offset, >, next_offset);
abd = abd_alloc_for_io(
dio->io_offset - next_offset, B_TRUE);
abd_gang_add(aio->io_abd, abd, B_TRUE);
}
if (dio->io_abd &&
(dio->io_size != abd_get_size(dio->io_abd))) {
/* abd size not the same as IO size */
ASSERT3U(abd_get_size(dio->io_abd), >, dio->io_size);
abd = abd_get_offset_size(dio->io_abd, 0, dio->io_size);
abd_gang_add(aio->io_abd, abd, B_TRUE);
} else {
if (dio->io_flags & ZIO_FLAG_NODATA) {
/* allocate a buffer for a write gap */
ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3P(dio->io_abd, ==, NULL);
abd_gang_add(aio->io_abd,
abd_get_zeros(dio->io_size), B_TRUE);
} else {
/*
* We pass B_FALSE to abd_gang_add()
* because we did not allocate a new
* ABD, so it is assumed the caller
* will free this ABD.
*/
abd_gang_add(aio->io_abd, dio->io_abd,
B_FALSE);
}
}
next_offset = dio->io_offset + dio->io_size;
} while (dio != last);
ASSERT3U(abd_get_size(aio->io_abd), ==, aio->io_size);
/*
* Callers must call zio_vdev_io_bypass() and zio_execute() for
* aggregated (parent) I/Os so that we do not have to drop the
* queue's lock here, which could otherwise lead to a deadlock
* due to lock order reversal between vq_lock and io_lock in
* zio_change_priority().
*/
return (aio);
}
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
zio_t *zio, *aio;
zio_priority_t p;
avl_index_t idx;
avl_tree_t *tree;
again:
ASSERT(MUTEX_HELD(&vq->vq_lock));
p = vdev_queue_class_to_issue(vq);
if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
/* No eligible queued i/os */
return (NULL);
}
/*
* For LBA-ordered queues (async / scrub / initializing), issue the
* i/o which follows the most recently issued i/o in LBA (offset) order.
*
* For FIFO queues (sync/trim), issue the i/o with the lowest timestamp.
*/
tree = vdev_queue_class_tree(vq, p);
vq->vq_io_search.io_timestamp = 0;
vq->vq_io_search.io_offset = vq->vq_last_offset - 1;
VERIFY3P(avl_find(tree, &vq->vq_io_search, &idx), ==, NULL);
zio = avl_nearest(tree, idx, AVL_AFTER);
if (zio == NULL)
zio = avl_first(tree);
ASSERT3U(zio->io_priority, ==, p);
aio = vdev_queue_aggregate(vq, zio);
if (aio != NULL) {
zio = aio;
} else {
vdev_queue_io_remove(vq, zio);
/*
* If the I/O is or was optional and therefore has no data, we
* need to simply discard it. We need to drop the vdev queue's
* lock to avoid a deadlock that we could encounter since this
* I/O will complete immediately.
*/
if (zio->io_flags & ZIO_FLAG_NODATA) {
mutex_exit(&vq->vq_lock);
zio_vdev_io_bypass(zio);
zio_execute(zio);
mutex_enter(&vq->vq_lock);
goto again;
}
}
vdev_queue_pending_add(vq, zio);
vq->vq_last_offset = zio->io_offset + zio->io_size;
return (zio);
}
zio_t *
vdev_queue_io(zio_t *zio)
{
vdev_queue_t *vq = &zio->io_vd->vdev_queue;
zio_t *dio, *nio;
zio_link_t *zl = NULL;
if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
return (zio);
/*
* Children i/os inherit their parent's priority, which might
* not match the child's i/o type. Fix it up here.
*/
if (zio->io_type == ZIO_TYPE_READ) {
ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);
if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
zio->io_priority != ZIO_PRIORITY_SCRUB &&
zio->io_priority != ZIO_PRIORITY_REMOVAL &&
zio->io_priority != ZIO_PRIORITY_INITIALIZING &&
zio->io_priority != ZIO_PRIORITY_REBUILD) {
zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);
if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
zio->io_priority != ZIO_PRIORITY_REMOVAL &&
zio->io_priority != ZIO_PRIORITY_INITIALIZING &&
zio->io_priority != ZIO_PRIORITY_REBUILD) {
zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
}
} else {
ASSERT(zio->io_type == ZIO_TYPE_TRIM);
ASSERT(zio->io_priority == ZIO_PRIORITY_TRIM);
}
- zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
+ zio->io_flags |= ZIO_FLAG_DONT_QUEUE;
zio->io_timestamp = gethrtime();
mutex_enter(&vq->vq_lock);
vdev_queue_io_add(vq, zio);
nio = vdev_queue_io_to_issue(vq);
mutex_exit(&vq->vq_lock);
if (nio == NULL)
return (NULL);
if (nio->io_done == vdev_queue_agg_io_done) {
while ((dio = zio_walk_parents(nio, &zl)) != NULL) {
ASSERT3U(dio->io_type, ==, nio->io_type);
zio_vdev_io_bypass(dio);
zio_execute(dio);
}
zio_nowait(nio);
return (NULL);
}
return (nio);
}
void
vdev_queue_io_done(zio_t *zio)
{
vdev_queue_t *vq = &zio->io_vd->vdev_queue;
zio_t *dio, *nio;
zio_link_t *zl = NULL;
hrtime_t now = gethrtime();
vq->vq_io_complete_ts = now;
vq->vq_io_delta_ts = zio->io_delta = now - zio->io_timestamp;
mutex_enter(&vq->vq_lock);
vdev_queue_pending_remove(vq, zio);
while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
mutex_exit(&vq->vq_lock);
if (nio->io_done == vdev_queue_agg_io_done) {
while ((dio = zio_walk_parents(nio, &zl)) != NULL) {
ASSERT3U(dio->io_type, ==, nio->io_type);
zio_vdev_io_bypass(dio);
zio_execute(dio);
}
zio_nowait(nio);
} else {
zio_vdev_io_reissue(nio);
zio_execute(nio);
}
mutex_enter(&vq->vq_lock);
}
mutex_exit(&vq->vq_lock);
}
void
vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
{
vdev_queue_t *vq = &zio->io_vd->vdev_queue;
avl_tree_t *tree;
/*
* ZIO_PRIORITY_NOW is used by the aggregate zio code to issue IOs
* without adding them to the vdev queue. In this
* case, the zio is already going to be issued as quickly as possible
* and so it doesn't need any reprioritization to help.
*/
if (zio->io_priority == ZIO_PRIORITY_NOW)
return;
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
if (zio->io_type == ZIO_TYPE_READ) {
if (priority != ZIO_PRIORITY_SYNC_READ &&
priority != ZIO_PRIORITY_ASYNC_READ &&
priority != ZIO_PRIORITY_SCRUB)
priority = ZIO_PRIORITY_ASYNC_READ;
} else {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
if (priority != ZIO_PRIORITY_SYNC_WRITE &&
priority != ZIO_PRIORITY_ASYNC_WRITE)
priority = ZIO_PRIORITY_ASYNC_WRITE;
}
mutex_enter(&vq->vq_lock);
/*
* If the zio is in none of the queues, we can simply change
* the priority. If the zio is waiting to be submitted we must
* remove it from the queue and re-insert it with the new priority.
* Otherwise, the zio is currently active and we cannot change its
* priority.
*/
tree = vdev_queue_class_tree(vq, zio->io_priority);
if (avl_find(tree, zio, NULL) == zio) {
avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
zio->io_priority = priority;
avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
} else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
zio->io_priority = priority;
}
mutex_exit(&vq->vq_lock);
}
/*
* As these two methods are only used for load calculations, we're not
* concerned if we get an incorrect value on 32-bit platforms due to the lack
* of vq_lock mutex use here; instead we prefer to keep it lock-free for
* performance.
*/
int
vdev_queue_length(vdev_t *vd)
{
return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
}
uint64_t
vdev_queue_last_offset(vdev_t *vd)
{
return (vd->vdev_queue.vq_last_offset);
}
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit, UINT, ZMOD_RW,
"Max vdev I/O aggregation size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit_non_rotating, UINT,
ZMOD_RW, "Max vdev I/O aggregation size for non-rotating media");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregate_trim, UINT, ZMOD_RW,
"Allow TRIM I/O to be aggregated");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, read_gap_limit, UINT, ZMOD_RW,
"Aggregate read I/O over gap");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, write_gap_limit, UINT, ZMOD_RW,
"Aggregate write I/O over gap");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_active, UINT, ZMOD_RW,
"Maximum number of active I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_max_dirty_percent,
UINT, ZMOD_RW, "Async write concurrency max threshold");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_min_dirty_percent,
UINT, ZMOD_RW, "Async write concurrency min threshold");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_max_active, UINT, ZMOD_RW,
"Max active async read I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_min_active, UINT, ZMOD_RW,
"Min active async read I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_max_active, UINT, ZMOD_RW,
"Max active async write I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_min_active, UINT, ZMOD_RW,
"Min active async write I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_max_active, UINT, ZMOD_RW,
"Max active initializing I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_min_active, UINT, ZMOD_RW,
"Min active initializing I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_max_active, UINT, ZMOD_RW,
"Max active removal I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_min_active, UINT, ZMOD_RW,
"Min active removal I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_max_active, UINT, ZMOD_RW,
"Max active scrub I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_min_active, UINT, ZMOD_RW,
"Min active scrub I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_max_active, UINT, ZMOD_RW,
"Max active sync read I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_min_active, UINT, ZMOD_RW,
"Min active sync read I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_max_active, UINT, ZMOD_RW,
"Max active sync write I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_min_active, UINT, ZMOD_RW,
"Min active sync write I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_max_active, UINT, ZMOD_RW,
"Max active trim/discard I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_min_active, UINT, ZMOD_RW,
"Min active trim/discard I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_max_active, UINT, ZMOD_RW,
"Max active rebuild I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_min_active, UINT, ZMOD_RW,
"Min active rebuild I/Os per vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_credit, UINT, ZMOD_RW,
"Number of non-interactive I/Os to allow in sequence");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_delay, UINT, ZMOD_RW,
"Number of non-interactive I/Os before _max_active");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, queue_depth_pct, UINT, ZMOD_RW,
"Queue depth percentage for each top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, def_queue_depth, UINT, ZMOD_RW,
"Default queue depth for each allocator");
diff --git a/sys/contrib/openzfs/module/zfs/zap_micro.c b/sys/contrib/openzfs/module/zfs/zap_micro.c
index d6ad8b2b8bc5..085d9cd8b4b6 100644
--- a/sys/contrib/openzfs/module/zfs/zap_micro.c
+++ b/sys/contrib/openzfs/module/zfs/zap_micro.c
@@ -1,1733 +1,1737 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
#include <sys/zio.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zfs_context.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
#include <sys/btree.h>
#include <sys/arc.h>
#include <sys/dmu_objset.h>
#ifdef _KERNEL
#include <sys/sunddi.h>
#endif
int zap_micro_max_size = MZAP_MAX_BLKSZ;
static int mzap_upgrade(zap_t **zapp,
const void *tag, dmu_tx_t *tx, zap_flags_t flags);
uint64_t
zap_getflags(zap_t *zap)
{
if (zap->zap_ismicro)
return (0);
return (zap_f_phys(zap)->zap_flags);
}
int
zap_hashbits(zap_t *zap)
{
if (zap_getflags(zap) & ZAP_FLAG_HASH64)
return (48);
else
return (28);
}
uint32_t
zap_maxcd(zap_t *zap)
{
if (zap_getflags(zap) & ZAP_FLAG_HASH64)
return ((1<<16)-1);
else
return (-1U);
}
static uint64_t
zap_hash(zap_name_t *zn)
{
zap_t *zap = zn->zn_zap;
uint64_t h = 0;
if (zap_getflags(zap) & ZAP_FLAG_PRE_HASHED_KEY) {
ASSERT(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY);
h = *(uint64_t *)zn->zn_key_orig;
} else {
h = zap->zap_salt;
ASSERT(h != 0);
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
const uint64_t *wp = zn->zn_key_norm;
ASSERT(zn->zn_key_intlen == 8);
for (int i = 0; i < zn->zn_key_norm_numints;
wp++, i++) {
uint64_t word = *wp;
for (int j = 0; j < 8; j++) {
h = (h >> 8) ^
zfs_crc64_table[(h ^ word) & 0xFF];
word >>= NBBY;
}
}
} else {
const uint8_t *cp = zn->zn_key_norm;
/*
* We previously stored the terminating null on
* disk, but didn't hash it, so we need to
* continue to not hash it. (The
* zn_key_*_numints includes the terminating
* null for non-binary keys.)
*/
int len = zn->zn_key_norm_numints - 1;
ASSERT(zn->zn_key_intlen == 1);
for (int i = 0; i < len; cp++, i++) {
h = (h >> 8) ^
zfs_crc64_table[(h ^ *cp) & 0xFF];
}
}
}
/*
* Don't use all 64 bits, since we need some in the cookie for
* the collision differentiator. We MUST use the high bits,
* since those are the ones that we first pay attention to when
* choosing the bucket.
*/
h &= ~((1ULL << (64 - zap_hashbits(zap))) - 1);
return (h);
}
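/*
 * Editor's illustrative example (not part of the original source): with the
 * default 28 hash bits (no ZAP_FLAG_HASH64), the mask above is
 * ~((1ULL << 36) - 1), i.e. only the top 28 bits of the CRC are kept for
 * bucket selection while the low bits stay available to encode the
 * collision differentiator (cd) in the cookie.
 */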
static int
zap_normalize(zap_t *zap, const char *name, char *namenorm, int normflags)
{
ASSERT(!(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY));
size_t inlen = strlen(name) + 1;
size_t outlen = ZAP_MAXNAMELEN;
int err = 0;
(void) u8_textprep_str((char *)name, &inlen, namenorm, &outlen,
normflags | U8_TEXTPREP_IGNORE_NULL | U8_TEXTPREP_IGNORE_INVALID,
U8_UNICODE_LATEST, &err);
return (err);
}
boolean_t
zap_match(zap_name_t *zn, const char *matchname)
{
ASSERT(!(zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY));
if (zn->zn_matchtype & MT_NORMALIZE) {
char norm[ZAP_MAXNAMELEN];
if (zap_normalize(zn->zn_zap, matchname, norm,
zn->zn_normflags) != 0)
return (B_FALSE);
return (strcmp(zn->zn_key_norm, norm) == 0);
} else {
return (strcmp(zn->zn_key_orig, matchname) == 0);
}
}
static zap_name_t *
zap_name_alloc(zap_t *zap)
{
zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);
zn->zn_zap = zap;
return (zn);
}
void
zap_name_free(zap_name_t *zn)
{
kmem_free(zn, sizeof (zap_name_t));
}
static int
zap_name_init_str(zap_name_t *zn, const char *key, matchtype_t mt)
{
zap_t *zap = zn->zn_zap;
zn->zn_key_intlen = sizeof (*key);
zn->zn_key_orig = key;
zn->zn_key_orig_numints = strlen(zn->zn_key_orig) + 1;
zn->zn_matchtype = mt;
zn->zn_normflags = zap->zap_normflags;
/*
* If we're dealing with a case-sensitive lookup on a mixed or
* case-insensitive fs, remove U8_TEXTPREP_TOUPPER or the lookup
* will fold case to all caps, overriding the lookup request.
*/
if (mt & MT_MATCH_CASE)
zn->zn_normflags &= ~U8_TEXTPREP_TOUPPER;
if (zap->zap_normflags) {
/*
* We *must* use zap_normflags because this normalization is
* what the hash is computed from.
*/
if (zap_normalize(zap, key, zn->zn_normbuf,
zap->zap_normflags) != 0)
return (SET_ERROR(ENOTSUP));
zn->zn_key_norm = zn->zn_normbuf;
zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
} else {
if (mt != 0)
return (SET_ERROR(ENOTSUP));
zn->zn_key_norm = zn->zn_key_orig;
zn->zn_key_norm_numints = zn->zn_key_orig_numints;
}
zn->zn_hash = zap_hash(zn);
if (zap->zap_normflags != zn->zn_normflags) {
/*
* We *must* use zn_normflags because this normalization is
* what the matching is based on. (Not the hash!)
*/
if (zap_normalize(zap, key, zn->zn_normbuf,
zn->zn_normflags) != 0)
return (SET_ERROR(ENOTSUP));
zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
}
return (0);
}
zap_name_t *
zap_name_alloc_str(zap_t *zap, const char *key, matchtype_t mt)
{
zap_name_t *zn = zap_name_alloc(zap);
if (zap_name_init_str(zn, key, mt) != 0) {
zap_name_free(zn);
return (NULL);
}
return (zn);
}
static zap_name_t *
zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
{
zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);
ASSERT(zap->zap_normflags == 0);
zn->zn_zap = zap;
zn->zn_key_intlen = sizeof (*key);
zn->zn_key_orig = zn->zn_key_norm = key;
zn->zn_key_orig_numints = zn->zn_key_norm_numints = numints;
zn->zn_matchtype = 0;
zn->zn_hash = zap_hash(zn);
return (zn);
}
static void
mzap_byteswap(mzap_phys_t *buf, size_t size)
{
buf->mz_block_type = BSWAP_64(buf->mz_block_type);
buf->mz_salt = BSWAP_64(buf->mz_salt);
buf->mz_normflags = BSWAP_64(buf->mz_normflags);
int max = (size / MZAP_ENT_LEN) - 1;
for (int i = 0; i < max; i++) {
buf->mz_chunk[i].mze_value =
BSWAP_64(buf->mz_chunk[i].mze_value);
buf->mz_chunk[i].mze_cd =
BSWAP_32(buf->mz_chunk[i].mze_cd);
}
}
void
zap_byteswap(void *buf, size_t size)
{
uint64_t block_type = *(uint64_t *)buf;
if (block_type == ZBT_MICRO || block_type == BSWAP_64(ZBT_MICRO)) {
/* ASSERT(magic == ZAP_LEAF_MAGIC); */
mzap_byteswap(buf, size);
} else {
fzap_byteswap(buf, size);
}
}
+__attribute__((always_inline)) inline
static int
mze_compare(const void *arg1, const void *arg2)
{
const mzap_ent_t *mze1 = arg1;
const mzap_ent_t *mze2 = arg2;
return (TREE_CMP((uint64_t)(mze1->mze_hash) << 32 | mze1->mze_cd,
(uint64_t)(mze2->mze_hash) << 32 | mze2->mze_cd));
}
+ZFS_BTREE_FIND_IN_BUF_FUNC(mze_find_in_buf, mzap_ent_t,
+ mze_compare)
+
static void
mze_insert(zap_t *zap, uint16_t chunkid, uint64_t hash)
{
mzap_ent_t mze;
ASSERT(zap->zap_ismicro);
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
mze.mze_chunkid = chunkid;
ASSERT0(hash & 0xffffffff);
mze.mze_hash = hash >> 32;
ASSERT3U(MZE_PHYS(zap, &mze)->mze_cd, <=, 0xffff);
mze.mze_cd = (uint16_t)MZE_PHYS(zap, &mze)->mze_cd;
ASSERT(MZE_PHYS(zap, &mze)->mze_name[0] != 0);
zfs_btree_add(&zap->zap_m.zap_tree, &mze);
}
static mzap_ent_t *
mze_find(zap_name_t *zn, zfs_btree_index_t *idx)
{
mzap_ent_t mze_tofind;
mzap_ent_t *mze;
zfs_btree_t *tree = &zn->zn_zap->zap_m.zap_tree;
ASSERT(zn->zn_zap->zap_ismicro);
ASSERT(RW_LOCK_HELD(&zn->zn_zap->zap_rwlock));
ASSERT0(zn->zn_hash & 0xffffffff);
mze_tofind.mze_hash = zn->zn_hash >> 32;
mze_tofind.mze_cd = 0;
mze = zfs_btree_find(tree, &mze_tofind, idx);
if (mze == NULL)
mze = zfs_btree_next(tree, idx, idx);
for (; mze && mze->mze_hash == mze_tofind.mze_hash;
mze = zfs_btree_next(tree, idx, idx)) {
ASSERT3U(mze->mze_cd, ==, MZE_PHYS(zn->zn_zap, mze)->mze_cd);
if (zap_match(zn, MZE_PHYS(zn->zn_zap, mze)->mze_name))
return (mze);
}
return (NULL);
}
static uint32_t
mze_find_unused_cd(zap_t *zap, uint64_t hash)
{
mzap_ent_t mze_tofind;
zfs_btree_index_t idx;
zfs_btree_t *tree = &zap->zap_m.zap_tree;
ASSERT(zap->zap_ismicro);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT0(hash & 0xffffffff);
hash >>= 32;
mze_tofind.mze_hash = hash;
mze_tofind.mze_cd = 0;
uint32_t cd = 0;
for (mzap_ent_t *mze = zfs_btree_find(tree, &mze_tofind, &idx);
mze && mze->mze_hash == hash;
mze = zfs_btree_next(tree, &idx, &idx)) {
if (mze->mze_cd != cd)
break;
cd++;
}
return (cd);
}
/*
* Each mzap entry requires at most 4 chunks:
* 3 chunks for the name + 1 chunk for the value.
*/
#define MZAP_ENT_CHUNKS (1 + ZAP_LEAF_ARRAY_NCHUNKS(MZAP_NAME_LEN) + \
ZAP_LEAF_ARRAY_NCHUNKS(sizeof (uint64_t)))
/*
* Check if the current entry keeps the colliding entries under the fatzap leaf
* size.
*/
static boolean_t
mze_canfit_fzap_leaf(zap_name_t *zn, uint64_t hash)
{
zap_t *zap = zn->zn_zap;
mzap_ent_t mze_tofind;
zfs_btree_index_t idx;
zfs_btree_t *tree = &zap->zap_m.zap_tree;
uint32_t mzap_ents = 0;
ASSERT0(hash & 0xffffffff);
hash >>= 32;
mze_tofind.mze_hash = hash;
mze_tofind.mze_cd = 0;
for (mzap_ent_t *mze = zfs_btree_find(tree, &mze_tofind, &idx);
mze && mze->mze_hash == hash;
mze = zfs_btree_next(tree, &idx, &idx)) {
mzap_ents++;
}
/* Include the new entry being added */
mzap_ents++;
return (ZAP_LEAF_NUMCHUNKS_DEF > (mzap_ents * MZAP_ENT_CHUNKS));
}
static void
mze_destroy(zap_t *zap)
{
zfs_btree_clear(&zap->zap_m.zap_tree);
zfs_btree_destroy(&zap->zap_m.zap_tree);
}
static zap_t *
mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
{
zap_t *winner;
uint64_t *zap_hdr = (uint64_t *)db->db_data;
uint64_t zap_block_type = zap_hdr[0];
uint64_t zap_magic = zap_hdr[1];
ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));
zap_t *zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
rw_init(&zap->zap_rwlock, NULL, RW_DEFAULT, NULL);
rw_enter(&zap->zap_rwlock, RW_WRITER);
zap->zap_objset = os;
zap->zap_object = obj;
zap->zap_dbuf = db;
if (zap_block_type != ZBT_MICRO) {
mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, MUTEX_DEFAULT,
0);
zap->zap_f.zap_block_shift = highbit64(db->db_size) - 1;
if (zap_block_type != ZBT_HEADER || zap_magic != ZAP_MAGIC) {
winner = NULL; /* No actual winner here... */
goto handle_winner;
}
} else {
zap->zap_ismicro = TRUE;
}
/*
* Make sure that zap_ismicro is set before we let others see
* it, because zap_lockdir() checks zap_ismicro without the lock
* held.
*/
dmu_buf_init_user(&zap->zap_dbu, zap_evict_sync, NULL, &zap->zap_dbuf);
winner = dmu_buf_set_user(db, &zap->zap_dbu);
if (winner != NULL)
goto handle_winner;
if (zap->zap_ismicro) {
zap->zap_salt = zap_m_phys(zap)->mz_salt;
zap->zap_normflags = zap_m_phys(zap)->mz_normflags;
zap->zap_m.zap_num_chunks = db->db_size / MZAP_ENT_LEN - 1;
/*
* Reduce the B-tree leaf from 4KB to 512 bytes to reduce memmove()
* overhead on massive inserts below. It still allows storing
* 62 entries before we have to add a 2KB B-tree core node.
*/
zfs_btree_create_custom(&zap->zap_m.zap_tree, mze_compare,
- sizeof (mzap_ent_t), 512);
+ mze_find_in_buf, sizeof (mzap_ent_t), 512);
zap_name_t *zn = zap_name_alloc(zap);
for (uint16_t i = 0; i < zap->zap_m.zap_num_chunks; i++) {
mzap_ent_phys_t *mze =
&zap_m_phys(zap)->mz_chunk[i];
if (mze->mze_name[0]) {
zap->zap_m.zap_num_entries++;
zap_name_init_str(zn, mze->mze_name, 0);
mze_insert(zap, i, zn->zn_hash);
}
}
zap_name_free(zn);
} else {
zap->zap_salt = zap_f_phys(zap)->zap_salt;
zap->zap_normflags = zap_f_phys(zap)->zap_normflags;
ASSERT3U(sizeof (struct zap_leaf_header), ==,
2*ZAP_LEAF_CHUNKSIZE);
/*
* The embedded pointer table should not overlap the
* other members.
*/
ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), >,
&zap_f_phys(zap)->zap_salt);
/*
* The embedded pointer table should end at the end of
* the block
*/
ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap,
1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)) -
(uintptr_t)zap_f_phys(zap), ==,
zap->zap_dbuf->db_size);
}
rw_exit(&zap->zap_rwlock);
return (zap);
handle_winner:
rw_exit(&zap->zap_rwlock);
rw_destroy(&zap->zap_rwlock);
if (!zap->zap_ismicro)
mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
kmem_free(zap, sizeof (zap_t));
return (winner);
}
/*
* This routine "consumes" the caller's hold on the dbuf, which must
* have the specified tag.
*/
static int
zap_lockdir_impl(dmu_buf_t *db, const void *tag, dmu_tx_t *tx,
krw_t lti, boolean_t fatreader, boolean_t adding, zap_t **zapp)
{
ASSERT0(db->db_offset);
objset_t *os = dmu_buf_get_objset(db);
uint64_t obj = db->db_object;
dmu_object_info_t doi;
*zapp = NULL;
dmu_object_info_from_db(db, &doi);
if (DMU_OT_BYTESWAP(doi.doi_type) != DMU_BSWAP_ZAP)
return (SET_ERROR(EINVAL));
zap_t *zap = dmu_buf_get_user(db);
if (zap == NULL) {
zap = mzap_open(os, obj, db);
if (zap == NULL) {
/*
* mzap_open() didn't like what it saw on-disk.
* Check for corruption!
*/
return (SET_ERROR(EIO));
}
}
/*
* We're checking zap_ismicro without the lock held, in order to
* tell what type of lock we want. Once we have some sort of
* lock, see if it really is the right type. In practice this
* can only be different if it was upgraded from micro to fat,
* and micro wanted WRITER but fat only needs READER.
*/
krw_t lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti;
rw_enter(&zap->zap_rwlock, lt);
if (lt != ((!zap->zap_ismicro && fatreader) ? RW_READER : lti)) {
/* it was upgraded, now we only need reader */
ASSERT(lt == RW_WRITER);
ASSERT(RW_READER ==
((!zap->zap_ismicro && fatreader) ? RW_READER : lti));
rw_downgrade(&zap->zap_rwlock);
lt = RW_READER;
}
zap->zap_objset = os;
if (lt == RW_WRITER)
dmu_buf_will_dirty(db, tx);
ASSERT3P(zap->zap_dbuf, ==, db);
ASSERT(!zap->zap_ismicro ||
zap->zap_m.zap_num_entries <= zap->zap_m.zap_num_chunks);
if (zap->zap_ismicro && tx && adding &&
zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) {
uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE;
if (newsz > zap_micro_max_size) {
dprintf("upgrading obj %llu: num_entries=%u\n",
(u_longlong_t)obj, zap->zap_m.zap_num_entries);
*zapp = zap;
int err = mzap_upgrade(zapp, tag, tx, 0);
if (err != 0)
rw_exit(&zap->zap_rwlock);
return (err);
}
VERIFY0(dmu_object_set_blocksize(os, obj, newsz, 0, tx));
zap->zap_m.zap_num_chunks =
db->db_size / MZAP_ENT_LEN - 1;
}
*zapp = zap;
return (0);
}
static int
zap_lockdir_by_dnode(dnode_t *dn, dmu_tx_t *tx,
krw_t lti, boolean_t fatreader, boolean_t adding, const void *tag,
zap_t **zapp)
{
dmu_buf_t *db;
int err = dmu_buf_hold_by_dnode(dn, 0, tag, &db, DMU_READ_NO_PREFETCH);
if (err != 0) {
return (err);
}
#ifdef ZFS_DEBUG
{
dmu_object_info_t doi;
dmu_object_info_from_db(db, &doi);
ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
}
#endif
err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
if (err != 0) {
dmu_buf_rele(db, tag);
}
return (err);
}
int
zap_lockdir(objset_t *os, uint64_t obj, dmu_tx_t *tx,
krw_t lti, boolean_t fatreader, boolean_t adding, const void *tag,
zap_t **zapp)
{
dmu_buf_t *db;
int err = dmu_buf_hold(os, obj, 0, tag, &db, DMU_READ_NO_PREFETCH);
if (err != 0)
return (err);
#ifdef ZFS_DEBUG
{
dmu_object_info_t doi;
dmu_object_info_from_db(db, &doi);
ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
}
#endif
err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
if (err != 0)
dmu_buf_rele(db, tag);
return (err);
}
void
zap_unlockdir(zap_t *zap, const void *tag)
{
rw_exit(&zap->zap_rwlock);
dmu_buf_rele(zap->zap_dbuf, tag);
}
static int
mzap_upgrade(zap_t **zapp, const void *tag, dmu_tx_t *tx, zap_flags_t flags)
{
int err = 0;
zap_t *zap = *zapp;
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
int sz = zap->zap_dbuf->db_size;
mzap_phys_t *mzp = vmem_alloc(sz, KM_SLEEP);
memcpy(mzp, zap->zap_dbuf->db_data, sz);
int nchunks = zap->zap_m.zap_num_chunks;
if (!flags) {
err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object,
1ULL << fzap_default_block_shift, 0, tx);
if (err != 0) {
vmem_free(mzp, sz);
return (err);
}
}
dprintf("upgrading obj=%llu with %u chunks\n",
(u_longlong_t)zap->zap_object, nchunks);
/* XXX destroy the tree later, so we can use the stored hash value */
mze_destroy(zap);
fzap_upgrade(zap, tx, flags);
zap_name_t *zn = zap_name_alloc(zap);
for (int i = 0; i < nchunks; i++) {
mzap_ent_phys_t *mze = &mzp->mz_chunk[i];
if (mze->mze_name[0] == 0)
continue;
dprintf("adding %s=%llu\n",
mze->mze_name, (u_longlong_t)mze->mze_value);
zap_name_init_str(zn, mze->mze_name, 0);
/* If we fail here, we would end up losing entries */
VERIFY0(fzap_add_cd(zn, 8, 1, &mze->mze_value, mze->mze_cd,
tag, tx));
zap = zn->zn_zap; /* fzap_add_cd() may change zap */
}
zap_name_free(zn);
vmem_free(mzp, sz);
*zapp = zap;
return (0);
}
/*
* The "normflags" determine the behavior of the matchtype_t which is
* passed to zap_lookup_norm(). Names which have the same normalized
* version will be stored with the same hash value, and therefore we can
* perform normalization-insensitive lookups. We can be Unicode form-
* insensitive and/or case-insensitive. The following flags are valid for
* "normflags":
*
* U8_TEXTPREP_NFC
* U8_TEXTPREP_NFD
* U8_TEXTPREP_NFKC
* U8_TEXTPREP_NFKD
* U8_TEXTPREP_TOUPPER
*
* The *_NF* (Normalization Form) flags are mutually exclusive; at most one
* of them may be supplied.
*/
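/*
 * Editor's illustrative example (not part of the original source): a
 * case-insensitive, NFC-normalizing ZAP would use
 * normflags = U8_TEXTPREP_NFC | U8_TEXTPREP_TOUPPER, so names that differ
 * only in case or Unicode form normalize (and therefore hash) identically
 * and can be matched with MT_NORMALIZE lookups.
 */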
void
mzap_create_impl(dnode_t *dn, int normflags, zap_flags_t flags, dmu_tx_t *tx)
{
dmu_buf_t *db;
VERIFY0(dmu_buf_hold_by_dnode(dn, 0, FTAG, &db, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(db, tx);
mzap_phys_t *zp = db->db_data;
zp->mz_block_type = ZBT_MICRO;
zp->mz_salt =
((uintptr_t)db ^ (uintptr_t)tx ^ (dn->dn_object << 1)) | 1ULL;
zp->mz_normflags = normflags;
if (flags != 0) {
zap_t *zap;
/* Only fat zap supports flags; upgrade immediately. */
VERIFY0(zap_lockdir_impl(db, FTAG, tx, RW_WRITER,
B_FALSE, B_FALSE, &zap));
VERIFY0(mzap_upgrade(&zap, FTAG, tx, flags));
zap_unlockdir(zap, FTAG);
} else {
dmu_buf_rele(db, FTAG);
}
}
static uint64_t
zap_create_impl(objset_t *os, int normflags, zap_flags_t flags,
dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, int dnodesize,
dnode_t **allocated_dnode, const void *tag, dmu_tx_t *tx)
{
uint64_t obj;
ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_ZAP);
if (allocated_dnode == NULL) {
dnode_t *dn;
obj = dmu_object_alloc_hold(os, ot, 1ULL << leaf_blockshift,
indirect_blockshift, bonustype, bonuslen, dnodesize,
&dn, FTAG, tx);
mzap_create_impl(dn, normflags, flags, tx);
dnode_rele(dn, FTAG);
} else {
obj = dmu_object_alloc_hold(os, ot, 1ULL << leaf_blockshift,
indirect_blockshift, bonustype, bonuslen, dnodesize,
allocated_dnode, tag, tx);
mzap_create_impl(*allocated_dnode, normflags, flags, tx);
}
return (obj);
}
int
zap_create_claim(objset_t *os, uint64_t obj, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_claim_dnsize(os, obj, ot, bonustype, bonuslen,
0, tx));
}
int
zap_create_claim_dnsize(objset_t *os, uint64_t obj, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
return (zap_create_claim_norm_dnsize(os, obj,
0, ot, bonustype, bonuslen, dnodesize, tx));
}
int
zap_create_claim_norm(objset_t *os, uint64_t obj, int normflags,
dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_claim_norm_dnsize(os, obj, normflags, ot, bonustype,
bonuslen, 0, tx));
}
int
zap_create_claim_norm_dnsize(objset_t *os, uint64_t obj, int normflags,
dmu_object_type_t ot, dmu_object_type_t bonustype, int bonuslen,
int dnodesize, dmu_tx_t *tx)
{
dnode_t *dn;
int error;
ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_ZAP);
error = dmu_object_claim_dnsize(os, obj, ot, 0, bonustype, bonuslen,
dnodesize, tx);
if (error != 0)
return (error);
error = dnode_hold(os, obj, FTAG, &dn);
if (error != 0)
return (error);
mzap_create_impl(dn, normflags, 0, tx);
dnode_rele(dn, FTAG);
return (0);
}
uint64_t
zap_create(objset_t *os, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_norm(os, 0, ot, bonustype, bonuslen, tx));
}
uint64_t
zap_create_dnsize(objset_t *os, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
return (zap_create_norm_dnsize(os, 0, ot, bonustype, bonuslen,
dnodesize, tx));
}
uint64_t
zap_create_norm(objset_t *os, int normflags, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_norm_dnsize(os, normflags, ot, bonustype, bonuslen,
0, tx));
}
uint64_t
zap_create_norm_dnsize(objset_t *os, int normflags, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
return (zap_create_impl(os, normflags, 0, ot, 0, 0,
bonustype, bonuslen, dnodesize, NULL, NULL, tx));
}
uint64_t
zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_flags_dnsize(os, normflags, flags, ot,
leaf_blockshift, indirect_blockshift, bonustype, bonuslen, 0, tx));
}
uint64_t
zap_create_flags_dnsize(objset_t *os, int normflags, zap_flags_t flags,
dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
return (zap_create_impl(os, normflags, flags, ot, leaf_blockshift,
indirect_blockshift, bonustype, bonuslen, dnodesize, NULL, NULL,
tx));
}
/*
* Create a zap object and return a pointer to the newly allocated dnode via
* the allocated_dnode argument. The returned dnode will be held and the
* caller is responsible for releasing the hold by calling dnode_rele().
*/
uint64_t
zap_create_hold(objset_t *os, int normflags, zap_flags_t flags,
dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, int dnodesize,
dnode_t **allocated_dnode, const void *tag, dmu_tx_t *tx)
{
return (zap_create_impl(os, normflags, flags, ot, leaf_blockshift,
indirect_blockshift, bonustype, bonuslen, dnodesize,
allocated_dnode, tag, tx));
}
int
zap_destroy(objset_t *os, uint64_t zapobj, dmu_tx_t *tx)
{
/*
* dmu_object_free will free the object number and free the
* data. Freeing the data will cause our pageout function to be
* called, which will destroy our data (zap_leaf_t's and zap_t).
*/
return (dmu_object_free(os, zapobj, tx));
}
void
zap_evict_sync(void *dbu)
{
zap_t *zap = dbu;
rw_destroy(&zap->zap_rwlock);
if (zap->zap_ismicro)
mze_destroy(zap);
else
mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
kmem_free(zap, sizeof (zap_t));
}
int
zap_count(objset_t *os, uint64_t zapobj, uint64_t *count)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
if (!zap->zap_ismicro) {
err = fzap_count(zap, count);
} else {
*count = zap->zap_m.zap_num_entries;
}
zap_unlockdir(zap, FTAG);
return (err);
}
/*
* zn may be NULL; if not specified, it will be computed if needed.
* See also the comment above zap_entry_normalization_conflict().
*/
static boolean_t
mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze,
zfs_btree_index_t *idx)
{
boolean_t allocdzn = B_FALSE;
mzap_ent_t *other;
zfs_btree_index_t oidx;
if (zap->zap_normflags == 0)
return (B_FALSE);
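/*
 * Scan the neighboring entries that share this hash value, first
 * backward and then forward, looking for a name that matches under
 * normalization.
 */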
for (other = zfs_btree_prev(&zap->zap_m.zap_tree, idx, &oidx);
other && other->mze_hash == mze->mze_hash;
other = zfs_btree_prev(&zap->zap_m.zap_tree, &oidx, &oidx)) {
if (zn == NULL) {
zn = zap_name_alloc_str(zap,
MZE_PHYS(zap, mze)->mze_name, MT_NORMALIZE);
allocdzn = B_TRUE;
}
if (zap_match(zn, MZE_PHYS(zap, other)->mze_name)) {
if (allocdzn)
zap_name_free(zn);
return (B_TRUE);
}
}
for (other = zfs_btree_next(&zap->zap_m.zap_tree, idx, &oidx);
other && other->mze_hash == mze->mze_hash;
other = zfs_btree_next(&zap->zap_m.zap_tree, &oidx, &oidx)) {
if (zn == NULL) {
zn = zap_name_alloc_str(zap,
MZE_PHYS(zap, mze)->mze_name, MT_NORMALIZE);
allocdzn = B_TRUE;
}
if (zap_match(zn, MZE_PHYS(zap, other)->mze_name)) {
if (allocdzn)
zap_name_free(zn);
return (B_TRUE);
}
}
if (allocdzn)
zap_name_free(zn);
return (B_FALSE);
}
/*
* Routines for manipulating attributes.
*/
int
zap_lookup(objset_t *os, uint64_t zapobj, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf)
{
return (zap_lookup_norm(os, zapobj, name, integer_size,
num_integers, buf, 0, NULL, 0, NULL));
}
static int
zap_lookup_impl(zap_t *zap, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf,
matchtype_t mt, char *realname, int rn_len,
boolean_t *ncp)
{
int err = 0;
zap_name_t *zn = zap_name_alloc_str(zap, name, mt);
if (zn == NULL)
return (SET_ERROR(ENOTSUP));
if (!zap->zap_ismicro) {
err = fzap_lookup(zn, integer_size, num_integers, buf,
realname, rn_len, ncp);
} else {
zfs_btree_index_t idx;
mzap_ent_t *mze = mze_find(zn, &idx);
if (mze == NULL) {
err = SET_ERROR(ENOENT);
} else {
if (num_integers < 1) {
err = SET_ERROR(EOVERFLOW);
} else if (integer_size != 8) {
err = SET_ERROR(EINVAL);
} else {
*(uint64_t *)buf =
MZE_PHYS(zap, mze)->mze_value;
if (realname != NULL)
(void) strlcpy(realname,
MZE_PHYS(zap, mze)->mze_name,
rn_len);
if (ncp) {
*ncp = mzap_normalization_conflict(zap,
zn, mze, &idx);
}
}
}
}
zap_name_free(zn);
return (err);
}
int
zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf,
matchtype_t mt, char *realname, int rn_len,
boolean_t *ncp)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
err = zap_lookup_impl(zap, name, integer_size,
num_integers, buf, mt, realname, rn_len, ncp);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_prefetch(objset_t *os, uint64_t zapobj, const char *name)
{
zap_t *zap;
int err;
zap_name_t *zn;
err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err)
return (err);
zn = zap_name_alloc_str(zap, name, 0);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
fzap_prefetch(zn);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_lookup_by_dnode(dnode_t *dn, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf)
{
return (zap_lookup_norm_by_dnode(dn, name, integer_size,
num_integers, buf, 0, NULL, 0, NULL));
}
int
zap_lookup_norm_by_dnode(dnode_t *dn, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf,
matchtype_t mt, char *realname, int rn_len,
boolean_t *ncp)
{
zap_t *zap;
int err = zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE,
FTAG, &zap);
if (err != 0)
return (err);
err = zap_lookup_impl(zap, name, integer_size,
num_integers, buf, mt, realname, rn_len, ncp);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
fzap_prefetch(zn);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, uint64_t integer_size, uint64_t num_integers, void *buf)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_lookup(zn, integer_size, num_integers, buf,
NULL, 0, NULL);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_contains(objset_t *os, uint64_t zapobj, const char *name)
{
int err = zap_lookup_norm(os, zapobj, name, 0,
0, NULL, 0, NULL, 0, NULL);
if (err == EOVERFLOW || err == EINVAL)
err = 0; /* found, but skipped reading the value */
return (err);
}
int
zap_length(objset_t *os, uint64_t zapobj, const char *name,
uint64_t *integer_size, uint64_t *num_integers)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_str(zap, name, 0);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_length(zn, integer_size, num_integers);
} else {
zfs_btree_index_t idx;
mzap_ent_t *mze = mze_find(zn, &idx);
if (mze == NULL) {
err = SET_ERROR(ENOENT);
} else {
if (integer_size)
*integer_size = 8;
if (num_integers)
*num_integers = 1;
}
}
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, uint64_t *integer_size, uint64_t *num_integers)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_length(zn, integer_size, num_integers);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
static void
mzap_addent(zap_name_t *zn, uint64_t value)
{
zap_t *zap = zn->zn_zap;
uint16_t start = zap->zap_m.zap_alloc_next;
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
#ifdef ZFS_DEBUG
for (int i = 0; i < zap->zap_m.zap_num_chunks; i++) {
mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0);
}
#endif
uint32_t cd = mze_find_unused_cd(zap, zn->zn_hash);
/* given the limited size of the microzap, this can't happen */
ASSERT(cd < zap_maxcd(zap));
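/*
 * Look for a free chunk starting at the rotating allocation cursor;
 * if nothing is free from there to the end, wrap around and retry
 * from the beginning.
 */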
again:
for (uint16_t i = start; i < zap->zap_m.zap_num_chunks; i++) {
mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
if (mze->mze_name[0] == 0) {
mze->mze_value = value;
mze->mze_cd = cd;
(void) strlcpy(mze->mze_name, zn->zn_key_orig,
sizeof (mze->mze_name));
zap->zap_m.zap_num_entries++;
zap->zap_m.zap_alloc_next = i+1;
if (zap->zap_m.zap_alloc_next ==
zap->zap_m.zap_num_chunks)
zap->zap_m.zap_alloc_next = 0;
mze_insert(zap, i, zn->zn_hash);
return;
}
}
if (start != 0) {
start = 0;
goto again;
}
cmn_err(CE_PANIC, "out of entries!");
}
static int
zap_add_impl(zap_t *zap, const char *key,
int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx, const void *tag)
{
const uint64_t *intval = val;
int err = 0;
zap_name_t *zn = zap_name_alloc_str(zap, key, 0);
if (zn == NULL) {
zap_unlockdir(zap, tag);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_add(zn, integer_size, num_integers, val, tag, tx);
zap = zn->zn_zap; /* fzap_add() may change zap */
} else if (integer_size != 8 || num_integers != 1 ||
strlen(key) >= MZAP_NAME_LEN ||
!mze_canfit_fzap_leaf(zn, zn->zn_hash)) {
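/*
 * The entry cannot live in the microzap (the value is not a single
 * uint64, the name is too long, or the colliding entries could not
 * later fit in a single fat-zap leaf), so upgrade to a fat zap first
 * and then add through the fat-zap path.
 */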
err = mzap_upgrade(&zn->zn_zap, tag, tx, 0);
if (err == 0) {
err = fzap_add(zn, integer_size, num_integers, val,
tag, tx);
}
zap = zn->zn_zap; /* fzap_add() may change zap */
} else {
zfs_btree_index_t idx;
if (mze_find(zn, &idx) != NULL) {
err = SET_ERROR(EEXIST);
} else {
mzap_addent(zn, *intval);
}
}
ASSERT(zap == zn->zn_zap);
zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_add() failed */
zap_unlockdir(zap, tag);
return (err);
}
int
zap_add(objset_t *os, uint64_t zapobj, const char *key,
int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx)
{
zap_t *zap;
int err;
err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
/* zap_add_impl() calls zap_unlockdir() */
return (err);
}
int
zap_add_by_dnode(dnode_t *dn, const char *key,
int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx)
{
zap_t *zap;
int err;
err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
/* zap_add_impl() calls zap_unlockdir() */
return (err);
}
int
zap_add_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_add(zn, integer_size, num_integers, val, FTAG, tx);
zap = zn->zn_zap; /* fzap_add() may change zap */
zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_add() failed */
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_update(objset_t *os, uint64_t zapobj, const char *name,
int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
zap_t *zap;
const uint64_t *intval = val;
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_str(zap, name, 0);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_update(zn, integer_size, num_integers, val,
FTAG, tx);
zap = zn->zn_zap; /* fzap_update() may change zap */
} else if (integer_size != 8 || num_integers != 1 ||
strlen(name) >= MZAP_NAME_LEN) {
dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n",
(u_longlong_t)zapobj, integer_size,
(u_longlong_t)num_integers, name);
err = mzap_upgrade(&zn->zn_zap, FTAG, tx, 0);
if (err == 0) {
err = fzap_update(zn, integer_size, num_integers,
val, FTAG, tx);
}
zap = zn->zn_zap; /* fzap_update() may change zap */
} else {
zfs_btree_index_t idx;
mzap_ent_t *mze = mze_find(zn, &idx);
if (mze != NULL) {
MZE_PHYS(zap, mze)->mze_value = *intval;
} else {
mzap_addent(zn, *intval);
}
}
ASSERT(zap == zn->zn_zap);
zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_upgrade() failed */
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_update_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints,
int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_update(zn, integer_size, num_integers, val, FTAG, tx);
zap = zn->zn_zap; /* fzap_update() may change zap */
zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_update() failed */
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_remove(objset_t *os, uint64_t zapobj, const char *name, dmu_tx_t *tx)
{
return (zap_remove_norm(os, zapobj, name, 0, tx));
}
static int
zap_remove_impl(zap_t *zap, const char *name,
matchtype_t mt, dmu_tx_t *tx)
{
int err = 0;
zap_name_t *zn = zap_name_alloc_str(zap, name, mt);
if (zn == NULL)
return (SET_ERROR(ENOTSUP));
if (!zap->zap_ismicro) {
err = fzap_remove(zn, tx);
} else {
zfs_btree_index_t idx;
mzap_ent_t *mze = mze_find(zn, &idx);
if (mze == NULL) {
err = SET_ERROR(ENOENT);
} else {
zap->zap_m.zap_num_entries--;
memset(MZE_PHYS(zap, mze), 0, sizeof (mzap_ent_phys_t));
zfs_btree_remove_idx(&zap->zap_m.zap_tree, &idx);
}
}
zap_name_free(zn);
return (err);
}
int
zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
matchtype_t mt, dmu_tx_t *tx)
{
zap_t *zap;
int err;
err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
if (err)
return (err);
err = zap_remove_impl(zap, name, mt, tx);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_remove_by_dnode(dnode_t *dn, const char *name, dmu_tx_t *tx)
{
zap_t *zap;
int err;
err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
if (err)
return (err);
err = zap_remove_impl(zap, name, 0, tx);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, dmu_tx_t *tx)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_remove(zn, tx);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
/*
* Routines for iterating over the attributes.
*/
static void
zap_cursor_init_impl(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
uint64_t serialized, boolean_t prefetch)
{
zc->zc_objset = os;
zc->zc_zap = NULL;
zc->zc_leaf = NULL;
zc->zc_zapobj = zapobj;
zc->zc_serialized = serialized;
zc->zc_hash = 0;
zc->zc_cd = 0;
zc->zc_prefetch = prefetch;
}
void
zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
uint64_t serialized)
{
zap_cursor_init_impl(zc, os, zapobj, serialized, B_TRUE);
}
/*
* Initialize a cursor at the beginning of the ZAP object. The entire
* ZAP object will be prefetched.
*/
void
zap_cursor_init(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
{
zap_cursor_init_impl(zc, os, zapobj, 0, B_TRUE);
}
/*
* Initialize a cursor at the beginning, but request that we not prefetch
* the entire ZAP object.
*/
void
zap_cursor_init_noprefetch(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
{
zap_cursor_init_impl(zc, os, zapobj, 0, B_FALSE);
}
void
zap_cursor_fini(zap_cursor_t *zc)
{
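/*
 * zap_unlockdir() and zap_put_leaf() drop locks that this cursor does
 * not currently hold, so reacquire them as a reader before releasing
 * the underlying holds.
 */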
if (zc->zc_zap) {
rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
zap_unlockdir(zc->zc_zap, NULL);
zc->zc_zap = NULL;
}
if (zc->zc_leaf) {
rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
zap_put_leaf(zc->zc_leaf);
zc->zc_leaf = NULL;
}
zc->zc_objset = NULL;
}
uint64_t
zap_cursor_serialize(zap_cursor_t *zc)
{
if (zc->zc_hash == -1ULL)
return (-1ULL);
if (zc->zc_zap == NULL)
return (zc->zc_serialized);
ASSERT((zc->zc_hash & zap_maxcd(zc->zc_zap)) == 0);
ASSERT(zc->zc_cd < zap_maxcd(zc->zc_zap));
/*
* We want to keep the high 32 bits of the cursor zero if we can, so
* that 32-bit programs can access this. So we usually use a small
* (28-bit) hash value, which leaves room for 4 bits of cd in the low
* 32 bits of the cursor.
*
* [ collision differentiator | zap_hashbits()-bit hash value ]
*/
return ((zc->zc_hash >> (64 - zap_hashbits(zc->zc_zap))) |
((uint64_t)zc->zc_cd << zap_hashbits(zc->zc_zap)));
}
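/*
 * Typical consumer pattern (a sketch using only functions defined in
 * this file): save the value returned by zap_cursor_serialize(), call
 * zap_cursor_fini(), and later resume the walk by passing the saved
 * value to zap_cursor_init_serialized().
 */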
int
zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
{
int err;
if (zc->zc_hash == -1ULL)
return (SET_ERROR(ENOENT));
if (zc->zc_zap == NULL) {
int hb;
err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL,
RW_READER, TRUE, FALSE, NULL, &zc->zc_zap);
if (err != 0)
return (err);
/*
* To support zap_cursor_init_serialized, advance, retrieve,
* we must add to the existing zc_cd, which may already
* be 1 due to the zap_cursor_advance.
*/
ASSERT(zc->zc_hash == 0);
hb = zap_hashbits(zc->zc_zap);
zc->zc_hash = zc->zc_serialized << (64 - hb);
zc->zc_cd += zc->zc_serialized >> hb;
if (zc->zc_cd >= zap_maxcd(zc->zc_zap)) /* corrupt serialized */
zc->zc_cd = 0;
} else {
rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
}
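/*
 * Fat zaps are iterated through the leaf code; microzaps are walked
 * via the in-memory B-tree, which is ordered by (hash, cd).
 */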
if (!zc->zc_zap->zap_ismicro) {
err = fzap_cursor_retrieve(zc->zc_zap, zc, za);
} else {
zfs_btree_index_t idx;
mzap_ent_t mze_tofind;
mze_tofind.mze_hash = zc->zc_hash >> 32;
mze_tofind.mze_cd = zc->zc_cd;
mzap_ent_t *mze = zfs_btree_find(&zc->zc_zap->zap_m.zap_tree,
&mze_tofind, &idx);
if (mze == NULL) {
mze = zfs_btree_next(&zc->zc_zap->zap_m.zap_tree,
&idx, &idx);
}
if (mze) {
mzap_ent_phys_t *mzep = MZE_PHYS(zc->zc_zap, mze);
ASSERT3U(mze->mze_cd, ==, mzep->mze_cd);
za->za_normalization_conflict =
mzap_normalization_conflict(zc->zc_zap, NULL,
mze, &idx);
za->za_integer_length = 8;
za->za_num_integers = 1;
za->za_first_integer = mzep->mze_value;
(void) strlcpy(za->za_name, mzep->mze_name,
sizeof (za->za_name));
zc->zc_hash = (uint64_t)mze->mze_hash << 32;
zc->zc_cd = mze->mze_cd;
err = 0;
} else {
zc->zc_hash = -1ULL;
err = SET_ERROR(ENOENT);
}
}
rw_exit(&zc->zc_zap->zap_rwlock);
return (err);
}
void
zap_cursor_advance(zap_cursor_t *zc)
{
if (zc->zc_hash == -1ULL)
return;
zc->zc_cd++;
}
int
zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
memset(zs, 0, sizeof (zap_stats_t));
if (zap->zap_ismicro) {
zs->zs_blocksize = zap->zap_dbuf->db_size;
zs->zs_num_entries = zap->zap_m.zap_num_entries;
zs->zs_num_blocks = 1;
} else {
fzap_get_stats(zap, zs);
}
zap_unlockdir(zap, FTAG);
return (0);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zap_create);
EXPORT_SYMBOL(zap_create_dnsize);
EXPORT_SYMBOL(zap_create_norm);
EXPORT_SYMBOL(zap_create_norm_dnsize);
EXPORT_SYMBOL(zap_create_flags);
EXPORT_SYMBOL(zap_create_flags_dnsize);
EXPORT_SYMBOL(zap_create_claim);
EXPORT_SYMBOL(zap_create_claim_norm);
EXPORT_SYMBOL(zap_create_claim_norm_dnsize);
EXPORT_SYMBOL(zap_create_hold);
EXPORT_SYMBOL(zap_destroy);
EXPORT_SYMBOL(zap_lookup);
EXPORT_SYMBOL(zap_lookup_by_dnode);
EXPORT_SYMBOL(zap_lookup_norm);
EXPORT_SYMBOL(zap_lookup_uint64);
EXPORT_SYMBOL(zap_contains);
EXPORT_SYMBOL(zap_prefetch);
EXPORT_SYMBOL(zap_prefetch_uint64);
EXPORT_SYMBOL(zap_add);
EXPORT_SYMBOL(zap_add_by_dnode);
EXPORT_SYMBOL(zap_add_uint64);
EXPORT_SYMBOL(zap_update);
EXPORT_SYMBOL(zap_update_uint64);
EXPORT_SYMBOL(zap_length);
EXPORT_SYMBOL(zap_length_uint64);
EXPORT_SYMBOL(zap_remove);
EXPORT_SYMBOL(zap_remove_by_dnode);
EXPORT_SYMBOL(zap_remove_norm);
EXPORT_SYMBOL(zap_remove_uint64);
EXPORT_SYMBOL(zap_count);
EXPORT_SYMBOL(zap_value_search);
EXPORT_SYMBOL(zap_join);
EXPORT_SYMBOL(zap_join_increment);
EXPORT_SYMBOL(zap_add_int);
EXPORT_SYMBOL(zap_remove_int);
EXPORT_SYMBOL(zap_lookup_int);
EXPORT_SYMBOL(zap_increment_int);
EXPORT_SYMBOL(zap_add_int_key);
EXPORT_SYMBOL(zap_lookup_int_key);
EXPORT_SYMBOL(zap_increment);
EXPORT_SYMBOL(zap_cursor_init);
EXPORT_SYMBOL(zap_cursor_fini);
EXPORT_SYMBOL(zap_cursor_retrieve);
EXPORT_SYMBOL(zap_cursor_advance);
EXPORT_SYMBOL(zap_cursor_serialize);
EXPORT_SYMBOL(zap_cursor_init_serialized);
EXPORT_SYMBOL(zap_get_stats);
/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , zap_micro_max_size, INT, ZMOD_RW,
"Maximum micro ZAP size, before converting to a fat ZAP, in bytes");
#endif
diff --git a/sys/contrib/openzfs/module/zfs/zfs_fm.c b/sys/contrib/openzfs/module/zfs/zfs_fm.c
index bdd0e96c327a..c42ef048dd74 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_fm.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_fm.c
@@ -1,1598 +1,1597 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012,2021 by Delphix. All rights reserved.
*/
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>
/*
* This general routine is responsible for generating all the different ZFS
* ereports. The payload is dependent on the class, and which arguments are
* supplied to the function:
*
* EREPORT POOL VDEV IO
* block X X X
* data X X
* device X X
* pool X
*
* If we are in a loading state, all errors are chained together by the same
* SPA-wide ENA (Error Numeric Association).
*
* For isolated I/O requests, we get the ENA from the zio_t. The propagation
* gets very complicated due to RAID-Z, gang blocks, and vdev caching. We want
* to chain together all ereports associated with a logical piece of data. For
* read I/Os, there are basically three 'types' of I/O, which form a roughly
* layered diagram:
*
* +---------------+
* | Aggregate I/O | No associated logical data or device
* +---------------+
* |
* V
* +---------------+ Reads associated with a piece of logical data.
* | Read I/O | This includes reads on behalf of RAID-Z,
* +---------------+ mirrors, gang blocks, retries, etc.
* |
* V
* +---------------+ Reads associated with a particular device, but
* | Physical I/O | no logical data. Issued as part of vdev caching
* +---------------+ and I/O aggregation.
*
* Note that 'physical I/O' here is not the same terminology as used in the rest
* of ZIO. Typically, 'physical I/O' simply means that there is no attached
* blockpointer. But I/O with no associated block pointer can still be related
* to a logical piece of data (i.e. RAID-Z requests).
*
* Purely physical I/Os always have unique ENAs. They are not related to a
* particular piece of logical data, and therefore cannot be chained together.
* We still generate an ereport, but the DE doesn't correlate it with any
* logical piece of data. When such an I/O fails, the delegated I/O requests
* will issue a retry, which will trigger the 'real' ereport with the correct
* ENA.
*
* We keep track of the ENA for a ZIO chain through the 'io_logical' member.
* When a new logical I/O is issued, we set this to point to itself. Child I/Os
* then inherit this pointer, so that once it is set, subsequent failures
* will use the same ENA. For vdev cache fill and queue aggregation I/O,
* this pointer is set to NULL, and no ereport will be generated (since it
* doesn't actually correspond to any particular device or piece of data,
* and the caller will always retry without caching or queueing anyway).
*
* For checksum errors, we want to include more information about the actual
* error which occurs. Accordingly, we build an ereport when the error is
* noticed, but instead of sending it in immediately, we hang it off of the
* io_cksum_report field of the logical IO. When the logical IO completes
* (successfully or not), zfs_ereport_finish_checksum() is called with the
* good and bad versions of the buffer (if available), and we annotate the
* ereport with information about the differences.
*/
#ifdef _KERNEL
/*
* Duplicate ereport Detection
*
* Some ereports are retained momentarily for detecting duplicates. These
* are kept in a recent_events_node_t in both a time-ordered list and an AVL
* tree of recent unique ereports.
*
* The lifespan of these recent ereports is bounded (15 mins) and a cleaner
* task is used to purge stale entries.
*/
static list_t recent_events_list;
static avl_tree_t recent_events_tree;
static kmutex_t recent_events_lock;
static taskqid_t recent_events_cleaner_tqid;
/*
* Each node is about 128 bytes so 2,000 would consume 1/4 MiB.
*
* This setting can be changed dynamically and setting it to zero
* disables duplicate detection.
*/
static unsigned int zfs_zevent_retain_max = 2000;
/*
* The lifespan for a recent ereport entry. The default of 15 minutes is
* intended to outlive the zfs diagnosis engine's threshold of 10 errors
* over a period of 10 minutes.
*/
static unsigned int zfs_zevent_retain_expire_secs = 900;
typedef enum zfs_subclass {
ZSC_IO,
ZSC_DATA,
ZSC_CHECKSUM
} zfs_subclass_t;
typedef struct {
/* common criteria */
uint64_t re_pool_guid;
uint64_t re_vdev_guid;
int re_io_error;
uint64_t re_io_size;
uint64_t re_io_offset;
zfs_subclass_t re_subclass;
zio_priority_t re_io_priority;
/* logical zio criteria (optional) */
zbookmark_phys_t re_io_bookmark;
/* internal state */
avl_node_t re_tree_link;
list_node_t re_list_link;
uint64_t re_timestamp;
} recent_events_node_t;
static int
recent_events_compare(const void *a, const void *b)
{
const recent_events_node_t *node1 = a;
const recent_events_node_t *node2 = b;
int cmp;
/*
* The comparison order here is somewhat arbitrary.
* What's important is that if all the criteria match, then it
* is a duplicate (i.e. compare returns 0).
*/
if ((cmp = TREE_CMP(node1->re_subclass, node2->re_subclass)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_pool_guid, node2->re_pool_guid)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_vdev_guid, node2->re_vdev_guid)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_error, node2->re_io_error)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_priority, node2->re_io_priority)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_size, node2->re_io_size)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_offset, node2->re_io_offset)) != 0)
return (cmp);
const zbookmark_phys_t *zb1 = &node1->re_io_bookmark;
const zbookmark_phys_t *zb2 = &node2->re_io_bookmark;
if ((cmp = TREE_CMP(zb1->zb_objset, zb2->zb_objset)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_object, zb2->zb_object)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_level, zb2->zb_level)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_blkid, zb2->zb_blkid)) != 0)
return (cmp);
return (0);
}
/*
* workaround: vdev properties don't have inheritance
*/
static uint64_t
vdev_prop_get_inherited(vdev_t *vd, vdev_prop_t prop)
{
uint64_t propdef, propval;
propdef = vdev_prop_default_numeric(prop);
switch (prop) {
case VDEV_PROP_CHECKSUM_N:
propval = vd->vdev_checksum_n;
break;
case VDEV_PROP_CHECKSUM_T:
propval = vd->vdev_checksum_t;
break;
case VDEV_PROP_IO_N:
propval = vd->vdev_io_n;
break;
case VDEV_PROP_IO_T:
propval = vd->vdev_io_t;
break;
default:
propval = propdef;
break;
}
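/*
 * An explicit (non-default) setting on this vdev wins; otherwise
 * recurse toward the root vdev looking for an inherited value.
 */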
if (propval != propdef)
return (propval);
if (vd->vdev_parent == NULL)
return (propdef);
return (vdev_prop_get_inherited(vd->vdev_parent, prop));
}
static void zfs_ereport_schedule_cleaner(void);
/*
* background task to clean stale recent event nodes.
*/
static void
zfs_ereport_cleaner(void *arg)
{
recent_events_node_t *entry;
uint64_t now = gethrtime();
/*
* purge expired entries
*/
mutex_enter(&recent_events_lock);
while ((entry = list_tail(&recent_events_list)) != NULL) {
uint64_t age = NSEC2SEC(now - entry->re_timestamp);
if (age <= zfs_zevent_retain_expire_secs)
break;
/* remove expired node */
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
}
/* Restart the cleaner if more entries remain */
recent_events_cleaner_tqid = 0;
if (!list_is_empty(&recent_events_list))
zfs_ereport_schedule_cleaner();
mutex_exit(&recent_events_lock);
}
static void
zfs_ereport_schedule_cleaner(void)
{
ASSERT(MUTEX_HELD(&recent_events_lock));
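/*
 * Schedule the next sweep one second past the retention window so
 * the oldest surviving entry is guaranteed to have expired by then.
 */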
uint64_t timeout = SEC2NSEC(zfs_zevent_retain_expire_secs + 1);
recent_events_cleaner_tqid = taskq_dispatch_delay(
system_delay_taskq, zfs_ereport_cleaner, NULL, TQ_SLEEP,
ddi_get_lbolt() + NSEC_TO_TICK(timeout));
}
/*
* Clear entries for a given vdev or all vdevs in a pool when vdev == NULL
*/
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
uint64_t vdev_guid, pool_guid;
ASSERT(vd != NULL || spa != NULL);
if (vd == NULL) {
vdev_guid = 0;
pool_guid = spa_guid(spa);
} else {
vdev_guid = vd->vdev_guid;
pool_guid = 0;
}
mutex_enter(&recent_events_lock);
recent_events_node_t *next = list_head(&recent_events_list);
while (next != NULL) {
recent_events_node_t *entry = next;
next = list_next(&recent_events_list, next);
if (entry->re_vdev_guid == vdev_guid ||
entry->re_pool_guid == pool_guid) {
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
}
}
mutex_exit(&recent_events_lock);
}
/*
* Check if an ereport would be a duplicate of one recently posted.
*
* An ereport is considered a duplicate if the set of criteria in
* recent_events_node_t all match.
*
* Only FM_EREPORT_ZFS_IO, FM_EREPORT_ZFS_DATA, and FM_EREPORT_ZFS_CHECKSUM
* are candidates for duplicate checking.
*/
static boolean_t
zfs_ereport_is_duplicate(const char *subclass, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t offset, uint64_t size)
{
recent_events_node_t search = {0}, *entry;
if (vd == NULL || zio == NULL)
return (B_FALSE);
if (zfs_zevent_retain_max == 0)
return (B_FALSE);
if (strcmp(subclass, FM_EREPORT_ZFS_IO) == 0)
search.re_subclass = ZSC_IO;
else if (strcmp(subclass, FM_EREPORT_ZFS_DATA) == 0)
search.re_subclass = ZSC_DATA;
else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0)
search.re_subclass = ZSC_CHECKSUM;
else
return (B_FALSE);
search.re_pool_guid = spa_guid(spa);
search.re_vdev_guid = vd->vdev_guid;
search.re_io_error = zio->io_error;
search.re_io_priority = zio->io_priority;
/* if a size is supplied, use it instead of what's in the zio */
if (size) {
search.re_io_size = size;
search.re_io_offset = offset;
} else {
search.re_io_size = zio->io_size;
search.re_io_offset = zio->io_offset;
}
/* grab optional logical zio criteria */
if (zb != NULL) {
search.re_io_bookmark.zb_objset = zb->zb_objset;
search.re_io_bookmark.zb_object = zb->zb_object;
search.re_io_bookmark.zb_level = zb->zb_level;
search.re_io_bookmark.zb_blkid = zb->zb_blkid;
}
uint64_t now = gethrtime();
mutex_enter(&recent_events_lock);
/* check if we have seen this one recently */
entry = avl_find(&recent_events_tree, &search, NULL);
if (entry != NULL) {
uint64_t age = NSEC2SEC(now - entry->re_timestamp);
/*
* There is still an active cleaner (since we're here).
* Reset the last seen time for this duplicate entry
* so that its lifespan gets extended.
*/
list_remove(&recent_events_list, entry);
list_insert_head(&recent_events_list, entry);
entry->re_timestamp = now;
zfs_zevent_track_duplicate();
mutex_exit(&recent_events_lock);
return (age <= zfs_zevent_retain_expire_secs);
}
if (avl_numnodes(&recent_events_tree) >= zfs_zevent_retain_max) {
/* recycle oldest node */
entry = list_tail(&recent_events_list);
ASSERT(entry != NULL);
list_remove(&recent_events_list, entry);
avl_remove(&recent_events_tree, entry);
} else {
entry = kmem_alloc(sizeof (recent_events_node_t), KM_SLEEP);
}
/* record this as a recent ereport */
*entry = search;
avl_add(&recent_events_tree, entry);
list_insert_head(&recent_events_list, entry);
entry->re_timestamp = now;
/* Start a cleaner if not already scheduled */
if (recent_events_cleaner_tqid == 0)
zfs_ereport_schedule_cleaner();
mutex_exit(&recent_events_lock);
return (B_FALSE);
}
void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
if (nvl)
fm_nvlist_destroy(nvl, FM_NVA_FREE);
if (detector)
fm_nvlist_destroy(detector, FM_NVA_FREE);
}
/*
* We want to rate limit ZIO delay, deadman, and checksum events so as to not
* flood zevent consumers when a disk is acting up.
*
* Returns 1 if we're ratelimiting, 0 if not.
*/
static int
zfs_is_ratelimiting_event(const char *subclass, vdev_t *vd)
{
int rc = 0;
/*
* zfs_ratelimit() returns 1 if we're *not* ratelimiting and 0 if we
* are. Invert it to get our return value.
*/
if (strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) {
rc = !zfs_ratelimit(&vd->vdev_delay_rl);
} else if (strcmp(subclass, FM_EREPORT_ZFS_DEADMAN) == 0) {
rc = !zfs_ratelimit(&vd->vdev_deadman_rl);
} else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0) {
rc = !zfs_ratelimit(&vd->vdev_checksum_rl);
}
if (rc) {
/* We're rate limiting */
fm_erpt_dropped_increment();
}
return (rc);
}
/*
* Return B_TRUE if the event actually posted, B_FALSE if not.
*/
static boolean_t
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
const char *subclass, spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
zio_t *zio, uint64_t stateoroffset, uint64_t size)
{
nvlist_t *ereport, *detector;
uint64_t ena;
char class[64];
if ((ereport = fm_nvlist_create(NULL)) == NULL)
return (B_FALSE);
if ((detector = fm_nvlist_create(NULL)) == NULL) {
fm_nvlist_destroy(ereport, FM_NVA_FREE);
return (B_FALSE);
}
/*
* Serialize ereport generation
*/
mutex_enter(&spa->spa_errlist_lock);
/*
* Determine the ENA to use for this event. If we are in a loading
* state, use a SPA-wide ENA. Otherwise, if we are in an I/O state, use
* a root zio-wide ENA. Otherwise, simply use a unique ENA.
*/
if (spa_load_state(spa) != SPA_LOAD_NONE) {
if (spa->spa_ena == 0)
spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
ena = spa->spa_ena;
} else if (zio != NULL && zio->io_logical != NULL) {
if (zio->io_logical->io_ena == 0)
zio->io_logical->io_ena =
fm_ena_generate(0, FM_ENA_FMT1);
ena = zio->io_logical->io_ena;
} else {
ena = fm_ena_generate(0, FM_ENA_FMT1);
}
/*
* Construct the full class, detector, and other standard FMA fields.
*/
(void) snprintf(class, sizeof (class), "%s.%s",
ZFS_ERROR_CLASS, subclass);
fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
vd != NULL ? vd->vdev_guid : 0);
fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);
/*
* Construct the per-ereport payload, depending on which parameters are
* passed in.
*/
/*
* Generic payload members common to all ereports.
*/
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_POOL, DATA_TYPE_STRING, spa_name(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, DATA_TYPE_UINT64, spa_guid(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, DATA_TYPE_UINT64,
(uint64_t)spa_state(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
(int32_t)spa_load_state(spa), NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
DATA_TYPE_STRING,
spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
FM_EREPORT_FAILMODE_WAIT :
spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
NULL);
if (vd != NULL) {
vdev_t *pvd = vd->vdev_parent;
vdev_queue_t *vq = &vd->vdev_queue;
vdev_stat_t *vs = &vd->vdev_stat;
vdev_t *spare_vd;
uint64_t *spare_guids;
char **spare_paths;
int i, spare_count;
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
DATA_TYPE_UINT64, vd->vdev_guid,
FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
if (vd->vdev_path != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
DATA_TYPE_STRING, vd->vdev_path, NULL);
if (vd->vdev_devid != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
DATA_TYPE_STRING, vd->vdev_devid, NULL);
if (vd->vdev_fru != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
DATA_TYPE_STRING, vd->vdev_fru, NULL);
if (vd->vdev_enc_sysfs_path != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
DATA_TYPE_STRING, vd->vdev_enc_sysfs_path, NULL);
if (vd->vdev_ashift)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT,
DATA_TYPE_UINT64, vd->vdev_ashift, NULL);
if (vq != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS,
DATA_TYPE_UINT64, vq->vq_io_complete_ts, NULL);
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS,
DATA_TYPE_UINT64, vq->vq_io_delta_ts, NULL);
}
if (vs != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS,
DATA_TYPE_UINT64, vs->vs_read_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS,
DATA_TYPE_UINT64, vs->vs_write_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS,
DATA_TYPE_UINT64, vs->vs_checksum_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DELAYS,
DATA_TYPE_UINT64, vs->vs_slow_ios,
NULL);
}
if (pvd != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
DATA_TYPE_UINT64, pvd->vdev_guid,
FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
NULL);
if (pvd->vdev_path)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
DATA_TYPE_STRING, pvd->vdev_path, NULL);
if (pvd->vdev_devid)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
DATA_TYPE_STRING, pvd->vdev_devid, NULL);
}
spare_count = spa->spa_spares.sav_count;
spare_paths = kmem_zalloc(sizeof (char *) * spare_count,
KM_SLEEP);
spare_guids = kmem_zalloc(sizeof (uint64_t) * spare_count,
KM_SLEEP);
for (i = 0; i < spare_count; i++) {
spare_vd = spa->spa_spares.sav_vdevs[i];
if (spare_vd) {
spare_paths[i] = spare_vd->vdev_path;
spare_guids[i] = spare_vd->vdev_guid;
}
}
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS,
DATA_TYPE_STRING_ARRAY, spare_count, spare_paths,
FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS,
DATA_TYPE_UINT64_ARRAY, spare_count, spare_guids, NULL);
kmem_free(spare_guids, sizeof (uint64_t) * spare_count);
kmem_free(spare_paths, sizeof (char *) * spare_count);
}
if (zio != NULL) {
/*
* Payload common to all I/Os.
*/
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
DATA_TYPE_INT32, zio->io_error, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
DATA_TYPE_INT32, zio->io_flags, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE,
DATA_TYPE_UINT32, zio->io_stage, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE,
DATA_TYPE_UINT32, zio->io_pipeline, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
DATA_TYPE_UINT64, zio->io_delay, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP,
DATA_TYPE_UINT64, zio->io_timestamp, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA,
DATA_TYPE_UINT64, zio->io_delta, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY,
DATA_TYPE_UINT32, zio->io_priority, NULL);
/*
* If the 'size' parameter is non-zero, it indicates this is a
* RAID-Z or other I/O where the physical offset and length are
* provided for us, instead of within the zio_t.
*/
if (vd != NULL) {
if (size)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
DATA_TYPE_UINT64, stateoroffset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
DATA_TYPE_UINT64, size, NULL);
else
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
DATA_TYPE_UINT64, zio->io_offset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
DATA_TYPE_UINT64, zio->io_size, NULL);
}
} else if (vd != NULL) {
/*
* If we have a vdev but no zio, this is a device fault, and the
* 'stateoroffset' parameter indicates the previous state of the
* vdev.
*/
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
DATA_TYPE_UINT64, stateoroffset, NULL);
}
/*
* Payload for I/Os with corresponding logical information.
*/
if (zb != NULL && (zio == NULL || zio->io_logical != NULL)) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
DATA_TYPE_UINT64, zb->zb_objset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
DATA_TYPE_UINT64, zb->zb_object,
FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
DATA_TYPE_INT64, zb->zb_level,
FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
DATA_TYPE_UINT64, zb->zb_blkid, NULL);
}
/*
* Payload for tuning the zed
*/
if (vd != NULL && strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0) {
uint64_t cksum_n, cksum_t;
cksum_n = vdev_prop_get_inherited(vd, VDEV_PROP_CHECKSUM_N);
if (cksum_n != vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N))
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_N,
DATA_TYPE_UINT64,
cksum_n,
NULL);
cksum_t = vdev_prop_get_inherited(vd, VDEV_PROP_CHECKSUM_T);
if (cksum_t != vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T))
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_T,
DATA_TYPE_UINT64,
cksum_t,
NULL);
}
if (vd != NULL && strcmp(subclass, FM_EREPORT_ZFS_IO) == 0) {
uint64_t io_n, io_t;
io_n = vdev_prop_get_inherited(vd, VDEV_PROP_IO_N);
if (io_n != vdev_prop_default_numeric(VDEV_PROP_IO_N))
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_IO_N,
DATA_TYPE_UINT64,
io_n,
NULL);
io_t = vdev_prop_get_inherited(vd, VDEV_PROP_IO_T);
if (io_t != vdev_prop_default_numeric(VDEV_PROP_IO_T))
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_IO_T,
DATA_TYPE_UINT64,
io_t,
NULL);
}
mutex_exit(&spa->spa_errlist_lock);
*ereport_out = ereport;
*detector_out = detector;
return (B_TRUE);
}
/* if it's <= 128 bytes, save the corruption directly */
#define ZFM_MAX_INLINE (128 / sizeof (uint64_t))
#define MAX_RANGES 16
typedef struct zfs_ecksum_info {
/* histograms of set and cleared bits by bit number in a 64-bit word */
uint8_t zei_histogram_set[sizeof (uint64_t) * NBBY];
uint8_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];
/* inline arrays of bits set and cleared. */
uint64_t zei_bits_set[ZFM_MAX_INLINE];
uint64_t zei_bits_cleared[ZFM_MAX_INLINE];
/*
* for each range, the number of bits set and cleared. The Hamming
* distance between the good and bad buffers is the sum of them all.
*/
uint32_t zei_range_sets[MAX_RANGES];
uint32_t zei_range_clears[MAX_RANGES];
struct zei_ranges {
uint32_t zr_start;
uint32_t zr_end;
} zei_ranges[MAX_RANGES];
size_t zei_range_count;
uint32_t zei_mingap;
uint32_t zei_allowed_mingap;
} zfs_ecksum_info_t;
static void
update_histogram(uint64_t value_arg, uint8_t *hist, uint32_t *count)
{
size_t i;
size_t bits = 0;
uint64_t value = BE_64(value_arg);
/* We store the bits in big-endian (largest-first) order */
for (i = 0; i < 64; i++) {
if (value & (1ull << i)) {
hist[63 - i]++;
++bits;
}
}
/* update the count of bits changed */
*count += bits;
}
/*
* We've now filled up the range array, and need to increase "mingap" and
* shrink the range list accordingly. zei_mingap is always the smallest
* distance between array entries, so we set the new_allowed_gap to be
* one greater than that. We then go through the list, joining together
* any ranges which are closer than the new_allowed_gap.
*
* By construction, at least one pair of ranges will be joined, so the list
* shrinks. We also update zei_mingap to the new smallest gap, to prepare
* for our next invocation.
*/
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
uint32_t mingap = UINT32_MAX;
uint32_t new_allowed_gap = eip->zei_mingap + 1;
size_t idx, output;
size_t max = eip->zei_range_count;
struct zei_ranges *r = eip->zei_ranges;
ASSERT3U(eip->zei_range_count, >, 0);
ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);
output = idx = 0;
while (idx < max - 1) {
uint32_t start = r[idx].zr_start;
uint32_t end = r[idx].zr_end;
while (idx < max - 1) {
idx++;
uint32_t nstart = r[idx].zr_start;
uint32_t nend = r[idx].zr_end;
uint32_t gap = nstart - end;
if (gap < new_allowed_gap) {
end = nend;
continue;
}
if (gap < mingap)
mingap = gap;
break;
}
r[output].zr_start = start;
r[output].zr_end = end;
output++;
}
ASSERT3U(output, <, eip->zei_range_count);
eip->zei_range_count = output;
eip->zei_mingap = mingap;
eip->zei_allowed_mingap = new_allowed_gap;
}
static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
struct zei_ranges *r = eip->zei_ranges;
size_t count = eip->zei_range_count;
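/*
 * If the range table is full, coalesce nearby ranges to make room
 * before recording this one.
 */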
if (count >= MAX_RANGES) {
zei_shrink_ranges(eip);
count = eip->zei_range_count;
}
if (count == 0) {
eip->zei_mingap = UINT32_MAX;
eip->zei_allowed_mingap = 1;
} else {
int gap = start - r[count - 1].zr_end;
if (gap < eip->zei_allowed_mingap) {
r[count - 1].zr_end = end;
return;
}
if (gap < eip->zei_mingap)
eip->zei_mingap = gap;
}
r[count].zr_start = start;
r[count].zr_end = end;
eip->zei_range_count++;
}
static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
struct zei_ranges *r = eip->zei_ranges;
size_t count = eip->zei_range_count;
size_t result = 0;
size_t idx;
for (idx = 0; idx < count; idx++)
result += (r[idx].zr_end - r[idx].zr_start);
return (result);
}
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
const abd_t *goodabd, const abd_t *badabd, size_t size,
boolean_t drop_if_identical)
{
const uint64_t *good;
const uint64_t *bad;
size_t nui64s = size / sizeof (uint64_t);
size_t inline_size;
int no_inline = 0;
size_t idx;
size_t range;
size_t offset = 0;
ssize_t start = -1;
zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);
/* don't do any annotation for injected checksum errors */
if (info != NULL && info->zbc_injected)
return (eip);
if (info != NULL && info->zbc_has_cksum) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
DATA_TYPE_UINT64_ARRAY,
sizeof (info->zbc_expected) / sizeof (uint64_t),
(uint64_t *)&info->zbc_expected,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
DATA_TYPE_UINT64_ARRAY,
sizeof (info->zbc_actual) / sizeof (uint64_t),
(uint64_t *)&info->zbc_actual,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
DATA_TYPE_STRING,
info->zbc_checksum_name,
NULL);
if (info->zbc_byteswapped) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
DATA_TYPE_BOOLEAN, 1,
NULL);
}
}
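/*
 * Without both the expected and the observed buffer there is nothing
 * to diff, so return the (possibly checksum-annotated) info as is.
 */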
if (badabd == NULL || goodabd == NULL)
return (eip);
ASSERT3U(nui64s, <=, UINT32_MAX);
ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, <=, UINT32_MAX);
good = (const uint64_t *) abd_borrow_buf_copy((abd_t *)goodabd, size);
bad = (const uint64_t *) abd_borrow_buf_copy((abd_t *)badabd, size);
/* build up the range list by comparing the two buffers. */
for (idx = 0; idx < nui64s; idx++) {
if (good[idx] == bad[idx]) {
if (start == -1)
continue;
zei_add_range(eip, start, idx);
start = -1;
} else {
if (start != -1)
continue;
start = idx;
}
}
if (start != -1)
zei_add_range(eip, start, idx);
/* See if it will fit in our inline buffers */
inline_size = zei_range_total_size(eip);
if (inline_size > ZFM_MAX_INLINE)
no_inline = 1;
/*
* If there is no change and we want to drop if the buffers are
* identical, do so.
*/
if (inline_size == 0 && drop_if_identical) {
kmem_free(eip, sizeof (*eip));
abd_return_buf((abd_t *)goodabd, (void *)good, size);
abd_return_buf((abd_t *)badabd, (void *)bad, size);
return (NULL);
}
/*
* Now walk through the ranges, filling in the details of the
* differences. Also convert our uint64_t-array offsets to byte
* offsets.
*/
for (range = 0; range < eip->zei_range_count; range++) {
size_t start = eip->zei_ranges[range].zr_start;
size_t end = eip->zei_ranges[range].zr_end;
for (idx = start; idx < end; idx++) {
uint64_t set, cleared;
/* bits set in bad, but not in good */
set = ((~good[idx]) & bad[idx]);
/* bits set in good, but not in bad */
cleared = (good[idx] & (~bad[idx]));
if (!no_inline) {
ASSERT3U(offset, <, inline_size);
eip->zei_bits_set[offset] = set;
eip->zei_bits_cleared[offset] = cleared;
offset++;
}
update_histogram(set, eip->zei_histogram_set,
&eip->zei_range_sets[range]);
update_histogram(cleared, eip->zei_histogram_cleared,
&eip->zei_range_clears[range]);
}
/* convert to byte offsets */
eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
}
abd_return_buf((abd_t *)goodabd, (void *)good, size);
abd_return_buf((abd_t *)badabd, (void *)bad, size);
eip->zei_allowed_mingap *= sizeof (uint64_t);
inline_size *= sizeof (uint64_t);
/* fill in ereport */
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
(uint32_t *)eip->zei_ranges,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
DATA_TYPE_UINT32, eip->zei_allowed_mingap,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
NULL);
if (!no_inline) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
DATA_TYPE_UINT8_ARRAY,
inline_size, (uint8_t *)eip->zei_bits_set,
FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
DATA_TYPE_UINT8_ARRAY,
inline_size, (uint8_t *)eip->zei_bits_cleared,
NULL);
} else {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
DATA_TYPE_UINT8_ARRAY,
NBBY * sizeof (uint64_t), eip->zei_histogram_set,
FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
DATA_TYPE_UINT8_ARRAY,
NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
NULL);
}
return (eip);
}
#else
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
(void) spa, (void) vd;
}
#endif
/*
* Make sure our event is still valid for the given zio/vdev/pool. For example,
* we don't want to keep logging events for a faulted or missing vdev.
*/
boolean_t
zfs_ereport_is_valid(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio)
{
#ifdef _KERNEL
/*
* If we are doing a spa_tryimport() or in recovery mode,
* ignore errors.
*/
if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER)
return (B_FALSE);
/*
* If we are in the middle of opening a pool, and the previous attempt
* failed, don't bother logging any new ereports - we're just going to
* get the same diagnosis anyway.
*/
if (spa_load_state(spa) != SPA_LOAD_NONE &&
spa->spa_last_open_failed)
return (B_FALSE);
if (zio != NULL) {
/*
* If this is not a read or write zio, ignore the error. This
* can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
*/
if (zio->io_type != ZIO_TYPE_READ &&
zio->io_type != ZIO_TYPE_WRITE)
return (B_FALSE);
if (vd != NULL) {
/*
* If the vdev has already been marked as failing due
* to a failed probe, then ignore any subsequent I/O
* errors, as the DE will automatically fault the vdev
* on the first such failure. This also catches cases
* where vdev_remove_wanted is set and the device has
* not yet been asynchronously placed into the REMOVED
* state.
*/
if (zio->io_vd == vd && !vdev_accessible(vd, zio))
return (B_FALSE);
/*
* Ignore checksum errors for reads from DTL regions of
* leaf vdevs.
*/
if (zio->io_type == ZIO_TYPE_READ &&
zio->io_error == ECKSUM &&
vd->vdev_ops->vdev_op_leaf &&
vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
return (B_FALSE);
}
}
/*
* For probe failure, we want to avoid posting ereports if we've
* already removed the device in the meantime.
*/
if (vd != NULL &&
strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
(vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
return (B_FALSE);
/* Ignore bogus delay events (like from ioctls or unqueued IOs) */
if ((strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) &&
(zio != NULL) && (!zio->io_timestamp)) {
return (B_FALSE);
}
#else
(void) subclass, (void) spa, (void) vd, (void) zio;
#endif
return (B_TRUE);
}
/*
* Post an ereport for the given subclass
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t state)
{
int rc = 0;
#ifdef _KERNEL
nvlist_t *ereport = NULL;
nvlist_t *detector = NULL;
if (!zfs_ereport_is_valid(subclass, spa, vd, zio))
return (EINVAL);
if (zfs_ereport_is_duplicate(subclass, spa, vd, zb, zio, 0, 0))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(subclass, vd))
return (SET_ERROR(EBUSY));
if (!zfs_ereport_start(&ereport, &detector, subclass, spa, vd,
zb, zio, state, 0))
return (SET_ERROR(EINVAL)); /* couldn't post event */
if (ereport == NULL)
return (SET_ERROR(EINVAL));
/* Cleanup is handled by the callback function */
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
#else
(void) subclass, (void) spa, (void) vd, (void) zb, (void) zio,
(void) state;
#endif
return (rc);
}
/*
* Prepare a checksum ereport
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
struct zio *zio, uint64_t offset, uint64_t length, zio_bad_cksum_t *info)
{
zio_cksum_report_t *report;
#ifdef _KERNEL
if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
return (SET_ERROR(EINVAL));
if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
offset, length))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
return (SET_ERROR(EBUSY));
#else
(void) zb, (void) offset;
#endif
report = kmem_zalloc(sizeof (*report), KM_SLEEP);
zio_vsd_default_cksum_report(zio, report);
/* copy the checksum failure information if it was provided */
if (info != NULL) {
report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
memcpy(report->zcr_ckinfo, info, sizeof (*info));
}
report->zcr_sector = 1ULL << vd->vdev_top->vdev_ashift;
report->zcr_align =
vdev_psize_to_asize(vd->vdev_top, report->zcr_sector);
report->zcr_length = length;
#ifdef _KERNEL
(void) zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio, offset, length);
if (report->zcr_ereport == NULL) {
zfs_ereport_free_checksum(report);
return (0);
}
#endif
mutex_enter(&spa->spa_errlist_lock);
report->zcr_next = zio->io_logical->io_cksum_report;
zio->io_logical->io_cksum_report = report;
mutex_exit(&spa->spa_errlist_lock);
return (0);
}
void
zfs_ereport_finish_checksum(zio_cksum_report_t *report, const abd_t *good_data,
const abd_t *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
zfs_ecksum_info_t *info;
info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
good_data, bad_data, report->zcr_length, drop_if_identical);
if (info != NULL)
zfs_zevent_post(report->zcr_ereport,
report->zcr_detector, zfs_zevent_post_cb);
else
zfs_zevent_post_cb(report->zcr_ereport, report->zcr_detector);
report->zcr_ereport = report->zcr_detector = NULL;
if (info != NULL)
kmem_free(info, sizeof (*info));
#else
(void) report, (void) good_data, (void) bad_data,
(void) drop_if_identical;
#endif
}
void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
#ifdef _KERNEL
if (rpt->zcr_ereport != NULL) {
fm_nvlist_destroy(rpt->zcr_ereport,
FM_NVA_FREE);
fm_nvlist_destroy(rpt->zcr_detector,
FM_NVA_FREE);
}
#endif
rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);
if (rpt->zcr_ckinfo != NULL)
kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));
kmem_free(rpt, sizeof (*rpt));
}
/*
* Post a checksum ereport
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
struct zio *zio, uint64_t offset, uint64_t length,
const abd_t *good_data, const abd_t *bad_data, zio_bad_cksum_t *zbc)
{
int rc = 0;
#ifdef _KERNEL
nvlist_t *ereport = NULL;
nvlist_t *detector = NULL;
zfs_ecksum_info_t *info;
if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
return (SET_ERROR(EINVAL));
if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
offset, length))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
return (SET_ERROR(EBUSY));
if (!zfs_ereport_start(&ereport, &detector, FM_EREPORT_ZFS_CHECKSUM,
spa, vd, zb, zio, offset, length) || (ereport == NULL)) {
return (SET_ERROR(EINVAL));
}
info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
B_FALSE);
if (info != NULL) {
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
kmem_free(info, sizeof (*info));
}
#else
(void) spa, (void) vd, (void) zb, (void) zio, (void) offset,
(void) length, (void) good_data, (void) bad_data, (void) zbc;
#endif
return (rc);
}
/*
* The 'sysevent.fs.zfs.*' events are signals posted to notify user space of
* change in the pool. All sysevents are listed in sys/sysevent/eventdefs.h
* and are designed to be consumed by the ZFS Event Daemon (ZED). For
* additional details refer to the zed(8) man page.
*/
nvlist_t *
zfs_event_create(spa_t *spa, vdev_t *vd, const char *type, const char *name,
nvlist_t *aux)
{
nvlist_t *resource = NULL;
#ifdef _KERNEL
char class[64];
if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
return (NULL);
if ((resource = fm_nvlist_create(NULL)) == NULL)
return (NULL);
(void) snprintf(class, sizeof (class), "%s.%s.%s", type,
ZFS_ERROR_CLASS, name);
VERIFY0(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION));
VERIFY0(nvlist_add_string(resource, FM_CLASS, class));
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL, spa_name(spa)));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, spa_state(spa)));
VERIFY0(nvlist_add_int32(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, spa_load_state(spa)));
if (vd) {
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state));
if (vd->vdev_path != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH, vd->vdev_path));
if (vd->vdev_devid != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID, vd->vdev_devid));
if (vd->vdev_fru != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU, vd->vdev_fru));
if (vd->vdev_enc_sysfs_path != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path));
}
/* also copy any optional payload data */
if (aux) {
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(aux, elem)) != NULL)
(void) nvlist_add_nvpair(resource, elem);
}
#else
(void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
return (resource);
}
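/*
 * For example (a minimal sketch), zfs_event_create(spa, vd, FM_RSRC_CLASS,
 * FM_RESOURCE_REMOVED, NULL) builds a resource nvlist whose FM_CLASS is
 * "resource.fs.zfs.removed", one of the classes consumed by zed(8).
 */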
static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *type, const char *name,
nvlist_t *aux)
{
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, type, name, aux);
if (resource)
zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
#else
(void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
}
/*
* The 'resource.fs.zfs.removed' event is an internal signal that the given vdev
* has been removed from the system. This will cause the DE to ignore any
* recent I/O errors, inferring that they are due to the asynchronous device
* removal.
*/
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_REMOVED, NULL);
}
/*
* The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
* has the 'autoreplace' property set, and therefore any broken vdevs will be
* handled by higher level logic, and no vdev fault should be generated.
*/
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_AUTOREPLACE, NULL);
}
/*
* The 'resource.fs.zfs.statechange' event is an internal signal that the
* given vdev has transitioned its state to DEGRADED or HEALTHY. This will
* cause the retire agent to repair any outstanding fault management cases
* open because the device was not found (fault.fs.zfs.device).
*/
void
zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate)
{
#ifdef _KERNEL
nvlist_t *aux;
/*
* Add optional supplemental keys to payload
*/
aux = fm_nvlist_create(NULL);
if (vd && aux) {
if (vd->vdev_physpath) {
fnvlist_add_string(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH,
vd->vdev_physpath);
}
if (vd->vdev_enc_sysfs_path) {
fnvlist_add_string(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path);
}
fnvlist_add_uint64(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE, laststate);
}
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_STATECHANGE,
aux);
if (aux)
fm_nvlist_destroy(aux, FM_NVA_FREE);
#else
(void) spa, (void) vd, (void) laststate;
#endif
}
#ifdef _KERNEL
void
zfs_ereport_init(void)
{
mutex_init(&recent_events_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&recent_events_list, sizeof (recent_events_node_t),
offsetof(recent_events_node_t, re_list_link));
avl_create(&recent_events_tree, recent_events_compare,
sizeof (recent_events_node_t), offsetof(recent_events_node_t,
re_tree_link));
}
/*
* This 'early' fini needs to run before zfs_fini() which on Linux waits
* for the system_delay_taskq to drain.
*/
void
zfs_ereport_taskq_fini(void)
{
mutex_enter(&recent_events_lock);
if (recent_events_cleaner_tqid != 0) {
taskq_cancel_id(system_delay_taskq, recent_events_cleaner_tqid);
recent_events_cleaner_tqid = 0;
}
mutex_exit(&recent_events_lock);
}
void
zfs_ereport_fini(void)
{
recent_events_node_t *entry;
- while ((entry = list_head(&recent_events_list)) != NULL) {
+ while ((entry = list_remove_head(&recent_events_list)) != NULL) {
avl_remove(&recent_events_tree, entry);
- list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
}
avl_destroy(&recent_events_tree);
list_destroy(&recent_events_list);
mutex_destroy(&recent_events_lock);
}
void
zfs_ereport_snapshot_post(const char *subclass, spa_t *spa, const char *name)
{
nvlist_t *aux;
aux = fm_nvlist_create(NULL);
fnvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_SNAPSHOT_NAME, name);
zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
fm_nvlist_destroy(aux, FM_NVA_FREE);
}
/*
* Post an event when a zvol is created or removed.
*
* This is currently only used by macOS, since it uses the event to create
* symlinks between the volume name (mypool/myvol) and the actual /dev
* device (/dev/disk3). For example:
*
* /var/run/zfs/dsk/mypool/myvol -> /dev/disk3
*
* name: The full name of the zvol ("mypool/myvol")
* dev_name: The full /dev name for the zvol ("/dev/disk3")
* raw_name: The raw /dev name for the zvol ("/dev/rdisk3")
*/
void
zfs_ereport_zvol_post(const char *subclass, const char *name,
const char *dev_name, const char *raw_name)
{
nvlist_t *aux;
char *r;
boolean_t locked = mutex_owned(&spa_namespace_lock);
if (!locked) mutex_enter(&spa_namespace_lock);
spa_t *spa = spa_lookup(name);
if (!locked) mutex_exit(&spa_namespace_lock);
if (spa == NULL)
return;
aux = fm_nvlist_create(NULL);
fnvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_DEVICE_NAME, dev_name);
fnvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_RAW_DEVICE_NAME,
raw_name);
r = strchr(name, '/');
if (r && r[1])
fnvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_VOLUME, &r[1]);
zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
fm_nvlist_destroy(aux, FM_NVA_FREE);
}
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_is_valid);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_max, UINT, ZMOD_RW,
"Maximum recent zevents records to retain for duplicate checking");
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_expire_secs, UINT, ZMOD_RW,
"Expiration time for recent zevents records");
#endif /* _KERNEL */
diff --git a/sys/contrib/openzfs/module/zfs/zfs_fuid.c b/sys/contrib/openzfs/module/zfs/zfs_fuid.c
index 44aaae9c1264..add4241dcc99 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_fuid.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_fuid.c
@@ -1,809 +1,805 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/avl.h>
#include <sys/zap.h>
#include <sys/nvpair.h>
#ifdef _KERNEL
#include <sys/sid.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#endif
#include <sys/zfs_fuid.h>
/*
* FUID Domain table(s).
*
* The FUID table is stored as a packed nvlist of an array
* of nvlists which contain an index, domain string and offset
*
* During file system initialization the nvlist(s) are read and
* two AVL trees are created. One tree is keyed by the index number
* and the other by the domain string. Nodes are never removed from
* trees, but new entries may be added. If a new entry is added then
* the zfsvfs->z_fuid_dirty flag is set to true and the caller will then
* be responsible for calling zfs_fuid_sync() to sync the changes to disk.
*
*/
#define FUID_IDX "fuid_idx"
#define FUID_DOMAIN "fuid_domain"
#define FUID_OFFSET "fuid_offset"
#define FUID_NVP_ARRAY "fuid_nvlist"
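/*
 * A sketch of the packed layout written by zfs_fuid_sync() below (the domain
 * strings here are made-up examples):
 *
 *     fuid_nvlist = [
 *         { fuid_idx = 1, fuid_offset = 0, fuid_domain = "S-1-5-21-..." },
 *         { fuid_idx = 2, fuid_offset = 0, fuid_domain = "S-1-5-32-..." },
 *         ...
 *     ]
 *
 * FUIDs then reference these entries by index, via FUID_ENCODE(idx, rid).
 */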
typedef struct fuid_domain {
avl_node_t f_domnode;
avl_node_t f_idxnode;
ksiddomain_t *f_ksid;
uint64_t f_idx;
} fuid_domain_t;
static const char *const nulldomain = "";
/*
* Compare two indexes.
*/
static int
idx_compare(const void *arg1, const void *arg2)
{
const fuid_domain_t *node1 = (const fuid_domain_t *)arg1;
const fuid_domain_t *node2 = (const fuid_domain_t *)arg2;
return (TREE_CMP(node1->f_idx, node2->f_idx));
}
/*
* Compare two domain strings.
*/
static int
domain_compare(const void *arg1, const void *arg2)
{
const fuid_domain_t *node1 = (const fuid_domain_t *)arg1;
const fuid_domain_t *node2 = (const fuid_domain_t *)arg2;
int val;
val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name);
return (TREE_ISIGN(val));
}
void
zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
avl_create(idx_tree, idx_compare,
sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
avl_create(domain_tree, domain_compare,
sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
}
/*
* Load the initial fuid domain and idx trees. This function is used by
* both the kernel and zdb.
*/
uint64_t
zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
avl_tree_t *domain_tree)
{
dmu_buf_t *db;
uint64_t fuid_size;
ASSERT(fuid_obj != 0);
VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
FTAG, &db));
fuid_size = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
if (fuid_size) {
nvlist_t **fuidnvp;
nvlist_t *nvp = NULL;
uint_t count;
char *packed;
int i;
packed = kmem_alloc(fuid_size, KM_SLEEP);
VERIFY(dmu_read(os, fuid_obj, 0,
fuid_size, packed, DMU_READ_PREFETCH) == 0);
VERIFY(nvlist_unpack(packed, fuid_size,
&nvp, 0) == 0);
VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
&fuidnvp, &count) == 0);
for (i = 0; i != count; i++) {
fuid_domain_t *domnode;
const char *domain;
uint64_t idx;
VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
&domain) == 0);
VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
&idx) == 0);
domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
domnode->f_idx = idx;
domnode->f_ksid = ksid_lookupdomain(domain);
avl_add(idx_tree, domnode);
avl_add(domain_tree, domnode);
}
nvlist_free(nvp);
kmem_free(packed, fuid_size);
}
return (fuid_size);
}
void
zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
{
fuid_domain_t *domnode;
void *cookie;
cookie = NULL;
while ((domnode = avl_destroy_nodes(domain_tree, &cookie)))
ksiddomain_rele(domnode->f_ksid);
avl_destroy(domain_tree);
cookie = NULL;
while ((domnode = avl_destroy_nodes(idx_tree, &cookie)))
kmem_free(domnode, sizeof (fuid_domain_t));
avl_destroy(idx_tree);
}
const char *
zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
{
fuid_domain_t searchnode, *findnode;
avl_index_t loc;
searchnode.f_idx = idx;
findnode = avl_find(idx_tree, &searchnode, &loc);
return (findnode ? findnode->f_ksid->kd_name : nulldomain);
}
#ifdef _KERNEL
/*
* Load the fuid table(s) into memory.
*/
static void
zfs_fuid_init(zfsvfs_t *zfsvfs)
{
rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
if (zfsvfs->z_fuid_loaded) {
rw_exit(&zfsvfs->z_fuid_lock);
return;
}
zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
(void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
if (zfsvfs->z_fuid_obj != 0) {
zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
&zfsvfs->z_fuid_domain);
}
zfsvfs->z_fuid_loaded = B_TRUE;
rw_exit(&zfsvfs->z_fuid_lock);
}
/*
* sync out AVL trees to persistent storage.
*/
void
zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
nvlist_t *nvp;
nvlist_t **fuids;
size_t nvsize = 0;
char *packed;
dmu_buf_t *db;
fuid_domain_t *domnode;
int numnodes;
int i;
if (!zfsvfs->z_fuid_dirty) {
return;
}
rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
/*
* First see if the table needs to be created.
*/
if (zfsvfs->z_fuid_obj == 0) {
zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
ZFS_FUID_TABLES, sizeof (uint64_t), 1,
&zfsvfs->z_fuid_obj, tx) == 0);
}
VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
domnode->f_idx) == 0);
VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
domnode->f_ksid->kd_name) == 0);
}
fnvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
(const nvlist_t * const *)fuids, numnodes);
for (i = 0; i != numnodes; i++)
nvlist_free(fuids[i]);
kmem_free(fuids, numnodes * sizeof (void *));
VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
packed = kmem_alloc(nvsize, KM_SLEEP);
VERIFY(nvlist_pack(nvp, &packed, &nvsize,
NV_ENCODE_XDR, KM_SLEEP) == 0);
nvlist_free(nvp);
zfsvfs->z_fuid_size = nvsize;
dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
zfsvfs->z_fuid_size, packed, tx);
kmem_free(packed, zfsvfs->z_fuid_size);
VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
dmu_buf_rele(db, FTAG);
zfsvfs->z_fuid_dirty = B_FALSE;
rw_exit(&zfsvfs->z_fuid_lock);
}
/*
* Query domain table for a given domain.
*
* If domain isn't found and addok is set, it is added to AVL trees and
* the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be
* necessary for the caller or another thread to detect the dirty table
* and sync out the changes.
*/
static int
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
const char **retdomain, boolean_t addok)
{
fuid_domain_t searchnode, *findnode;
avl_index_t loc;
krw_t rw = RW_READER;
/*
* If this is the dummy "nobody" domain, return an index of 0
* to cause the created FUID to be a standard POSIX id
* for the user nobody.
*/
if (domain[0] == '\0') {
if (retdomain)
*retdomain = nulldomain;
return (0);
}
searchnode.f_ksid = ksid_lookupdomain(domain);
if (retdomain)
*retdomain = searchnode.f_ksid->kd_name;
if (!zfsvfs->z_fuid_loaded)
zfs_fuid_init(zfsvfs);
retry:
rw_enter(&zfsvfs->z_fuid_lock, rw);
findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);
if (findnode) {
rw_exit(&zfsvfs->z_fuid_lock);
ksiddomain_rele(searchnode.f_ksid);
return (findnode->f_idx);
} else if (addok) {
fuid_domain_t *domnode;
uint64_t retidx;
if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
rw_exit(&zfsvfs->z_fuid_lock);
rw = RW_WRITER;
goto retry;
}
domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
domnode->f_ksid = searchnode.f_ksid;
retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;
avl_add(&zfsvfs->z_fuid_domain, domnode);
avl_add(&zfsvfs->z_fuid_idx, domnode);
zfsvfs->z_fuid_dirty = B_TRUE;
rw_exit(&zfsvfs->z_fuid_lock);
return (retidx);
} else {
rw_exit(&zfsvfs->z_fuid_lock);
return (-1);
}
}
/*
* Query domain table by index, returning domain string
*
* Returns a pointer to the domain string held in the matching AVL node.
*
*/
const char *
zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
{
const char *domain;
if (idx == 0 || !zfsvfs->z_use_fuids)
return (NULL);
if (!zfsvfs->z_fuid_loaded)
zfs_fuid_init(zfsvfs);
rw_enter(&zfsvfs->z_fuid_lock, RW_READER);
if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
else
domain = nulldomain;
rw_exit(&zfsvfs->z_fuid_lock);
ASSERT(domain);
return (domain);
}
void
zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
{
*uidp = zfs_fuid_map_id(ZTOZSB(zp), KUID_TO_SUID(ZTOUID(zp)),
cr, ZFS_OWNER);
*gidp = zfs_fuid_map_id(ZTOZSB(zp), KGID_TO_SGID(ZTOGID(zp)),
cr, ZFS_GROUP);
}
#ifdef __FreeBSD__
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
cred_t *cr, zfs_fuid_type_t type)
{
uint32_t index = FUID_INDEX(fuid);
if (index == 0)
return (fuid);
return (UID_NOBODY);
}
#elif defined(__linux__)
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
cred_t *cr, zfs_fuid_type_t type)
{
/*
* The Linux port only supports POSIX IDs, use the passed id.
*/
return (fuid);
}
#else
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
cred_t *cr, zfs_fuid_type_t type)
{
uint32_t index = FUID_INDEX(fuid);
const char *domain;
uid_t id;
if (index == 0)
return (fuid);
domain = zfs_fuid_find_by_idx(zfsvfs, index);
ASSERT(domain != NULL);
if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
(void) kidmap_getuidbysid(crgetzone(cr), domain,
FUID_RID(fuid), &id);
} else {
(void) kidmap_getgidbysid(crgetzone(cr), domain,
FUID_RID(fuid), &id);
}
return (id);
}
#endif
/*
* Add a FUID node to the list of FUIDs being created for this
* ACL.
*
* If the ACL has multiple domains, then keep only one copy of each unique
* domain.
*/
void
zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
uint64_t idx, uint64_t id, zfs_fuid_type_t type)
{
zfs_fuid_t *fuid;
zfs_fuid_domain_t *fuid_domain;
zfs_fuid_info_t *fuidp;
uint64_t fuididx;
boolean_t found = B_FALSE;
if (*fuidpp == NULL)
*fuidpp = zfs_fuid_info_alloc();
fuidp = *fuidpp;
/*
* First find fuid domain index in linked list
*
* If one isn't found then create an entry.
*/
for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains);
fuid_domain; fuid_domain = list_next(&fuidp->z_domains,
fuid_domain), fuididx++) {
if (idx == fuid_domain->z_domidx) {
found = B_TRUE;
break;
}
}
if (!found) {
fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP);
fuid_domain->z_domain = domain;
fuid_domain->z_domidx = idx;
list_insert_tail(&fuidp->z_domains, fuid_domain);
fuidp->z_domain_str_sz += strlen(domain) + 1;
fuidp->z_domain_cnt++;
}
if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {
/*
* Now allocate fuid entry and add it on the end of the list
*/
fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
fuid->z_id = id;
fuid->z_domidx = idx;
fuid->z_logfuid = FUID_ENCODE(fuididx, rid);
list_insert_tail(&fuidp->z_fuids, fuid);
fuidp->z_fuid_cnt++;
} else {
if (type == ZFS_OWNER)
fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid);
else
fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid);
}
}
#ifdef HAVE_KSID
/*
* Create a file system FUID, based on information in the user's cred
*
* If cred contains KSID_OWNER then it should be used to determine
* the uid otherwise cred's uid will be used. By default cred's gid
* is used unless it's an ephemeral ID in which case KSID_GROUP will
* be used if it exists.
*/
uint64_t
zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
cred_t *cr, zfs_fuid_info_t **fuidp)
{
uint64_t idx;
ksid_t *ksid;
uint32_t rid;
const char *kdomain, *domain;
uid_t id;
VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);
ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);
if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);
if (IS_EPHEMERAL(id))
return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY);
return ((uint64_t)id);
}
/*
* ksid is present and FUID is supported
*/
id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr);
if (!IS_EPHEMERAL(id))
return ((uint64_t)id);
if (type == ZFS_GROUP)
id = ksid_getid(ksid);
rid = ksid_getrid(ksid);
domain = ksid_getdomain(ksid);
idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);
return (FUID_ENCODE(idx, rid));
}
#endif /* HAVE_KSID */
/*
* Create a file system FUID for an ACL ace
* or a chown/chgrp of the file.
* This is similar to zfs_fuid_create_cred, except that
* we can't find the domain + rid information in the
* cred. Instead we have to query Winchester for the
* domain and rid.
*
* During replay operations the domain+rid information is
* found in the zfs_fuid_info_t that the replay code has
* attached to the zfsvfs of the file system.
*/
uint64_t
zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
#ifdef HAVE_KSID
const char *domain, *kdomain;
uint32_t fuid_idx = FUID_INDEX(id);
uint32_t rid = 0;
idmap_stat status;
uint64_t idx = UID_NOBODY;
zfs_fuid_t *zfuid = NULL;
zfs_fuid_info_t *fuidp = NULL;
/*
* If POSIX ID, or entry is already a FUID then
* just return the id
*
* We may also be handed an already FUID'ized id via
* chmod.
*/
if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
return (id);
if (zfsvfs->z_replay) {
fuidp = zfsvfs->z_fuid_replay;
/*
* If we are passed an ephemeral id, but no
* fuid_info was logged then return NOBODY.
* This is most likely a result of idmap service
* not being available.
*/
if (fuidp == NULL)
return (UID_NOBODY);
VERIFY3U(type, >=, ZFS_OWNER);
VERIFY3U(type, <=, ZFS_ACE_GROUP);
switch (type) {
case ZFS_ACE_USER:
case ZFS_ACE_GROUP:
zfuid = list_head(&fuidp->z_fuids);
rid = FUID_RID(zfuid->z_logfuid);
idx = FUID_INDEX(zfuid->z_logfuid);
break;
case ZFS_OWNER:
rid = FUID_RID(fuidp->z_fuid_owner);
idx = FUID_INDEX(fuidp->z_fuid_owner);
break;
case ZFS_GROUP:
rid = FUID_RID(fuidp->z_fuid_group);
idx = FUID_INDEX(fuidp->z_fuid_group);
break;
}
domain = fuidp->z_domain_table[idx - 1];
} else {
if (type == ZFS_OWNER || type == ZFS_ACE_USER)
status = kidmap_getsidbyuid(crgetzone(cr), id,
&domain, &rid);
else
status = kidmap_getsidbygid(crgetzone(cr), id,
&domain, &rid);
if (status != 0) {
/*
* When returning nobody we will need to
* make a dummy fuid table entry for logging
* purposes.
*/
rid = UID_NOBODY;
domain = nulldomain;
}
}
idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
if (!zfsvfs->z_replay)
zfs_fuid_node_add(fuidpp, kdomain,
rid, idx, id, type);
else if (zfuid != NULL) {
list_remove(&fuidp->z_fuids, zfuid);
kmem_free(zfuid, sizeof (zfs_fuid_t));
}
return (FUID_ENCODE(idx, rid));
#else
/*
* The Linux port only supports POSIX IDs, use the passed id.
*/
return (id);
#endif
}
void
zfs_fuid_destroy(zfsvfs_t *zfsvfs)
{
rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
if (!zfsvfs->z_fuid_loaded) {
rw_exit(&zfsvfs->z_fuid_lock);
return;
}
zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
rw_exit(&zfsvfs->z_fuid_lock);
}
/*
* Allocate zfs_fuid_info for tracking FUIDs created during
* zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR()
*/
zfs_fuid_info_t *
zfs_fuid_info_alloc(void)
{
zfs_fuid_info_t *fuidp;
fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP);
list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t),
offsetof(zfs_fuid_domain_t, z_next));
list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t),
offsetof(zfs_fuid_t, z_next));
return (fuidp);
}
/*
* Release all memory associated with zfs_fuid_info_t
*/
void
zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
{
zfs_fuid_t *zfuid;
zfs_fuid_domain_t *zdomain;
- while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) {
- list_remove(&fuidp->z_fuids, zfuid);
+ while ((zfuid = list_remove_head(&fuidp->z_fuids)) != NULL)
kmem_free(zfuid, sizeof (zfs_fuid_t));
- }
if (fuidp->z_domain_table != NULL)
kmem_free(fuidp->z_domain_table,
(sizeof (char *)) * fuidp->z_domain_cnt);
- while ((zdomain = list_head(&fuidp->z_domains)) != NULL) {
- list_remove(&fuidp->z_domains, zdomain);
+ while ((zdomain = list_remove_head(&fuidp->z_domains)) != NULL)
kmem_free(zdomain, sizeof (zfs_fuid_domain_t));
- }
kmem_free(fuidp, sizeof (zfs_fuid_info_t));
}
/*
* Check to see if id is a group member. If the cred
* has ksid info, then the sidlist is checked first;
* if the id is still not found, POSIX groups are checked.
*
* Will use a straight FUID compare when possible.
*/
boolean_t
zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
{
uid_t gid;
#ifdef illumos
ksid_t *ksid = crgetsid(cr, KSID_GROUP);
ksidlist_t *ksidlist = crgetsidlist(cr);
if (ksid && ksidlist) {
int i;
ksid_t *ksid_groups;
uint32_t idx = FUID_INDEX(id);
uint32_t rid = FUID_RID(id);
ksid_groups = ksidlist->ksl_sids;
for (i = 0; i != ksidlist->ksl_nsid; i++) {
if (idx == 0) {
if (id != IDMAP_WK_CREATOR_GROUP_GID &&
id == ksid_groups[i].ks_id) {
return (B_TRUE);
}
} else {
const char *domain;
domain = zfs_fuid_find_by_idx(zfsvfs, idx);
ASSERT(domain != NULL);
if (strcmp(domain,
IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
return (B_FALSE);
if ((strcmp(domain,
ksid_groups[i].ks_domain->kd_name) == 0) &&
rid == ksid_groups[i].ks_rid)
return (B_TRUE);
}
}
}
#endif /* illumos */
/*
* Not found in ksidlist, check posix groups
*/
gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
return (groupmember(gid, cr));
}
void
zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
if (zfsvfs->z_fuid_obj == 0) {
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
FUID_SIZE_ESTIMATE(zfsvfs));
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
} else {
dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
FUID_SIZE_ESTIMATE(zfsvfs));
}
}
/*
* buf must be big enough (e.g., 32 bytes).
*/
int
zfs_id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
char *buf, size_t len, boolean_t addok)
{
uint64_t fuid;
int domainid = 0;
if (domain && domain[0]) {
domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
if (domainid == -1)
return (SET_ERROR(ENOENT));
}
fuid = FUID_ENCODE(domainid, rid);
(void) snprintf(buf, len, "%llx", (longlong_t)fuid);
return (0);
}
#endif
diff --git a/sys/contrib/openzfs/module/zfs/zfs_onexit.c b/sys/contrib/openzfs/module/zfs/zfs_onexit.c
index 63acf7ab2e4d..7bf804b67790 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_onexit.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_onexit.c
@@ -1,176 +1,175 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2020 by Delphix. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
/*
* ZFS kernel routines may add/delete callback routines to be invoked
* upon process exit (triggered via the close operation from the /dev/zfs
* driver).
*
* These cleanup callbacks are intended to allow for the accumulation
* of kernel state across multiple ioctls. User processes participate
* simply by opening ZFS_DEV. This causes the ZFS driver to create
* some private data for the file descriptor and generate a unique
* minor number. The process then passes along that file descriptor to
* each ioctl that might have a cleanup operation.
*
* Consumers of the onexit routines should call zfs_onexit_fd_hold() early
* on to validate the given fd and add a reference to its file table entry.
* This allows the consumer to do its work and then add a callback, knowing
* that zfs_onexit_add_cb() won't fail with EBADF. When finished, consumers
* should call zfs_onexit_fd_rele().
*
* A simple example is zfs_ioc_recv(), where we might create an AVL tree
* with dataset/GUID mappings and then reuse that tree on subsequent
* zfs_ioc_recv() calls.
*
* On the first zfs_ioc_recv() call, dmu_recv_stream() will kmem_alloc()
* the AVL tree and pass it along with a callback function to
* zfs_onexit_add_cb(). The zfs_onexit_add_cb() routine will register the
* callback and return an action handle.
*
* The action handle is then passed from user space to subsequent
* zfs_ioc_recv() calls, so that dmu_recv_stream() can fetch its AVL tree
* by calling zfs_onexit_cb_data() with the device minor number and
* action handle.
*
* If the user process exits abnormally, the callback is invoked implicitly
* as part of the driver close operation. Once the user space process is
* finished with the accumulated kernel state, it can also just call close(2)
* on the cleanup fd to trigger the cleanup callback.
*/
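/*
 * A minimal consumer sketch (error handling omitted; my_cleanup_func and
 * my_state are hypothetical names):
 *
 *     minor_t minor;
 *     zfs_file_t *fp = zfs_onexit_fd_hold(cleanup_fd, &minor);
 *     if (fp != NULL) {
 *         uintptr_t handle;
 *         (void) zfs_onexit_add_cb(minor, my_cleanup_func, my_state,
 *             &handle);
 *         zfs_onexit_fd_rele(fp);
 *     }
 */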
void
zfs_onexit_init(zfs_onexit_t **zop)
{
zfs_onexit_t *zo;
zo = *zop = kmem_zalloc(sizeof (zfs_onexit_t), KM_SLEEP);
mutex_init(&zo->zo_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zo->zo_actions, sizeof (zfs_onexit_action_node_t),
offsetof(zfs_onexit_action_node_t, za_link));
}
void
zfs_onexit_destroy(zfs_onexit_t *zo)
{
zfs_onexit_action_node_t *ap;
mutex_enter(&zo->zo_lock);
- while ((ap = list_head(&zo->zo_actions)) != NULL) {
- list_remove(&zo->zo_actions, ap);
+ while ((ap = list_remove_head(&zo->zo_actions)) != NULL) {
mutex_exit(&zo->zo_lock);
ap->za_func(ap->za_data);
kmem_free(ap, sizeof (zfs_onexit_action_node_t));
mutex_enter(&zo->zo_lock);
}
mutex_exit(&zo->zo_lock);
list_destroy(&zo->zo_actions);
mutex_destroy(&zo->zo_lock);
kmem_free(zo, sizeof (zfs_onexit_t));
}
/*
* Consumers might need to operate by minor number instead of fd, since
* they might be running in another thread (e.g. txg_sync_thread). Callers
* of this function must call zfs_onexit_fd_rele() when they're finished
* using the minor number.
*/
zfs_file_t *
zfs_onexit_fd_hold(int fd, minor_t *minorp)
{
zfs_onexit_t *zo = NULL;
zfs_file_t *fp = zfs_file_get(fd);
if (fp == NULL)
return (NULL);
int error = zfsdev_getminor(fp, minorp);
if (error) {
zfs_onexit_fd_rele(fp);
return (NULL);
}
zo = zfsdev_get_state(*minorp, ZST_ONEXIT);
if (zo == NULL) {
zfs_onexit_fd_rele(fp);
return (NULL);
}
return (fp);
}
void
zfs_onexit_fd_rele(zfs_file_t *fp)
{
zfs_file_put(fp);
}
static int
zfs_onexit_minor_to_state(minor_t minor, zfs_onexit_t **zo)
{
*zo = zfsdev_get_state(minor, ZST_ONEXIT);
if (*zo == NULL)
return (SET_ERROR(EBADF));
return (0);
}
/*
* Add a callback to be invoked when the calling process exits.
*/
int
zfs_onexit_add_cb(minor_t minor, void (*func)(void *), void *data,
uintptr_t *action_handle)
{
zfs_onexit_t *zo;
zfs_onexit_action_node_t *ap;
int error;
error = zfs_onexit_minor_to_state(minor, &zo);
if (error)
return (error);
ap = kmem_alloc(sizeof (zfs_onexit_action_node_t), KM_SLEEP);
list_link_init(&ap->za_link);
ap->za_func = func;
ap->za_data = data;
mutex_enter(&zo->zo_lock);
list_insert_tail(&zo->zo_actions, ap);
mutex_exit(&zo->zo_lock);
if (action_handle)
*action_handle = (uintptr_t)ap;
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index d887e4900d1d..8c1fe5f66838 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -1,4089 +1,4257 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2018 Datto Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/wmsum.h>
/*
* The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
* calls that change the file system. Each itx has enough information to
* be able to replay them after a system crash, power loss, or
* equivalent failure mode. These are stored in memory until either:
*
* 1. they are committed to the pool by the DMU transaction group
* (txg), at which point they can be discarded; or
* 2. they are committed to the on-disk ZIL for the dataset being
* modified (e.g. due to an fsync, O_DSYNC, or other synchronous
* requirement).
*
* In the event of a crash or power loss, the itxs contained by each
* dataset's on-disk ZIL will be replayed when that dataset is first
* instantiated (e.g. if the dataset is a normal filesystem, when it is
* first mounted).
*
* As hinted at above, there is one ZIL per dataset (both the in-memory
* representation, and the on-disk representation). The on-disk format
* consists of 3 parts:
*
* - a single, per-dataset, ZIL header; which points to a chain of
* - zero or more ZIL blocks; each of which contains
* - zero or more ZIL records
*
* A ZIL record holds the information necessary to replay a single
* system call transaction. A ZIL block can hold many ZIL records, and
* the blocks are chained together, similarly to a singly linked list.
*
* Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
* block in the chain, and the ZIL header points to the first block in
* the chain.
*
* Note, there is not a fixed place in the pool to hold these ZIL
* blocks; they are dynamically allocated and freed as needed from the
* blocks available on the pool, though they can be preferentially
* allocated from a dedicated "log" vdev.
*/
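/*
 * A rough picture of the layout described above (block and record counts
 * are arbitrary):
 *
 *     zil_header_t zh_log --> [ ZIL block ] --> [ ZIL block ] --> ...
 *                              lr, lr, ...       lr, lr, ...
 *
 * Each block's zil_chain_t carries the blkptr_t of the next block and the
 * number of record bytes in use (zc_nused), which is what
 * zil_read_log_block() and zil_parse() below use to walk the chain.
 */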
/*
* This controls the amount of time that a ZIL block (lwb) will remain
* "open" when it isn't "full", and it has a thread waiting for it to be
* committed to stable storage. Please refer to the zil_commit_waiter()
* function (and the comments within it) for more details.
*/
static uint_t zfs_commit_timeout_pct = 5;
/*
* Minimal time we care to delay commit waiting for more ZIL records.
* At least the FreeBSD kernel can't sleep for less than 2us at best,
* so requests to sleep for less than 5us are a waste of CPU time and
* risk a significant log latency increase due to oversleeping.
*/
static uint64_t zil_min_commit_timeout = 5000;
/*
* See zil.h for more information about these fields.
*/
static zil_kstat_values_t zil_stats = {
{ "zil_commit_count", KSTAT_DATA_UINT64 },
{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_write", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_alloc", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_write", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_alloc", KSTAT_DATA_UINT64 },
};
static zil_sums_t zil_sums_global;
static kstat_t *zil_kstats_global;
/*
* Disable intent logging replay. This global ZIL switch affects all pools.
*/
int zil_replay_disable = 0;
/*
* Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
* the disk(s) by the ZIL after an LWB write has completed. Setting this
* will cause ZIL corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
static int zil_nocacheflush = 0;
/*
* Limit SLOG write size per commit executed with synchronous priority.
* Any writes above that will be executed with lower (asynchronous) priority
* to limit potential SLOG device abuse by single active ZIL writer.
*/
static uint64_t zil_slog_bulk = 768 * 1024;
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;
+static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
+static itx_t *zil_itx_clone(itx_t *oitx);
+
static int
zil_bp_compare(const void *x1, const void *x2)
{
const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
if (likely(cmp))
return (cmp);
return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}
static void
zil_bp_tree_init(zilog_t *zilog)
{
avl_create(&zilog->zl_bp_tree, zil_bp_compare,
sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}
static void
zil_bp_tree_fini(zilog_t *zilog)
{
avl_tree_t *t = &zilog->zl_bp_tree;
zil_bp_node_t *zn;
void *cookie = NULL;
while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zn, sizeof (zil_bp_node_t));
avl_destroy(t);
}
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
avl_tree_t *t = &zilog->zl_bp_tree;
const dva_t *dva;
zil_bp_node_t *zn;
avl_index_t where;
if (BP_IS_EMBEDDED(bp))
return (0);
dva = BP_IDENTITY(bp);
if (avl_find(t, dva, &where) != NULL)
return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
zn->zn_dva = *dva;
avl_insert(t, zn, where);
return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
zio_cksum_t *zc = &bp->blk_cksum;
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
static int
zil_kstats_global_update(kstat_t *ksp, int rw)
{
zil_kstat_values_t *zs = ksp->ks_data;
ASSERT3P(&zil_stats, ==, zs);
if (rw == KSTAT_WRITE) {
return (SET_ERROR(EACCES));
}
zil_kstat_values_update(zs, &zil_sums_global);
return (0);
}
/*
* Read a log block and make sure it's valid.
*/
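/*
 * Note: on success the records are returned by reference; *begin and *end
 * point into the ARC buffer returned via *abuf, so the caller must keep the
 * buffer held while processing the records and arc_buf_destroy() it
 * afterwards (as zil_parse() does).
 */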
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
- blkptr_t *nbp, void *dst, char **end)
+ blkptr_t *nbp, char **begin, char **end, arc_buf_t **abuf)
{
zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
arc_flags_t aflags = ARC_FLAG_WAIT;
- arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
zio_flags |= ZIO_FLAG_SPECULATIVE;
if (!decrypt)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
- &abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
+ abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
zio_cksum_t cksum = bp->blk_cksum;
/*
* Validate the checksummed log block.
*
* Sequence numbers should be... sequential. The checksum
* verifier for the next block should be bp's checksum plus 1.
*
* Also check the log chain linkage and size used.
*/
cksum.zc_word[ZIL_ZC_SEQ]++;
+ uint64_t size = BP_GET_LSIZE(bp);
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
- zil_chain_t *zilc = abuf->b_data;
+ zil_chain_t *zilc = (*abuf)->b_data;
char *lr = (char *)(zilc + 1);
- uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
- sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
+ sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
+ zilc->zc_nused < sizeof (*zilc) ||
+ zilc->zc_nused > size) {
error = SET_ERROR(ECKSUM);
} else {
- ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
- memcpy(dst, lr, len);
- *end = (char *)dst + len;
+ *begin = lr;
+ *end = lr + zilc->zc_nused - sizeof (*zilc);
*nbp = zilc->zc_next_blk;
}
} else {
- char *lr = abuf->b_data;
- uint64_t size = BP_GET_LSIZE(bp);
+ char *lr = (*abuf)->b_data;
zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = SET_ERROR(ECKSUM);
} else {
- ASSERT3U(zilc->zc_nused, <=,
- SPA_OLD_MAXBLOCKSIZE);
- memcpy(dst, lr, zilc->zc_nused);
- *end = (char *)dst + zilc->zc_nused;
+ *begin = lr;
+ *end = lr + zilc->zc_nused;
*nbp = zilc->zc_next_blk;
}
}
-
- arc_buf_destroy(abuf, &abuf);
}
return (error);
}
/*
* Read a TX_WRITE log data block.
*/
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
const blkptr_t *bp = &lr->lr_blkptr;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (BP_IS_HOLE(bp)) {
if (wbuf != NULL)
memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length));
return (0);
}
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
/*
* If we are not using the resulting data, we are just checking that
* it hasn't been corrupted so we don't need to waste CPU time
* decompressing and decrypting it.
*/
if (wbuf == NULL)
zio_flags |= ZIO_FLAG_RAW;
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
if (wbuf != NULL)
memcpy(wbuf, abuf->b_data, arc_buf_size(abuf));
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
void
zil_sums_init(zil_sums_t *zs)
{
wmsum_init(&zs->zil_commit_count, 0);
wmsum_init(&zs->zil_commit_writer_count, 0);
wmsum_init(&zs->zil_itx_count, 0);
wmsum_init(&zs->zil_itx_indirect_count, 0);
wmsum_init(&zs->zil_itx_indirect_bytes, 0);
wmsum_init(&zs->zil_itx_copied_count, 0);
wmsum_init(&zs->zil_itx_copied_bytes, 0);
wmsum_init(&zs->zil_itx_needcopy_count, 0);
wmsum_init(&zs->zil_itx_needcopy_bytes, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_count, 0);
wmsum_init(&zs->zil_itx_metaslab_normal_bytes, 0);
+ wmsum_init(&zs->zil_itx_metaslab_normal_write, 0);
+ wmsum_init(&zs->zil_itx_metaslab_normal_alloc, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_count, 0);
wmsum_init(&zs->zil_itx_metaslab_slog_bytes, 0);
+ wmsum_init(&zs->zil_itx_metaslab_slog_write, 0);
+ wmsum_init(&zs->zil_itx_metaslab_slog_alloc, 0);
}
void
zil_sums_fini(zil_sums_t *zs)
{
wmsum_fini(&zs->zil_commit_count);
wmsum_fini(&zs->zil_commit_writer_count);
wmsum_fini(&zs->zil_itx_count);
wmsum_fini(&zs->zil_itx_indirect_count);
wmsum_fini(&zs->zil_itx_indirect_bytes);
wmsum_fini(&zs->zil_itx_copied_count);
wmsum_fini(&zs->zil_itx_copied_bytes);
wmsum_fini(&zs->zil_itx_needcopy_count);
wmsum_fini(&zs->zil_itx_needcopy_bytes);
wmsum_fini(&zs->zil_itx_metaslab_normal_count);
wmsum_fini(&zs->zil_itx_metaslab_normal_bytes);
+ wmsum_fini(&zs->zil_itx_metaslab_normal_write);
+ wmsum_fini(&zs->zil_itx_metaslab_normal_alloc);
wmsum_fini(&zs->zil_itx_metaslab_slog_count);
wmsum_fini(&zs->zil_itx_metaslab_slog_bytes);
+ wmsum_fini(&zs->zil_itx_metaslab_slog_write);
+ wmsum_fini(&zs->zil_itx_metaslab_slog_alloc);
}
void
zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
{
zs->zil_commit_count.value.ui64 =
wmsum_value(&zil_sums->zil_commit_count);
zs->zil_commit_writer_count.value.ui64 =
wmsum_value(&zil_sums->zil_commit_writer_count);
zs->zil_itx_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_count);
zs->zil_itx_indirect_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_indirect_count);
zs->zil_itx_indirect_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_indirect_bytes);
zs->zil_itx_copied_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_copied_count);
zs->zil_itx_copied_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_copied_bytes);
zs->zil_itx_needcopy_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_needcopy_count);
zs->zil_itx_needcopy_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_needcopy_bytes);
zs->zil_itx_metaslab_normal_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_count);
zs->zil_itx_metaslab_normal_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_normal_bytes);
+ zs->zil_itx_metaslab_normal_write.value.ui64 =
+ wmsum_value(&zil_sums->zil_itx_metaslab_normal_write);
+ zs->zil_itx_metaslab_normal_alloc.value.ui64 =
+ wmsum_value(&zil_sums->zil_itx_metaslab_normal_alloc);
zs->zil_itx_metaslab_slog_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_count);
zs->zil_itx_metaslab_slog_bytes.value.ui64 =
wmsum_value(&zil_sums->zil_itx_metaslab_slog_bytes);
+ zs->zil_itx_metaslab_slog_write.value.ui64 =
+ wmsum_value(&zil_sums->zil_itx_metaslab_slog_write);
+ zs->zil_itx_metaslab_slog_alloc.value.ui64 =
+ wmsum_value(&zil_sums->zil_itx_metaslab_slog_alloc);
}
/*
* Parse the intent log, and call parse_func for each valid record within.
*/
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
boolean_t decrypt)
{
const zil_header_t *zh = zilog->zl_header;
boolean_t claimed = !!zh->zh_claim_txg;
uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
uint64_t max_blk_seq = 0;
uint64_t max_lr_seq = 0;
uint64_t blk_count = 0;
uint64_t lr_count = 0;
blkptr_t blk, next_blk = {{{{0}}}};
- char *lrbuf, *lrp;
int error = 0;
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
claim_lr_seq = UINT64_MAX;
/*
* Starting at the block pointed to by zh_log we read the log chain.
* For each block in the chain we strongly check that block to
* ensure its validity. We stop when an invalid block is found.
* For each block pointer in the chain we call parse_blk_func().
* For each record in each valid block we call parse_lr_func().
* If the log has been claimed, stop if we encounter a sequence
* number greater than the highest claimed sequence number.
*/
- lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
zil_bp_tree_init(zilog);
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
- char *end = NULL;
+ char *lrp, *end;
+ arc_buf_t *abuf = NULL;
if (blk_seq > claim_blk_seq)
break;
error = parse_blk_func(zilog, &blk, arg, txg);
if (error != 0)
break;
ASSERT3U(max_blk_seq, <, blk_seq);
max_blk_seq = blk_seq;
blk_count++;
if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
break;
error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
- lrbuf, &end);
+ &lrp, &end, &abuf);
if (error != 0) {
+ if (abuf)
+ arc_buf_destroy(abuf, &abuf);
if (claimed) {
char name[ZFS_MAX_DATASET_NAME_LEN];
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS read log block error %d, "
"dataset %s, seq 0x%llx\n", error, name,
(u_longlong_t)blk_seq);
}
break;
}
- for (lrp = lrbuf; lrp < end; lrp += reclen) {
+ for (; lrp < end; lrp += reclen) {
lr_t *lr = (lr_t *)lrp;
reclen = lr->lrc_reclen;
ASSERT3U(reclen, >=, sizeof (lr_t));
if (lr->lrc_seq > claim_lr_seq)
goto done;
error = parse_lr_func(zilog, lr, arg, txg);
if (error != 0)
goto done;
ASSERT3U(max_lr_seq, <, lr->lrc_seq);
max_lr_seq = lr->lrc_seq;
lr_count++;
}
+ arc_buf_destroy(abuf, &abuf);
}
done:
zilog->zl_parse_error = error;
zilog->zl_parse_blk_seq = max_blk_seq;
zilog->zl_parse_lr_seq = max_lr_seq;
zilog->zl_parse_blk_count = blk_count;
zilog->zl_parse_lr_count = lr_count;
zil_bp_tree_fini(zilog);
- zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
return (error);
}
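/*
 * For example (a sketch of the claim-time pattern; the callbacks are the
 * zil_claim_* functions below):
 *
 *     (void) zil_parse(zilog, zil_claim_log_block, zil_claim_log_record,
 *         tx, first_txg, B_FALSE);
 */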
static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
(void) tx;
ASSERT(!BP_IS_HOLE(bp));
/*
* As we call this function from the context of a rewind to a
* checkpoint, each ZIL block whose txg is later than the txg
* that we rewind to is invalid. Thus, we return -1 so
* zil_parse() doesn't attempt to read it.
*/
if (bp->blk_birth >= first_txg)
return (-1);
if (zil_bp_tree_add(zilog, bp) != 0)
return (0);
zio_free(zilog->zl_spa, first_txg, bp);
return (0);
}
static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
(void) zilog, (void) lrc, (void) tx, (void) first_txg;
return (0);
}
static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
/*
* Claim log block if not already committed and not already claimed.
* If tx == NULL, just verify that the block is claimable.
*/
if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
zil_bp_tree_add(zilog, bp) != 0)
return (0);
return (zio_wait(zio_claim(NULL, zilog->zl_spa,
tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
int error;
ASSERT(lrc->lrc_txtype == TX_WRITE);
/*
* If the block is not readable, don't claim it. This can happen
* in normal operation when a log block is written to disk before
* some of the dmu_sync() blocks it points to. In this case, the
* transaction cannot have been committed to anyone (we would have
* waited for all writes to be stable first), so it is semantically
* correct to declare this the end of the log.
*/
if (lr->lr_blkptr.blk_birth >= first_txg) {
error = zil_read_log_data(zilog, lr, NULL);
if (error != 0)
return (error);
}
return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
static int
zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
const blkptr_t *bp;
spa_t *spa;
uint_t ii;
ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);
if (tx == NULL) {
return (0);
}
/*
* XXX: Do we need to byteswap lr?
*/
spa = zilog->zl_spa;
for (ii = 0; ii < lr->lr_nbps; ii++) {
bp = &lr->lr_bps[ii];
/*
* When data is embedded into the BP there is no need to create a
* BRT entry as there is no data block. Just copy the BP as
* it contains the data.
*/
if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
brt_pending_add(spa, bp, tx);
}
}
return (0);
}
static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
switch (lrc->lrc_txtype) {
case TX_WRITE:
return (zil_claim_write(zilog, lrc, tx, first_txg));
case TX_CLONE_RANGE:
return (zil_claim_clone_range(zilog, lrc, tx));
default:
return (0);
}
}
static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t claim_txg)
{
(void) claim_txg;
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
blkptr_t *bp = &lr->lr_blkptr;
ASSERT(lrc->lrc_txtype == TX_WRITE);
/*
* If we previously claimed it, we need to free it.
*/
if (bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
!BP_IS_HOLE(bp)) {
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
}
return (0);
}
static int
zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
const blkptr_t *bp;
spa_t *spa;
uint_t ii;
ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);
if (tx == NULL) {
return (0);
}
spa = zilog->zl_spa;
for (ii = 0; ii < lr->lr_nbps; ii++) {
bp = &lr->lr_bps[ii];
if (!BP_IS_HOLE(bp)) {
zio_free(spa, dmu_tx_get_txg(tx), bp);
}
}
return (0);
}
static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t claim_txg)
{
if (claim_txg == 0) {
return (0);
}
switch (lrc->lrc_txtype) {
case TX_WRITE:
return (zil_free_write(zilog, lrc, tx, claim_txg));
case TX_CLONE_RANGE:
return (zil_free_clone_range(zilog, lrc, tx));
default:
return (0);
}
}
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
return (TREE_CMP(v1, v2));
}
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg,
boolean_t fastwrite)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
lwb->lwb_zilog = zilog;
lwb->lwb_blk = *bp;
lwb->lwb_fastwrite = fastwrite;
lwb->lwb_slog = slog;
+ lwb->lwb_indirect = B_FALSE;
+ if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
+ lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
+ lwb->lwb_sz = BP_GET_LSIZE(bp);
+ } else {
+ lwb->lwb_nused = lwb->lwb_nfilled = 0;
+ lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
+ }
lwb->lwb_state = LWB_STATE_CLOSED;
lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
- lwb->lwb_max_txg = txg;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
lwb->lwb_issued_timestamp = 0;
lwb->lwb_issued_txg = 0;
- if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
- lwb->lwb_nused = sizeof (zil_chain_t);
- lwb->lwb_sz = BP_GET_LSIZE(bp);
- } else {
- lwb->lwb_nused = 0;
- lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
- }
+ lwb->lwb_max_txg = txg;
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
return (lwb);
}
static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
ASSERT(MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
ASSERT(list_is_empty(&lwb->lwb_waiters));
ASSERT(list_is_empty(&lwb->lwb_itxs));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
ASSERT3P(lwb->lwb_write_zio, ==, NULL);
ASSERT3P(lwb->lwb_root_zio, ==, NULL);
ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
/*
* Clear the zilog's field to indicate this lwb is no longer
* valid, and prevent use-after-free errors.
*/
if (zilog->zl_last_lwb_opened == lwb)
zilog->zl_last_lwb_opened = NULL;
kmem_cache_free(zil_lwb_cache, lwb);
}
/*
* Called when we create in-memory log transactions so that we know
* to cleanup the itxs at the end of spa_sync().
*/
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(spa_writeable(zilog->zl_spa));
if (ds->ds_is_snapshot)
panic("dirtying snapshot!");
if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, zilog);
zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
}
}
/*
* Determine if the zil is dirty in the specified txg. Callers wanting to
* ensure that the dirty state does not change must hold the itxg_lock for
* the specified txg. Holding the lock will ensure that the zil cannot be
* dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
* state.
*/
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
return (B_TRUE);
return (B_FALSE);
}
/*
* Determine if the zil is dirty. The zil is considered dirty if it has
* any pending itx records that have not been cleaned by zil_clean().
*/
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* It's called in zil_commit context (zil_process_commit_list()/zil_create()).
* It activates the SPA_FEATURE_ZILSAXATTR feature, if it's enabled.
* Check dsl_dataset_feature_is_active to avoid txg_wait_synced() on every
* zil_commit.
*/
static void
zil_commit_activate_saxattr_feature(zilog_t *zilog)
{
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
!dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(ds, tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&ds->ds_lock);
ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
(void *)B_TRUE;
mutex_exit(&ds->ds_lock);
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
}
}
/*
* Create an on-disk intent log.
*/
static lwb_t *
zil_create(zilog_t *zilog)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb = NULL;
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
blkptr_t blk;
int error = 0;
boolean_t fastwrite = FALSE;
boolean_t slog = FALSE;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
ASSERT(zh->zh_claim_txg == 0);
ASSERT(zh->zh_replay_seq == 0);
blk = zh->zh_log;
/*
* Allocate an initial log block if:
* - there isn't one already
* - the existing block is the wrong endianness
*/
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
if (!BP_IS_HOLE(&blk)) {
zio_free(zilog->zl_spa, txg, &blk);
BP_ZERO(&blk);
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
ZIL_MIN_BLKSZ, &slog);
fastwrite = TRUE;
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
/*
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite);
/*
* If we just allocated the first log block, commit our transaction
* and wait for zil_sync() to stuff the block pointer into zh_log.
* (zh is part of the MOS, so we cannot modify it in open context.)
*/
if (tx != NULL) {
/*
* If "zilsaxattr" feature is enabled on zpool, then activate
* it now while we're creating the ZIL chain. We can't postpone it
* until we write the first xattr log record because we
* need to wait for the feature activation to sync out.
*/
if (spa_feature_is_enabled(zilog->zl_spa,
SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) !=
DMU_OST_ZVOL) {
mutex_enter(&ds->ds_lock);
ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
(void *)B_TRUE;
mutex_exit(&ds->ds_lock);
}
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
} else {
/*
* This branch covers the case where we enable the feature on a
* zpool that has existing ZIL headers.
*/
zil_commit_activate_saxattr_feature(zilog);
}
IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR));
ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
IMPLY(error == 0, lwb != NULL);
return (lwb);
}
/*
* In one tx, free all log blocks and clear the log header. If keep_first
* is set, then we're replaying a log with no content. We want to keep the
* first block, however, so that the first synchronous transaction doesn't
* require a txg_wait_synced() in zil_create(). We don't need to
* txg_wait_synced() here either when keep_first is set, because both
* zil_create() and zil_destroy() will wait for any in-progress destroys
* to complete.
* Return B_TRUE if there were any entries to replay.
*/
boolean_t
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb;
dmu_tx_t *tx;
uint64_t txg;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_old_header = *zh; /* debugging aid */
if (BP_IS_HOLE(&zh->zh_log))
return (B_FALSE);
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&zilog->zl_lock);
ASSERT3U(zilog->zl_destroy_txg, <, txg);
zilog->zl_destroy_txg = txg;
zilog->zl_keep_first = keep_first;
if (!list_is_empty(&zilog->zl_lwb_list)) {
ASSERT(zh->zh_claim_txg == 0);
VERIFY(!keep_first);
while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa,
&lwb->lwb_blk);
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
}
} else if (!keep_first) {
zil_destroy_sync(zilog, tx);
}
mutex_exit(&zilog->zl_lock);
dmu_tx_commit(tx);
return (B_TRUE);
}
void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
ASSERT(list_is_empty(&zilog->zl_lwb_list));
(void) zil_parse(zilog, zil_free_log_block,
zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
dmu_tx_t *tx = txarg;
zilog_t *zilog;
uint64_t first_txg;
zil_header_t *zh;
objset_t *os;
int error;
error = dmu_objset_own_obj(dp, ds->ds_object,
DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
if (error != 0) {
/*
* EBUSY indicates that the objset is inconsistent, in which
* case it cannot have a ZIL.
*/
if (error != EBUSY) {
cmn_err(CE_WARN, "can't open objset for %llu, error %u",
(unsigned long long)ds->ds_object, error);
}
return (0);
}
zilog = dmu_objset_zil(os);
zh = zil_header_in_syncing_context(zilog);
ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
first_txg = spa_min_claim_txg(zilog->zl_spa);
/*
* If the spa_log_state is not set to be cleared, check whether
* the current uberblock is a checkpoint one and if the current
* header has been claimed before moving on.
*
* If the current uberblock is a checkpointed uberblock then
* one of the following scenarios took place:
*
* 1] We are currently rewinding to the checkpoint of the pool.
* 2] We crashed in the middle of a checkpoint rewind but we
* did manage to write the checkpointed uberblock to the
* vdev labels, so when we tried to import the pool again
* the checkpointed uberblock was selected from the import
* procedure.
*
* In both cases we want to zero out all the ZIL blocks, except
* the ones that have been claimed at the time of the checkpoint
* (their zh_claim_txg != 0). The reason is that these blocks
* may be corrupted since we may have reused their locations on
* disk after we took the checkpoint.
*
* We could try to set spa_log_state to SPA_LOG_CLEAR earlier
* when we first figure out whether the current uberblock is
* checkpointed or not. Unfortunately, that would discard all
* the logs, including the ones that are claimed, and we would
* leak space.
*/
if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
(zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)) {
if (!BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_clear_log_block,
zil_noop_log_record, tx, first_txg, B_FALSE);
}
BP_ZERO(&zh->zh_log);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* If we are not rewinding and opening the pool normally, then
* the min_claim_txg should be equal to the first txg of the pool.
*/
ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));
/*
* Claim all log blocks if we haven't already done so, and remember
* the highest claimed sequence number. This ensures that if we can
* read only part of the log now (e.g. due to a missing device),
* but we can read the entire log later, we will not try to replay
* or destroy beyond the last block we successfully claimed.
*/
ASSERT3U(zh->zh_claim_txg, <=, first_txg);
if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_claim_log_block,
zil_claim_log_record, tx, first_txg, B_FALSE);
zh->zh_claim_txg = first_txg;
zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
zh->zh_flags |= ZIL_REPLAY_NEEDED;
zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
}
ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* Check the log by walking the log chain.
* Checksum errors are ok as they indicate the end of the chain.
* Any other error (no device or read failure) returns an error.
*/
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
(void) dp;
zilog_t *zilog;
objset_t *os;
blkptr_t *bp;
int error;
ASSERT(tx == NULL);
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
cmn_err(CE_WARN, "can't open objset %llu, error %d",
(unsigned long long)ds->ds_object, error);
return (0);
}
zilog = dmu_objset_zil(os);
bp = (blkptr_t *)&zilog->zl_header->zh_log;
if (!BP_IS_HOLE(bp)) {
vdev_t *vd;
boolean_t valid = B_TRUE;
/*
* Check the first block and determine if it's on a log device
* which may have been removed or faulted prior to loading this
* pool. If so, there's no point in checking the rest of the
* log as its content should have already been synced to the
* pool.
*/
spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
if (vd->vdev_islog && vdev_is_dead(vd))
valid = vdev_log_state_valid(vd);
spa_config_exit(os->os_spa, SCL_STATE, FTAG);
if (!valid)
return (0);
/*
* Check whether the current uberblock is checkpointed (e.g.
* we are rewinding) and whether the current header has been
* claimed or not. If it hasn't then skip verifying it. We
* do this because its ZIL blocks may be part of the pool's
* state before the rewind, which is no longer valid.
*/
zil_header_t *zh = zil_header_in_syncing_context(zilog);
if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)
return (0);
}
/*
* Because tx == NULL, zil_claim_log_block() will not actually claim
* any blocks, but just determine whether it is possible to do so.
* In addition to checking the log chain, zil_claim_log_block()
* will invoke zio_claim() with a done func of spa_claim_notify(),
* which will update spa_max_claim_txg. See spa_load() for details.
*/
error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
zilog->zl_header->zh_claim_txg ? -1ULL :
spa_min_claim_txg(os->os_spa), B_FALSE);
return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
/*
* When an itx is "skipped", this function is used to properly mark the
* waiter as "done, and signal any thread(s) waiting on it. An itx can
* be skipped (and not committed to an lwb) for a variety of reasons,
* one of them being that the itx was committed via spa_sync(), prior to
* it being committed to an lwb; this can happen if a thread calling
* zil_commit() is racing with spa_sync().
*/
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when the given waiter is to be linked into an
* lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb.
* At this point, the waiter will no longer be referenced by the itx,
* and instead, will be referenced by the lwb.
*/
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
/*
* The lwb_waiters field of the lwb is protected by the zilog's
* zl_lock, thus it must be held when calling this function.
*/
ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3P(lwb, !=, NULL);
ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE);
list_insert_tail(&lwb->lwb_waiters, zcw);
zcw->zcw_lwb = lwb;
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when zio_alloc_zil() fails to allocate a ZIL
* block, and the given waiter must be linked to the "nolwb waiters"
* list inside of zil_process_commit_list().
*/
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
list_insert_tail(nolwb, zcw);
mutex_exit(&zcw->zcw_lock);
}
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
avl_tree_t *t = &lwb->lwb_vdev_tree;
avl_index_t where;
zil_vdev_node_t *zv, zvsearch;
int ndvas = BP_GET_NDVAS(bp);
int i;
if (zil_nocacheflush)
return;
mutex_enter(&lwb->lwb_vdev_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
zv->zv_vdev = zvsearch.zv_vdev;
avl_insert(t, zv, where);
}
}
mutex_exit(&lwb->lwb_vdev_lock);
}
static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
avl_tree_t *src = &lwb->lwb_vdev_tree;
avl_tree_t *dst = &nlwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
* At this point in its lifetime, 'lwb' no longer needs the protection
* of lwb_vdev_lock: its writes and those of its children have all
* completed, and its vdev tree will only be modified while holding
* zilog->zl_lock. The younger 'nlwb', however, may still be waiting
* on future writes to additional vdevs.
*/
mutex_enter(&nlwb->lwb_vdev_lock);
/*
* Tear down the 'lwb' vdev tree, ensuring that entries which do not
* exist in 'nlwb' are moved to it, freeing any would-be duplicates.
*/
while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
avl_index_t where;
if (avl_find(dst, zv, &where) == NULL) {
avl_insert(dst, zv, where);
} else {
kmem_free(zv, sizeof (*zv));
}
}
mutex_exit(&nlwb->lwb_vdev_lock);
}
void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}
/*
* This function is called after all vdevs associated with a given lwb
* write have completed their DKIOCFLUSHWRITECACHE command; or as soon
* as the lwb write completes, if "zil_nocacheflush" is set. Further,
* all "previous" lwb's will have completed before this function is
* called; i.e. this function is called for all previous lwbs before
* it's called for "this" lwb (enforced via zio the dependencies
* configured in zil_lwb_set_zio_dependency()).
*
* The intention is for this function to be called as soon as the
* contents of an lwb are considered "stable" on disk, and will survive
* any sudden loss of power. At this point, any threads waiting for the
* lwb to reach this state are signalled, and the "waiter" structures
* are marked "done".
*/
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
zilog_t *zilog = lwb->lwb_zilog;
zil_commit_waiter_t *zcw;
itx_t *itx;
uint64_t txg;
+ list_t itxs, waiters;
spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
+ list_create(&itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
+ list_create(&waiters, sizeof (zil_commit_waiter_t),
+ offsetof(zil_commit_waiter_t, zcw_node));
+
hrtime_t t = gethrtime() - lwb->lwb_issued_timestamp;
mutex_enter(&zilog->zl_lock);
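/*
* Track lwb latency (from issue to flush completion) as a moving
* average, weighting prior history 7/8 and this sample 1/8; it is
* used elsewhere to size commit waiter timeouts.
*/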
zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 7 + t) / 8;
lwb->lwb_root_zio = NULL;
- ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
- lwb->lwb_state = LWB_STATE_FLUSH_DONE;
-
if (zilog->zl_last_lwb_opened == lwb) {
/*
* Remember the highest committed log sequence number
* for ztest. We only update this value when all the log
* writes succeeded, because ztest wants to ASSERT that
* it got the whole log chain.
*/
zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
- while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
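+ /*
+ * Move this lwb's itxs and waiters onto local lists so they can be
+ * destroyed and signalled after zl_lock has been dropped.
+ */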
+ list_move_tail(&itxs, &lwb->lwb_itxs);
+ list_move_tail(&waiters, &lwb->lwb_waiters);
+
+ ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
+ lwb->lwb_state = LWB_STATE_FLUSH_DONE;
+
+ mutex_exit(&zilog->zl_lock);
+
+ while ((itx = list_remove_head(&itxs)) != NULL)
zil_itx_destroy(itx);
+ list_destroy(&itxs);
- while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
+ while ((zcw = list_remove_head(&waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
- ASSERT3P(zcw->zcw_lwb, ==, lwb);
zcw->zcw_lwb = NULL;
/*
* We expect any ZIO errors from child ZIOs to have been
* propagated "up" to this specific LWB's root ZIO, in
* order for this error handling to work correctly. This
* includes ZIO errors from either this LWB's write or
* flush, as well as any errors from other dependent LWBs
* (e.g. a root LWB ZIO that might be a child of this LWB).
*
* With that said, it's important to note that LWB flush
* errors are not propagated up to the LWB root ZIO.
* This is incorrect behavior, and results in VDEV flush
* errors not being handled correctly here. See the
* comment above the call to "zio_flush" for details.
*/
zcw->zcw_zio_error = zio->io_error;
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
-
- mutex_exit(&zilog->zl_lock);
+ list_destroy(&waiters);
mutex_enter(&zilog->zl_lwb_io_lock);
txg = lwb->lwb_issued_txg;
ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0);
zilog->zl_lwb_inflight[txg & TXG_MASK]--;
if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0)
cv_broadcast(&zilog->zl_lwb_io_cv);
mutex_exit(&zilog->zl_lwb_io_lock);
}
/*
* Wait for the completion of all lwb writes and flushes issued for the
* given txg. This guarantees that zil_lwb_flush_vdevs_done() has been
* called and has returned for each of them.
*/
static void
zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg)
{
ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa));
mutex_enter(&zilog->zl_lwb_io_lock);
while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0)
cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock);
mutex_exit(&zilog->zl_lwb_io_lock);
#ifdef ZFS_DEBUG
mutex_enter(&zilog->zl_lock);
mutex_enter(&zilog->zl_lwb_io_lock);
lwb_t *lwb = list_head(&zilog->zl_lwb_list);
while (lwb != NULL && lwb->lwb_max_txg <= txg) {
if (lwb->lwb_issued_txg <= txg) {
ASSERT(lwb->lwb_state != LWB_STATE_ISSUED);
ASSERT(lwb->lwb_state != LWB_STATE_WRITE_DONE);
IMPLY(lwb->lwb_issued_txg > 0,
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
}
IMPLY(lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE,
lwb->lwb_buf == NULL);
lwb = list_next(&zilog->zl_lwb_list, lwb);
}
mutex_exit(&zilog->zl_lwb_io_lock);
mutex_exit(&zilog->zl_lock);
#endif
}
/*
* This is called when an lwb's write zio completes. The callback's
* purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs
* in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved
* in writing out this specific lwb's data, and in the case that cache
* flushes have been deferred, vdevs involved in writing the data for
* previous lwbs. The writes corresponding to all the vdevs in the
* lwb_vdev_tree will have completed by the time this is called, due to
* the zio dependencies configured in zil_lwb_set_zio_dependency(),
* which takes deferred flushes into account. The lwb will be "done"
* once zil_lwb_flush_vdevs_done() is called, which occurs in the zio
* completion callback for the lwb's root zio.
*/
static void
zil_lwb_write_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
spa_t *spa = zio->io_spa;
zilog_t *zilog = lwb->lwb_zilog;
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
lwb_t *nlwb;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
ASSERT(!BP_IS_GANG(zio->io_bp));
ASSERT(!BP_IS_HOLE(zio->io_bp));
ASSERT(BP_GET_FILL(zio->io_bp) == 0);
abd_free(zio->io_abd);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
lwb->lwb_buf = NULL;
mutex_enter(&zilog->zl_lock);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
lwb->lwb_state = LWB_STATE_WRITE_DONE;
lwb->lwb_write_zio = NULL;
lwb->lwb_fastwrite = FALSE;
nlwb = list_next(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
return;
/*
* If there was an IO error, we're not going to call zio_flush()
* on these vdevs, so we simply empty the tree and free the
* nodes. We avoid calling zio_flush() since there isn't any
* good reason for doing so after the lwb block failed to be
* written out.
*
* Additionally, we don't perform any further error handling at
* this point (e.g. setting "zcw_zio_error" appropriately), as
* we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
* we expect any error seen here, to have been propagated to
* that function).
*/
if (zio->io_error != 0) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
}
/*
* If this lwb does not have any threads waiting for it to
* complete, we want to defer issuing the DKIOCFLUSHWRITECACHE
* command to the vdevs written to by "this" lwb, and instead
* rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE
* command for those vdevs. Thus, we merge the vdev tree of
* "this" lwb with the vdev tree of the "next" lwb in the list,
* and assume the "next" lwb will handle flushing the vdevs (or
* deferring the flush(es) again).
*
* This is a useful performance optimization, especially for
* workloads with lots of async write activity and few sync
* write and/or fsync activity, as it has the potential to
* coalesce multiple flush commands to a vdev into one.
*/
if (list_is_empty(&lwb->lwb_waiters) && nlwb != NULL) {
zil_lwb_flush_defer(lwb, nlwb);
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
return;
}
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
if (vd != NULL && !vd->vdev_nowritecache) {
/*
* The "ZIO_FLAG_DONT_PROPAGATE" is currently
* always used within "zio_flush". This means,
* any errors when flushing the vdev(s), will
* (unfortunately) not be handled correctly,
* since these "zio_flush" errors will not be
* propagated up to "zil_lwb_flush_vdevs_done".
*/
zio_flush(lwb->lwb_root_zio, vd);
}
kmem_free(zv, sizeof (*zv));
}
}
static void
zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
{
lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zilog->zl_lock));
/*
* The zilog's "zl_last_lwb_opened" field is used to build the
* lwb/zio dependency chain, which is used to preserve the
* ordering of lwb completions that is required by the semantics
* of the ZIL. Each new lwb zio becomes a parent of the
* "previous" lwb zio, such that the new lwb's zio cannot
* complete until the "previous" lwb's zio completes.
*
* This is required by the semantics of zil_commit(); the commit
* waiters attached to the lwbs will be woken in the lwb zio's
* completion callback, so this zio dependency graph ensures the
* waiters are woken in the correct order (the same order the
* lwbs were created).
*/
if (last_lwb_opened != NULL &&
last_lwb_opened->lwb_state != LWB_STATE_FLUSH_DONE) {
ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
last_lwb_opened->lwb_state == LWB_STATE_ISSUED ||
last_lwb_opened->lwb_state == LWB_STATE_WRITE_DONE);
ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
zio_add_child(lwb->lwb_root_zio,
last_lwb_opened->lwb_root_zio);
/*
* If the previous lwb's write hasn't already completed,
* we also want to order the completion of the lwb write
* zios (above, we only order the completion of the lwb
* root zios). This is required because of how we can
* defer the DKIOCFLUSHWRITECACHE commands for each lwb.
*
* When the DKIOCFLUSHWRITECACHE commands are deferred,
* the previous lwb will rely on this lwb to flush the
* vdevs written to by that previous lwb. Thus, we need
* to ensure this lwb doesn't issue the flush until
* after the previous lwb's write completes. We ensure
* this ordering by setting the zio parent/child
* relationship here.
*
* Without this relationship on the lwb's write zio,
* it's possible for this lwb's write to complete prior
* to the previous lwb's write completing; and thus, the
* vdevs for the previous lwb would be flushed prior to
* that lwb's data being written to those vdevs (the
* vdevs are flushed in the lwb write zio's completion
* handler, zil_lwb_write_done()).
*/
if (last_lwb_opened->lwb_state != LWB_STATE_WRITE_DONE) {
ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
ASSERT3P(last_lwb_opened->lwb_write_zio, !=, NULL);
zio_add_child(lwb->lwb_write_zio,
last_lwb_opened->lwb_write_zio);
}
}
}
/*
* This function's purpose is to "open" an lwb such that it is ready to
* accept new itxs being committed to it. To do this, the lwb's zio
* structures are created, and linked to the lwb. This function is
* idempotent; if the passed in lwb has already been opened, this
* function is essentially a no-op.
*/
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
zbookmark_phys_t zb;
zio_priority_t prio;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);
+ if (lwb->lwb_root_zio != NULL)
+ return;
+
+ lwb->lwb_root_zio = zio_root(zilog->zl_spa,
+ zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
+
+ abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
+ BP_GET_LSIZE(&lwb->lwb_blk));
+
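+ /*
+ * Writes to the main pool, and slog writes while zl_cur_used stays
+ * within zil_slog_bulk, get synchronous priority; beyond that
+ * threshold slog writes are demoted to asynchronous priority so a
+ * single bulky writer does not monopolize a shared slog device.
+ */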
+ if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
+ prio = ZIO_PRIORITY_SYNC_WRITE;
+ else
+ prio = ZIO_PRIORITY_ASYNC_WRITE;
+
SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
/* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
mutex_enter(&zilog->zl_lock);
- if (lwb->lwb_root_zio == NULL) {
- abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
- BP_GET_LSIZE(&lwb->lwb_blk));
-
- if (!lwb->lwb_fastwrite) {
- metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
- lwb->lwb_fastwrite = 1;
- }
-
- if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
- prio = ZIO_PRIORITY_SYNC_WRITE;
- else
- prio = ZIO_PRIORITY_ASYNC_WRITE;
-
- lwb->lwb_root_zio = zio_root(zilog->zl_spa,
- zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
- ASSERT3P(lwb->lwb_root_zio, !=, NULL);
+ if (!lwb->lwb_fastwrite) {
+ metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
+ lwb->lwb_fastwrite = 1;
+ }
- lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
- zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
- BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
- prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_FASTWRITE, &zb);
- ASSERT3P(lwb->lwb_write_zio, !=, NULL);
+ lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, zilog->zl_spa, 0,
+ &lwb->lwb_blk, lwb_abd, BP_GET_LSIZE(&lwb->lwb_blk),
+ zil_lwb_write_done, lwb, prio,
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_FASTWRITE, &zb);
- lwb->lwb_state = LWB_STATE_OPENED;
+ lwb->lwb_state = LWB_STATE_OPENED;
- zil_lwb_set_zio_dependency(zilog, lwb);
- zilog->zl_last_lwb_opened = lwb;
- }
+ zil_lwb_set_zio_dependency(zilog, lwb);
+ zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
-
- ASSERT3P(lwb->lwb_root_zio, !=, NULL);
- ASSERT3P(lwb->lwb_write_zio, !=, NULL);
- ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
}
/*
* Define a limited set of intent log block sizes.
*
* These must be a multiple of 4KB. Note that only the amount used (again
* aligned to 4KB) actually gets written. However, we can't always just
* allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
*/
static const struct {
uint64_t limit;
uint64_t blksz;
} zil_block_buckets[] = {
{ 4096, 4096 }, /* non TX_WRITE */
{ 8192 + 4096, 8192 + 4096 }, /* database */
{ 32768 + 4096, 32768 + 4096 }, /* NFS writes */
{ 65536 + 4096, 65536 + 4096 }, /* 64KB writes */
{ 131072, 131072 }, /* < 128KB writes */
{ 131072 + 4096, 65536 + 4096 }, /* 128KB writes */
{ UINT64_MAX, SPA_OLD_MAXBLOCKSIZE }, /* > 128KB writes */
};
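/*
* For example, ~20KB of pending records (zl_cur_used plus the
* zil_chain_t header) exceeds the 4KB and 12KB limits but fits the
* 36KB bucket, so a 32KB + 4KB block is selected (subject to the
* zl_prev_blks smoothing applied in zil_lwb_write_close()).
*/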
/*
* Maximum block size used by the ZIL. This is picked up when the ZIL is
* initialized. Otherwise this should not be used directly; see
* zl_max_block_size instead.
*/
static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
/*
- * Start a log block write and advance to the next log block.
- * Calls are serialized.
+ * Close the current log block so it can be issued, and allocate the next one.
+ * Must be called under zl_issuer_lock to chain more lwbs.
*/
static lwb_t *
-zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
+zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb)
{
lwb_t *nlwb = NULL;
zil_chain_t *zilc;
spa_t *spa = zilog->zl_spa;
blkptr_t *bp;
dmu_tx_t *tx;
uint64_t txg;
- uint64_t zil_blksz, wsz;
+ uint64_t zil_blksz;
int i, error;
boolean_t slog;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
- if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
- zilc = (zil_chain_t *)lwb->lwb_buf;
- bp = &zilc->zc_next_blk;
- } else {
- zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
- bp = &zilc->zc_next_blk;
+ /*
+ * If this lwb includes indirect writes, we have to commit them before
+ * creating the transaction, otherwise we may end up in a deadlock.
+ */
+ if (lwb->lwb_indirect) {
+ for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
+ itx = list_next(&lwb->lwb_itxs, itx))
+ zil_lwb_commit(zilog, lwb, itx);
+ lwb->lwb_nused = lwb->lwb_nfilled;
}
- ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
-
/*
* Allocate the next block and save its address in this block
* before writing it in order to establish the log chain.
*/
tx = dmu_tx_create(zilog->zl_os);
/*
* Since we are not going to create any new dirty data, and we
* can even help with clearing the existing dirty data, we
* should not be subject to the dirty data based delays. We
* use TXG_NOTHROTTLE to bypass the delay mechanism.
*/
VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&zilog->zl_lwb_io_lock);
lwb->lwb_issued_txg = txg;
zilog->zl_lwb_inflight[txg & TXG_MASK]++;
zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg);
mutex_exit(&zilog->zl_lwb_io_lock);
/*
* Log blocks are pre-allocated. Here we select the size of the next
* block, based on size used in the last block.
* - first find the smallest bucket that will fit the block from a
* limited set of block sizes. This is because it's faster to write
* blocks allocated from the same metaslab as they are adjacent or
* close.
* - next find the maximum from the new suggested size and an array of
* previous sizes. This lessens a picket fence effect of wrongly
* guessing the size if we have a stream of say 2k, 64k, 2k, 64k
* requests.
*
* Note we only write what is used, but we can't just allocate
* the maximum block size because we can exhaust the available
* pool log space.
*/
zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++)
continue;
zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size);
zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
for (i = 0; i < ZIL_PREV_BLKS; i++)
zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
+ DTRACE_PROBE3(zil__block__size, zilog_t *, zilog,
+ uint64_t, zil_blksz,
+ uint64_t, zilog->zl_prev_blks[zilog->zl_prev_rotor]);
zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
+ if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2)
+ zilc = (zil_chain_t *)lwb->lwb_buf;
+ else
+ zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
+ bp = &zilc->zc_next_blk;
BP_ZERO(bp);
error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog);
- if (slog) {
- ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
- ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
- lwb->lwb_nused);
- } else {
- ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count);
- ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes,
- lwb->lwb_nused);
- }
if (error == 0) {
ASSERT3U(bp->blk_birth, ==, txg);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
/*
* Allocate a new log write block (lwb).
*/
nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE);
}
+ lwb->lwb_state = LWB_STATE_ISSUED;
+
+ dmu_tx_commit(tx);
+
+ /*
+ * If there was an allocation failure then nlwb will be null which
+ * forces a txg_wait_synced().
+ */
+ return (nlwb);
+}
+
+/*
+ * Finalize the previously closed block and issue its write zio.
+ * Does not require locking.
+ */
+static void
+zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
+{
+ zil_chain_t *zilc;
+ int wsz;
+
+ /* Actually fill the lwb with the data if not yet. */
+ if (!lwb->lwb_indirect) {
+ for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
+ itx = list_next(&lwb->lwb_itxs, itx))
+ zil_lwb_commit(zilog, lwb, itx);
+ lwb->lwb_nused = lwb->lwb_nfilled;
+ }
+
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
/* For Slim ZIL only write what is used. */
- wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
- ASSERT3U(wsz, <=, lwb->lwb_sz);
+ wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, int);
+ ASSERT3S(wsz, <=, lwb->lwb_sz);
zio_shrink(lwb->lwb_write_zio, wsz);
wsz = lwb->lwb_write_zio->io_size;
+ zilc = (zil_chain_t *)lwb->lwb_buf;
} else {
wsz = lwb->lwb_sz;
+ zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
}
-
zilc->zc_pad = 0;
zilc->zc_nused = lwb->lwb_nused;
zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
/*
* clear unused data for security
*/
memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
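+ /*
+ * Update block statistics: "bytes" counts the space consumed in the
+ * block (lwb_nused), "write" the size actually written after any
+ * shrink (wsz), and "alloc" the full allocated block size.
+ */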
+ if (lwb->lwb_slog) {
+ ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
+ ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
+ lwb->lwb_nused);
+ ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_write,
+ wsz);
+ ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_alloc,
+ BP_GET_LSIZE(&lwb->lwb_blk));
+ } else {
+ ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count);
+ ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes,
+ lwb->lwb_nused);
+ ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_write,
+ wsz);
+ ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_alloc,
+ BP_GET_LSIZE(&lwb->lwb_blk));
+ }
spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);
-
zil_lwb_add_block(lwb, &lwb->lwb_blk);
lwb->lwb_issued_timestamp = gethrtime();
- lwb->lwb_state = LWB_STATE_ISSUED;
-
zio_nowait(lwb->lwb_root_zio);
zio_nowait(lwb->lwb_write_zio);
-
- dmu_tx_commit(tx);
-
- /*
- * If there was an allocation failure then nlwb will be null which
- * forces a txg_wait_synced().
- */
- return (nlwb);
}
/*
* Maximum amount of data that can be put into a single log block.
*/
uint64_t
zil_max_log_data(zilog_t *zilog, size_t hdrsize)
{
return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize);
}
/*
* Maximum amount of log space we agree to waste in order to reduce the
* number of WR_NEED_COPY chunks and thus zl_get_data() overhead (~12%).
*/
static inline uint64_t
zil_max_waste_space(zilog_t *zilog)
{
return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 8);
}
/*
* Maximum amount of write data for WR_COPIED. For correctness, consumers
* must fall back to WR_NEED_COPY if we can't fit the entire record into one
* maximum sized log block, because each WR_COPIED record must fit in a
* single log block. For space efficiency, we want to fit two records into a
* max-sized log block.
*/
uint64_t
zil_max_copied_data(zilog_t *zilog)
{
return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 -
sizeof (lr_write_t));
}
+/*
+ * Estimate space needed in the lwb for the itx. Allocate more lwbs or
+ * split the itx as needed, but don't touch the actual transaction data.
+ * Must be called under zl_issuer_lock, since it may call
+ * zil_lwb_write_close() to chain more lwbs.
+ */
static lwb_t *
-zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
+zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs)
{
- lr_t *lrcb, *lrc;
- lr_write_t *lrwb, *lrw;
- char *lr_buf;
- uint64_t dlen, dnow, dpad, lwb_sp, reclen, txg, max_log_data;
+ itx_t *citx;
+ lr_t *lr, *clr;
+ lr_write_t *lrw;
+ uint64_t dlen, dnow, lwb_sp, reclen, max_log_data;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
- lrc = &itx->itx_lr;
- lrw = (lr_write_t *)lrc;
+ lr = &itx->itx_lr;
+ lrw = (lr_write_t *)lr;
/*
* A commit itx doesn't represent any on-disk state; instead
* it's simply used as a place holder on the commit list, and
* provides a mechanism for attaching a "commit waiter" onto the
* correct lwb (such that the waiter can be signalled upon
* completion of that lwb). Thus, we don't process this itx's
* log record if it's a commit itx (these itx's don't have log
* records), and instead link the itx's waiter onto the lwb's
* list of waiters.
*
* For more details, see the comment above zil_commit().
*/
- if (lrc->lrc_txtype == TX_COMMIT) {
+ if (lr->lrc_txtype == TX_COMMIT) {
mutex_enter(&zilog->zl_lock);
zil_commit_waiter_link_lwb(itx->itx_private, lwb);
itx->itx_private = NULL;
mutex_exit(&zilog->zl_lock);
+ list_insert_tail(&lwb->lwb_itxs, itx);
return (lwb);
}
- if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
+ if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
dlen = P2ROUNDUP_TYPED(
lrw->lr_length, sizeof (uint64_t), uint64_t);
- dpad = dlen - lrw->lr_length;
} else {
- dlen = dpad = 0;
+ dlen = 0;
}
- reclen = lrc->lrc_reclen;
+ reclen = lr->lrc_reclen;
zilog->zl_cur_used += (reclen + dlen);
- txg = lrc->lrc_txg;
cont:
/*
* If this record won't fit in the current log block, start a new one.
* For WR_NEED_COPY optimize layout for minimal number of chunks.
*/
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t));
if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
lwb_sp < zil_max_waste_space(zilog) &&
(dlen % max_log_data == 0 ||
lwb_sp < reclen + dlen % max_log_data))) {
- lwb = zil_lwb_write_issue(zilog, lwb);
+ list_insert_tail(ilwbs, lwb);
+ lwb = zil_lwb_write_close(zilog, lwb);
if (lwb == NULL)
return (NULL);
zil_lwb_write_open(zilog, lwb);
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
/*
* There must be enough space in the new, empty log block to
* hold reclen. For WR_COPIED, we need to fit the whole
* record in one block, and reclen is the header size + the
* data size. For WR_NEED_COPY, we can create multiple
* records, splitting the data into multiple blocks, so we
* only need to fit one word of data per block; in this case
* reclen is just the header size (no data).
*/
ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
}
dnow = MIN(dlen, lwb_sp - reclen);
- lr_buf = lwb->lwb_buf + lwb->lwb_nused;
- memcpy(lr_buf, lrc, reclen);
- lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */
- lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */
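+ /*
+ * If the record's data does not fit entirely into this lwb, split
+ * it: clone the itx, let the clone carry the dnow bytes that fit
+ * here, and advance the original past that range so the remainder
+ * is assigned on the next pass (via the "cont" label above).
+ */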
+ if (dlen > dnow) {
+ ASSERT3U(lr->lrc_txtype, ==, TX_WRITE);
+ ASSERT3U(itx->itx_wr_state, ==, WR_NEED_COPY);
+ citx = zil_itx_clone(itx);
+ clr = &citx->itx_lr;
+ lr_write_t *clrw = (lr_write_t *)clr;
+ clrw->lr_length = dnow;
+ lrw->lr_offset += dnow;
+ lrw->lr_length -= dnow;
+ } else {
+ citx = itx;
+ clr = lr;
+ }
+
+ /*
+ * We're actually making an entry, so update lrc_seq to be the
+ * log record sequence number. Note that this is generally not
+ * equal to the itx sequence number because not all transactions
+ * are synchronous, and sometimes spa_sync() gets there first.
+ */
+ clr->lrc_seq = ++zilog->zl_lr_seq;
+
+ lwb->lwb_nused += reclen + dnow;
+ ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
+ ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
+
+ zil_lwb_add_txg(lwb, lr->lrc_txg);
+ list_insert_tail(&lwb->lwb_itxs, citx);
+
+ dlen -= dnow;
+ if (dlen > 0) {
+ zilog->zl_cur_used += reclen;
+ goto cont;
+ }
+
+ /*
+ * We have to actually issue all queued LWBs before we may have to
+ * wait for a txg sync. Otherwise we may end up in a deadlock.
+ */
+ if (lr->lrc_txtype == TX_WRITE) {
+ boolean_t frozen = lr->lrc_txg > spa_freeze_txg(zilog->zl_spa);
+ if (frozen || itx->itx_wr_state == WR_INDIRECT) {
+ lwb_t *tlwb;
+ while ((tlwb = list_remove_head(ilwbs)) != NULL)
+ zil_lwb_write_issue(zilog, tlwb);
+ }
+ if (itx->itx_wr_state == WR_INDIRECT)
+ lwb->lwb_indirect = B_TRUE;
+ if (frozen)
+ txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg);
+ }
+
+ return (lwb);
+}
+
+/*
+ * Fill the actual transaction data into the lwb, following zil_lwb_assign().
+ * Does not require locking.
+ */
+static void
+zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
+{
+ lr_t *lr, *lrb;
+ lr_write_t *lrw, *lrwb;
+ char *lr_buf;
+ uint64_t dlen, reclen;
+
+ lr = &itx->itx_lr;
+ lrw = (lr_write_t *)lr;
+
+ if (lr->lrc_txtype == TX_COMMIT)
+ return;
+
+ if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
+ dlen = P2ROUNDUP_TYPED(
+ lrw->lr_length, sizeof (uint64_t), uint64_t);
+ } else {
+ dlen = 0;
+ }
+ reclen = lr->lrc_reclen;
+ ASSERT3U(reclen + dlen, <=, lwb->lwb_nused - lwb->lwb_nfilled);
+
+ lr_buf = lwb->lwb_buf + lwb->lwb_nfilled;
+ memcpy(lr_buf, lr, reclen);
+ lrb = (lr_t *)lr_buf; /* Like lr, but inside lwb. */
+ lrwb = (lr_write_t *)lrb; /* Like lrw, but inside lwb. */
ZIL_STAT_BUMP(zilog, zil_itx_count);
/*
* If it's a write, fetch the data or get its blkptr as appropriate.
*/
- if (lrc->lrc_txtype == TX_WRITE) {
- if (txg > spa_freeze_txg(zilog->zl_spa))
- txg_wait_synced(zilog->zl_dmu_pool, txg);
+ if (lr->lrc_txtype == TX_WRITE) {
if (itx->itx_wr_state == WR_COPIED) {
ZIL_STAT_BUMP(zilog, zil_itx_copied_count);
ZIL_STAT_INCR(zilog, zil_itx_copied_bytes,
lrw->lr_length);
} else {
char *dbuf;
int error;
if (itx->itx_wr_state == WR_NEED_COPY) {
dbuf = lr_buf + reclen;
- lrcb->lrc_reclen += dnow;
- if (lrwb->lr_length > dnow)
- lrwb->lr_length = dnow;
- lrw->lr_offset += dnow;
- lrw->lr_length -= dnow;
+ lrb->lrc_reclen += dlen;
ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count);
ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes,
- dnow);
+ dlen);
} else {
ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
dbuf = NULL;
ZIL_STAT_BUMP(zilog, zil_itx_indirect_count);
ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes,
lrw->lr_length);
}
/*
* We pass in the "lwb_write_zio" rather than
* "lwb_root_zio" so that the "lwb_write_zio"
* becomes the parent of any zio's created by
* the "zl_get_data" callback. The vdevs are
* flushed after the "lwb_write_zio" completes,
* so we want to make sure that completion
* callback waits for these additional zio's,
* such that the vdevs used by those zio's will
* be included in the lwb's vdev tree, and those
* vdevs will be properly flushed. If we passed
* in "lwb_root_zio" here, then these additional
* vdevs may not be flushed; e.g. if these zio's
* completed after "lwb_write_zio" completed.
*/
error = zilog->zl_get_data(itx->itx_private,
itx->itx_gen, lrwb, dbuf, lwb,
lwb->lwb_write_zio);
- if (dbuf != NULL && error == 0 && dnow == dlen)
+ if (dbuf != NULL && error == 0) {
/* Zero any padding bytes in the last block. */
- memset((char *)dbuf + lrwb->lr_length, 0, dpad);
+ memset((char *)dbuf + lrwb->lr_length, 0,
+ dlen - lrwb->lr_length);
+ }
/*
* Typically, the only return values we should see from
* ->zl_get_data() are 0, EIO, ENOENT, EEXIST or
* EALREADY. However, it is also possible to see other
* error values such as ENOSPC or EINVAL from
* dmu_read() -> dnode_hold() -> dnode_hold_impl() or
* ENXIO as well as a multitude of others from the
* block layer through dmu_buf_hold() -> dbuf_read()
* -> zio_wait(), as well as through dmu_read() ->
* dnode_hold() -> dnode_hold_impl() -> dbuf_read() ->
* zio_wait(). When these errors happen, we can assume
* that neither an immediate write nor an indirect
* write occurred, so we need to fall back to
* txg_wait_synced(). This is unusual, so we print to
* dmesg whenever one of these errors occurs.
*/
switch (error) {
case 0:
break;
default:
cmn_err(CE_WARN, "zil_lwb_commit() received "
"unexpected error %d from ->zl_get_data()"
". Falling back to txg_wait_synced().",
error);
zfs_fallthrough;
case EIO:
- txg_wait_synced(zilog->zl_dmu_pool, txg);
+ if (lwb->lwb_indirect) {
+ txg_wait_synced(zilog->zl_dmu_pool,
+ lr->lrc_txg);
+ } else {
+ lwb->lwb_write_zio->io_error = error;
+ }
zfs_fallthrough;
case ENOENT:
zfs_fallthrough;
case EEXIST:
zfs_fallthrough;
case EALREADY:
- return (lwb);
+ return;
}
}
}
- /*
- * We're actually making an entry, so update lrc_seq to be the
- * log record sequence number. Note that this is generally not
- * equal to the itx sequence number because not all transactions
- * are synchronous, and sometimes spa_sync() gets there first.
- */
- lrcb->lrc_seq = ++zilog->zl_lr_seq;
- lwb->lwb_nused += reclen + dnow;
-
- zil_lwb_add_txg(lwb, txg);
-
- ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
- ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
-
- dlen -= dnow;
- if (dlen > 0) {
- zilog->zl_cur_used += reclen;
- goto cont;
- }
-
- return (lwb);
+ lwb->lwb_nfilled += reclen + dlen;
+ ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused);
+ ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t)));
}
itx_t *
zil_itx_create(uint64_t txtype, size_t olrsize)
{
size_t itxsize, lrsize;
itx_t *itx;
lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t);
itxsize = offsetof(itx_t, itx_lr) + lrsize;
itx = zio_data_buf_alloc(itxsize);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_lr.lrc_seq = 0; /* defensive */
memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize);
itx->itx_sync = B_TRUE; /* default is synchronous */
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
itx->itx_size = itxsize;
return (itx);
}
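+/*
+ * Clone an itx so that a WR_NEED_COPY record can be split across lwbs.
+ * Callbacks are not copied; they remain with (and are fired for) the
+ * original itx only.
+ */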
+static itx_t *
+zil_itx_clone(itx_t *oitx)
+{
+ itx_t *itx = zio_data_buf_alloc(oitx->itx_size);
+ memcpy(itx, oitx, oitx->itx_size);
+ itx->itx_callback = NULL;
+ itx->itx_callback_data = NULL;
+ return (itx);
+}
+
void
zil_itx_destroy(itx_t *itx)
{
IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
if (itx->itx_callback != NULL)
itx->itx_callback(itx->itx_callback_data);
zio_data_buf_free(itx, itx->itx_size);
}
/*
* Free up the sync and async itxs. The itxs_t has already been detached
* so no locks are needed.
*/
static void
zil_itxg_clean(void *arg)
{
itx_t *itx;
list_t *list;
avl_tree_t *t;
void *cookie;
itxs_t *itxs = arg;
itx_async_node_t *ian;
list = &itxs->i_sync_list;
while ((itx = list_remove_head(list)) != NULL) {
/*
* In the general case, commit itxs will not be found
* here, as they'll be committed to an lwb via
- * zil_lwb_commit(), and free'd in that function. Having
+ * zil_lwb_assign(), and free'd in that function. Having
* said that, it is still possible for commit itxs to be
* found here, due to the following race:
*
* - a thread calls zil_commit() which assigns the
* commit itx to a per-txg i_sync_list
* - zil_itxg_clean() is called (e.g. via spa_sync())
* while the waiter is still on the i_sync_list
*
* There's nothing to prevent syncing the txg while the
* waiter is on the i_sync_list. This normally doesn't
* happen because spa_sync() is slower than zil_commit(),
* but if zil_commit() calls txg_wait_synced() (e.g.
* because zil_create() or zil_commit_writer_stall() is
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
zil_commit_waiter_skip(itx->itx_private);
zil_itx_destroy(itx);
}
cookie = NULL;
t = &itxs->i_async_tree;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list = &ian->ia_list;
while ((itx = list_remove_head(list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(list);
kmem_free(ian, sizeof (itx_async_node_t));
}
avl_destroy(t);
kmem_free(itxs, sizeof (itxs_t));
}
static int
zil_aitx_compare(const void *x1, const void *x2)
{
const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
return (TREE_CMP(o1, o2));
}
/*
* Remove all async itx with the given oid.
*/
void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
list_t clean_list;
itx_t *itx;
ASSERT(oid != 0);
list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* Locate the object node and append its list.
*/
t = &itxg->itxg_itxs->i_async_tree;
ian = avl_find(t, &oid, &where);
if (ian != NULL)
list_move_tail(&clean_list, &ian->ia_list);
mutex_exit(&itxg->itxg_lock);
}
while ((itx = list_remove_head(&clean_list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(&clean_list);
}
void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
uint64_t txg;
itxg_t *itxg;
itxs_t *itxs, *clean = NULL;
/*
* Ensure the data of a renamed file is committed before the rename.
*/
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
zil_async_to_sync(zilog, itx->itx_oid);
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
txg = ZILTEST_TXG;
else
txg = dmu_tx_get_txg(tx);
itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
itxs = itxg->itxg_itxs;
if (itxg->itxg_txg != txg) {
if (itxs != NULL) {
/*
* The zil_clean callback hasn't got around to cleaning
* this itxg. Save the itxs for release below.
* This should be rare.
*/
zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
"txg %llu", (u_longlong_t)itxg->itxg_txg);
clean = itxg->itxg_itxs;
}
itxg->itxg_txg = txg;
itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
KM_SLEEP);
list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
avl_create(&itxs->i_async_tree, zil_aitx_compare,
sizeof (itx_async_node_t),
offsetof(itx_async_node_t, ia_node));
}
if (itx->itx_sync) {
list_insert_tail(&itxs->i_sync_list, itx);
} else {
avl_tree_t *t = &itxs->i_async_tree;
uint64_t foid =
LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
itx_async_node_t *ian;
avl_index_t where;
ian = avl_find(t, &foid, &where);
if (ian == NULL) {
ian = kmem_alloc(sizeof (itx_async_node_t),
KM_SLEEP);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
avl_insert(t, ian, where);
}
list_insert_tail(&ian->ia_list, itx);
}
itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
/*
* We don't want to dirty the ZIL using ZILTEST_TXG, because
* zil_clean() will never be called using ZILTEST_TXG. Thus, we
* need to be careful to always dirty the ZIL using the "real"
* TXG (not itxg_txg) even when the SPA is frozen.
*/
zilog_dirty(zilog, dmu_tx_get_txg(tx));
mutex_exit(&itxg->itxg_lock);
/* Release the old itxs now we've dropped the lock */
if (clean != NULL)
zil_itxg_clean(clean);
}
/*
* If there are any in-memory intent log transactions which have now been
* synced then start up a taskq to free them. We should only do this after we
* have written out the uberblocks (i.e. txg has been committed) so that
* we don't inadvertently clean out in-memory log records that would be required
* by zil_commit().
*/
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
itxs_t *clean_me;
ASSERT3U(synced_txg, <, ZILTEST_TXG);
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
mutex_exit(&itxg->itxg_lock);
return;
}
ASSERT3U(itxg->itxg_txg, <=, synced_txg);
ASSERT3U(itxg->itxg_txg, !=, 0);
clean_me = itxg->itxg_itxs;
itxg->itxg_itxs = NULL;
itxg->itxg_txg = 0;
mutex_exit(&itxg->itxg_lock);
/*
* Preferably start a task queue to free up the old itxs, but
* if taskq_dispatch can't allocate resources to do that then
* free them in-line. This should be rare. Note that using
* TQ_SLEEP created a bad performance problem.
*/
ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
zil_itxg_clean, clean_me, TQ_NOSLEEP);
if (id == TASKQID_INVALID)
zil_itxg_clean(clean_me);
}
/*
* This function will traverse the queue of itxs that need to be
* committed, and move them onto the ZIL's zl_itx_commit_list.
*/
static void
zil_get_commit_list(zilog_t *zilog)
{
uint64_t otxg, txg;
list_t *commit_list = &zilog->zl_itx_commit_list;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing. That's okay since we'll
* only commit things in the future.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If we're adding itx records to the zl_itx_commit_list,
* then the zil better be dirty in this "txg". We can assert
* that here since we're holding the itxg_lock which will
* prevent spa_sync from cleaning it. Once we add the itxs
* to the zl_itx_commit_list we must commit it to disk even
* if it's unnecessary (i.e. the txg was synced).
*/
ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
mutex_exit(&itxg->itxg_lock);
}
}
/*
* Move the async itxs for a specified object to commit into sync lists.
*/
void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If a foid is specified then find that node and append its
* list. Otherwise walk the tree appending all the lists
* to the sync list. We add to the end rather than the
* beginning to ensure the create has happened.
*/
t = &itxg->itxg_itxs->i_async_tree;
if (foid != 0) {
ian = avl_find(t, &foid, &where);
if (ian != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
}
} else {
void *cookie = NULL;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
list_destroy(&ian->ia_list);
kmem_free(ian, sizeof (itx_async_node_t));
}
}
mutex_exit(&itxg->itxg_lock);
}
}
/*
* This function will prune commit itxs that are at the head of the
* commit list (it won't prune past the first non-commit itx), and
* either: a) attach them to the last lwb that's still pending
* completion, or b) skip them altogether.
*
* This is used as a performance optimization to prevent commit itxs
* from generating new lwbs when it's unnecessary to do so.
*/
static void
zil_prune_commit_list(zilog_t *zilog)
{
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
if (lrc->lrc_txtype != TX_COMMIT)
break;
mutex_enter(&zilog->zl_lock);
lwb_t *last_lwb = zilog->zl_last_lwb_opened;
if (last_lwb == NULL ||
last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
/*
* All of the itxs this waiter was waiting on
* must have already completed (or there were
* never any itx's for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
zil_commit_waiter_skip(itx->itx_private);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
itx->itx_private = NULL;
}
mutex_exit(&zilog->zl_lock);
list_remove(&zilog->zl_itx_commit_list, itx);
zil_itx_destroy(itx);
}
IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
static void
zil_commit_writer_stall(zilog_t *zilog)
{
/*
* When zio_alloc_zil() fails to allocate the next lwb block on
* disk, we must call txg_wait_synced() to ensure all of the
* lwbs in the zilog's zl_lwb_list are synced and then freed (in
* zil_sync()), such that any subsequent ZIL writer (i.e. a call
* to zil_process_commit_list()) will have to call zil_create(),
* and start a new ZIL chain.
*
* Since zil_alloc_zil() failed, the lwb that was previously
* issued does not have a pointer to the "next" lwb on disk.
* Thus, if another ZIL writer thread was to allocate the "next"
* on-disk lwb, that block could be leaked in the event of a
* crash (because the previous lwb on-disk would not point to
* it).
*
* We must hold the zilog's zl_issuer_lock while we do this, to
* ensure no new threads enter zil_process_commit_list() until
* all lwb's in the zl_lwb_list have been synced and freed
* (which is achieved via the txg_wait_synced() call).
*/
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
txg_wait_synced(zilog->zl_dmu_pool, 0);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
}
/*
* This function will traverse the commit list, creating new lwbs as
* needed, and committing the itxs from the commit list to these newly
* created lwbs. Additionally, as a new lwb is created, the previous
* lwb will be issued to the zio layer to be written to disk.
*/
static void
-zil_process_commit_list(zilog_t *zilog)
+zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
{
spa_t *spa = zilog->zl_spa;
list_t nolwb_itxs;
list_t nolwb_waiters;
lwb_t *lwb, *plwb;
itx_t *itx;
boolean_t first = B_TRUE;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
/*
* Return if there's nothing to commit before we dirty the fs by
* calling zil_create().
*/
if (list_is_empty(&zilog->zl_itx_commit_list))
return;
list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL) {
lwb = zil_create(zilog);
} else {
/*
* Activate SPA_FEATURE_ZILSAXATTR for the cases where ZIL will
* have already been created (zl_lwb_list not empty).
*/
zil_commit_activate_saxattr_feature(zilog);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
first = (lwb->lwb_state != LWB_STATE_OPENED) &&
((plwb = list_prev(&zilog->zl_lwb_list, lwb)) == NULL ||
plwb->lwb_state == LWB_STATE_FLUSH_DONE);
}
while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
uint64_t txg = lrc->lrc_txg;
ASSERT3U(txg, !=, 0);
if (lrc->lrc_txtype == TX_COMMIT) {
DTRACE_PROBE2(zil__process__commit__itx,
zilog_t *, zilog, itx_t *, itx);
} else {
DTRACE_PROBE2(zil__process__normal__itx,
zilog_t *, zilog, itx_t *, itx);
}
boolean_t synced = txg <= spa_last_synced_txg(spa);
boolean_t frozen = txg > spa_freeze_txg(spa);
/*
* If the txg of this itx has already been synced out, then
* we don't need to commit this itx to an lwb. This is
* because the data of this itx will have already been
* written to the main pool. This is inherently racy, and
* it's still ok to commit an itx whose txg has already
* been synced; this will result in a write that's
* unnecessary, but will do no harm.
*
* With that said, we always want to commit TX_COMMIT itxs
* to an lwb, regardless of whether or not that itx's txg
* has been synced out. We do this to ensure any OPENED lwb
* will always have at least one zil_commit_waiter_t linked
* to the lwb.
*
* As a counter-example, if we skipped TX_COMMIT itx's
* whose txg had already been synced, the following
* situation could occur if we happened to be racing with
* spa_sync:
*
* 1. We commit a non-TX_COMMIT itx to an lwb, where the
* itx's txg is 10 and the last synced txg is 9.
* 2. spa_sync finishes syncing out txg 10.
* 3. We move to the next itx in the list, it's a TX_COMMIT
* whose txg is 10, so we skip it rather than committing
* it to the lwb used in (1).
*
* If the itx that is skipped in (3) is the last TX_COMMIT
* itx in the commit list, then it's possible for the lwb
* used in (1) to remain in the OPENED state indefinitely.
*
* To prevent the above scenario from occurring, ensuring
* that once an lwb is OPENED it will transition to ISSUED
* and eventually DONE, we always commit TX_COMMIT itx's to
* an lwb here, even if that itx's txg has already been
* synced.
*
* Finally, if the pool is frozen, we _always_ commit the
* itx. The point of freezing the pool is to prevent data
* from being written to the main pool via spa_sync, and
* instead rely solely on the ZIL to persistently store the
* data; i.e. when the pool is frozen, the last synced txg
* value can't be trusted.
*/
if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
if (lwb != NULL) {
- lwb = zil_lwb_commit(zilog, itx, lwb);
-
- if (lwb == NULL)
+ lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs);
+ if (lwb == NULL) {
list_insert_tail(&nolwb_itxs, itx);
- else
- list_insert_tail(&lwb->lwb_itxs, itx);
+ } else if ((zcw->zcw_lwb != NULL &&
+ zcw->zcw_lwb != lwb) || zcw->zcw_done) {
+ /*
+ * Our lwb is done, leave the rest of the
+ * itx list to somebody else who cares.
+ */
+ first = B_FALSE;
+ break;
+ }
} else {
if (lrc->lrc_txtype == TX_COMMIT) {
zil_commit_waiter_link_nolwb(
itx->itx_private, &nolwb_waiters);
}
-
list_insert_tail(&nolwb_itxs, itx);
}
} else {
ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
}
if (lwb == NULL) {
/*
* This indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
*/
+ while ((lwb = list_remove_head(ilwbs)) != NULL)
+ zil_lwb_write_issue(zilog, lwb);
zil_commit_writer_stall(zilog);
/*
* Additionally, we have to signal and mark the "nolwb"
* waiters as "done" here, since without an lwb, we
* can't do this via zil_lwb_flush_vdevs_done() like
* normal.
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
zil_commit_waiter_skip(zcw);
/*
* And finally, we have to destroy the itx's that
* couldn't be committed to an lwb; this will also call
* the itx's callback if one exists for the itx.
*/
while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
zil_itx_destroy(itx);
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
* At this point, the ZIL block pointed at by the "lwb"
* variable is in one of the following states: "closed"
* or "open".
*
* If it's "closed", then no itxs have been committed to
* it, so there's no point in issuing its zio (i.e. it's
* "empty").
*
* If it's "open", then it contains one or more itxs that
* eventually need to be committed to stable storage. In
* this case we intentionally do not issue the lwb's zio
* to disk yet, and instead rely on one of the following
* two mechanisms for issuing the zio:
*
* 1. Ideally, there will be more ZIL activity occurring
* on the system, such that this function will be
* immediately called again (not necessarily by the same
* thread) and this lwb's zio will be issued via
- * zil_lwb_commit(). This way, the lwb is guaranteed to
+ * zil_lwb_assign(). This way, the lwb is guaranteed to
* be "full" when it is issued to disk, and we'll make
* use of the lwb's size the best we can.
*
* 2. If there isn't sufficient ZIL activity occurring on
* the system, such that this lwb's zio isn't issued via
- * zil_lwb_commit(), zil_commit_waiter() will issue the
+ * zil_lwb_assign(), zil_commit_waiter() will issue the
* lwb's zio. If this occurs, the lwb is not guaranteed
* to be "full" by the time its zio is issued, and means
* the size of the lwb was "too large" given the amount
* of ZIL activity occurring on the system at that time.
*
* We do this for a couple of reasons:
*
* 1. To try and reduce the number of IOPs needed to
* write the same number of itxs. If an lwb has space
* available in its buffer for more itxs, and more itxs
* will be committed relatively soon (relative to the
* latency of performing a write), then it's beneficial
* to wait for these "next" itxs. This way, more itxs
* can be committed to stable storage with fewer writes.
*
* 2. To try and use the largest lwb block size that the
* incoming rate of itxs can support. Again, this is to
* try and pack as many itxs into as few lwbs as
* possible, without significantly impacting the latency
* of each individual itx.
*
* If we had no already running or open LWBs, the workload
* may be single-threaded.  And if the ZIL write latency is
* very small, or the LWB is almost full, it may be cheaper
* to bypass the delay.
*/
if (lwb->lwb_state == LWB_STATE_OPENED && first) {
hrtime_t sleep = zilog->zl_last_lwb_latency *
zfs_commit_timeout_pct / 100;
if (sleep < zil_min_commit_timeout ||
lwb->lwb_sz - lwb->lwb_nused < lwb->lwb_sz / 8) {
- lwb = zil_lwb_write_issue(zilog, lwb);
+ list_insert_tail(ilwbs, lwb);
+ lwb = zil_lwb_write_close(zilog, lwb);
zilog->zl_cur_used = 0;
- if (lwb == NULL)
+ if (lwb == NULL) {
+ while ((lwb = list_remove_head(ilwbs))
+ != NULL)
+ zil_lwb_write_issue(zilog, lwb);
zil_commit_writer_stall(zilog);
+ }
}
}
}
}
/*
* This function is responsible for ensuring the passed in commit waiter
* (and associated commit itx) is committed to an lwb. If the waiter is
* not already committed to an lwb, all itxs in the zilog's queue of
* itxs will be processed. The assumption is the passed in waiter's
* commit itx will be found in the queue just like the other non-commit
* itxs, such that when the entire queue is processed, the waiter will
* have been committed to an lwb.
*
* The lwb associated with the passed in waiter is not guaranteed to
* have been issued by the time this function completes. If the lwb is
* not issued, we rely on future calls to zil_commit_writer() to issue
* the lwb, or the timeout mechanism found in zil_commit_waiter().
*/
static void
zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
+ list_t ilwbs;
+ lwb_t *lwb;
+
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(spa_writeable(zilog->zl_spa));
+ list_create(&ilwbs, sizeof (lwb_t), offsetof(lwb_t, lwb_issue_node));
mutex_enter(&zilog->zl_issuer_lock);
if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
/*
* It's possible that, while we were waiting to acquire
* the "zl_issuer_lock", another thread committed this
* waiter to an lwb. If that occurs, we bail out early,
* without processing any of the zilog's queue of itxs.
*
* On certain workloads and system configurations, the
* "zl_issuer_lock" can become highly contended. In an
* attempt to reduce this contention, we immediately drop
* the lock if the waiter has already been processed.
*
* We've measured this optimization to reduce CPU time spent
* contending on this lock by up to 5%, using a system
* with 32 CPUs, low latency storage (~50 usec writes),
* and 1024 threads performing sync writes.
*/
goto out;
}
ZIL_STAT_BUMP(zilog, zil_commit_writer_count);
zil_get_commit_list(zilog);
zil_prune_commit_list(zilog);
- zil_process_commit_list(zilog);
+ zil_process_commit_list(zilog, zcw, &ilwbs);
out:
mutex_exit(&zilog->zl_issuer_lock);
+ while ((lwb = list_remove_head(&ilwbs)) != NULL)
+ zil_lwb_write_issue(zilog, lwb);
+ list_destroy(&ilwbs);
}
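/*
 * The "ilwbs" list above is a collect-then-issue pattern: the work that
 * must be serialized (assigning itxs to lwbs) happens under
 * zl_issuer_lock, while the actual write issue is deferred until the
 * lock is dropped.  A minimal sketch of that shape, with generic names
 * (pending and lock are not part of this file):
 *
 *	list_t pending;
 *	list_create(&pending, sizeof (lwb_t),
 *	    offsetof(lwb_t, lwb_issue_node));
 *	mutex_enter(&lock);
 *	... choose lwbs, list_insert_tail(&pending, lwb) ...
 *	mutex_exit(&lock);
 *	while ((lwb = list_remove_head(&pending)) != NULL)
 *		zil_lwb_write_issue(zilog, lwb);
 *	list_destroy(&pending);
 */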
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
lwb_t *lwb = zcw->zcw_lwb;
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);
/*
* If the lwb has already been issued by another thread, we can
* immediately return since there's no work to be done (the
* point of this function is to issue the lwb). Additionally, we
* do this prior to acquiring the zl_issuer_lock, to avoid
* acquiring it when it's not necessary to do so.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE)
return;
/*
- * In order to call zil_lwb_write_issue() we must hold the
+ * In order to call zil_lwb_write_close() we must hold the
* zilog's "zl_issuer_lock". We can't simply acquire that lock,
* since we're already holding the commit waiter's "zcw_lock",
* and those two locks are acquired in the opposite order
* elsewhere.
*/
mutex_exit(&zcw->zcw_lock);
mutex_enter(&zilog->zl_issuer_lock);
mutex_enter(&zcw->zcw_lock);
/*
* Since we just dropped and re-acquired the commit waiter's
* lock, we have to re-check to see if the waiter was marked
* "done" during that process. If the waiter was marked "done",
* the "lwb" pointer is no longer valid (it can be free'd after
* the waiter is marked "done"), so without this check we could
* wind up with a use-after-free error below.
*/
- if (zcw->zcw_done)
+ if (zcw->zcw_done) {
+ lwb = NULL;
goto out;
+ }
ASSERT3P(lwb, ==, zcw->zcw_lwb);
/*
* We've already checked this above, but since we hadn't acquired
* the zilog's zl_issuer_lock, we have to perform this check a
* second time while holding the lock.
*
* We don't need to hold the zl_lock since the lwb cannot transition
* from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb
* _can_ transition from ISSUED to DONE, but it's OK to race with
* that transition since we treat the lwb the same, whether it's in
* the ISSUED or DONE states.
*
* The important thing, is we treat the lwb differently depending on
* if it's ISSUED or OPENED, and block any other threads that might
* attempt to issue this lwb. For that reason we hold the
* zl_issuer_lock when checking the lwb_state; we must not call
- * zil_lwb_write_issue() if the lwb had already been issued.
+ * zil_lwb_write_close() if the lwb had already been issued.
*
* See the comment above the lwb_state_t structure definition for
* more details on the lwb states, and locking requirements.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
- lwb->lwb_state == LWB_STATE_FLUSH_DONE)
+ lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
+ lwb = NULL;
goto out;
+ }
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
/*
* As described in the comments above zil_commit_waiter() and
* zil_process_commit_list(), we need to issue this lwb's zio
* since we've reached the commit waiter's timeout and it still
* hasn't been issued.
*/
- lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);
+ lwb_t *nlwb = zil_lwb_write_close(zilog, lwb);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
/*
* Since the lwb's zio hadn't been issued by the time this thread
* reached its timeout, we reset the zilog's "zl_cur_used" field
* to influence the zil block size selection algorithm.
*
* By having to issue the lwb's zio here, it means the size of the
* lwb was too large, given the incoming throughput of itxs. By
* setting "zl_cur_used" to zero, we communicate this fact to the
* block size selection algorithm, so it can take this information
* into account, and potentially select a smaller size for the
* next lwb block that is allocated.
*/
zilog->zl_cur_used = 0;
if (nlwb == NULL) {
/*
- * When zil_lwb_write_issue() returns NULL, this
+ * When zil_lwb_write_close() returns NULL, this
* indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this occurs, the ZIL write
* pipeline must be stalled; see the comment within the
* zil_commit_writer_stall() function for more details.
*
* We must drop the commit waiter's lock prior to
* calling zil_commit_writer_stall() or else we can wind
* up with the following deadlock:
*
* - This thread is waiting for the txg to sync while
* holding the waiter's lock; txg_wait_synced() is
* used within zil_commit_writer_stall().
*
* - The txg can't sync because it is waiting for this
* lwb's zio callback to call dmu_tx_commit().
*
* - The lwb's zio callback can't call dmu_tx_commit()
* because it's blocked trying to acquire the waiter's
* lock, which occurs prior to calling dmu_tx_commit()
*/
mutex_exit(&zcw->zcw_lock);
+ zil_lwb_write_issue(zilog, lwb);
+ lwb = NULL;
zil_commit_writer_stall(zilog);
mutex_enter(&zcw->zcw_lock);
}
out:
mutex_exit(&zilog->zl_issuer_lock);
+ if (lwb)
+ zil_lwb_write_issue(zilog, lwb);
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
}
/*
* This function is responsible for performing the following two tasks:
*
* 1. its primary responsibility is to block until the given "commit
* waiter" is considered "done".
*
* 2. its secondary responsibility is to issue the zio for the lwb that
* the given "commit waiter" is waiting on, if this function has
* waited "long enough" and the lwb is still in the "open" state.
*
* Given a sufficient amount of itxs being generated and written using
- * the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
+ * the ZIL, the lwb's zio will be issued via the zil_lwb_assign()
* function. If this does not occur, this secondary responsibility will
* ensure the lwb is issued even if there is no other synchronous
* activity on the system.
*
* For more details, see zil_process_commit_list(); more specifically,
* the comment at the bottom of that function.
*/
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zcw->zcw_lock);
/*
* The timeout is scaled based on the lwb latency to avoid
* significantly impacting the latency of each individual itx.
* For more details, see the comment at the bottom of the
* zil_process_commit_list() function.
*/
int pct = MAX(zfs_commit_timeout_pct, 1);
hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
hrtime_t wakeup = gethrtime() + sleep;
boolean_t timedout = B_FALSE;
while (!zcw->zcw_done) {
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
lwb_t *lwb = zcw->zcw_lwb;
/*
* Usually, the waiter will have a non-NULL lwb field here,
* but it's possible for it to be NULL as a result of
* zil_commit() racing with spa_sync().
*
* When zil_clean() is called, it's possible for the itxg
* list (which may be cleaned via a taskq) to contain
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
* marked done until zil_commit_waiter_skip() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
* "zcw" variable) to be found in this "in between" state;
* where it's "zcw_lwb" field is NULL, and it hasn't yet
* been skipped, so it's "zcw_done" field is still B_FALSE.
*/
IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);
if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
ASSERT3B(timedout, ==, B_FALSE);
/*
* If the lwb hasn't been issued yet, then we
* need to wait with a timeout, in case this
* function needs to issue the lwb after the
* timeout is reached; responsibility (2) from
* the comment above this function.
*/
int rc = cv_timedwait_hires(&zcw->zcw_cv,
&zcw->zcw_lock, wakeup, USEC2NSEC(1),
CALLOUT_FLAG_ABSOLUTE);
if (rc != -1 || zcw->zcw_done)
continue;
timedout = B_TRUE;
zil_commit_waiter_timeout(zilog, zcw);
if (!zcw->zcw_done) {
/*
* If the commit waiter has already been
* marked "done", it's possible for the
* waiter's lwb structure to have already
* been freed. Thus, we can only reliably
* make these assertions if the waiter
* isn't done.
*/
ASSERT3P(lwb, ==, zcw->zcw_lwb);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
}
} else {
/*
* If the lwb isn't open, then it must have already
* been issued. In that case, there's no need to
* use a timeout when waiting for the lwb to
* complete.
*
* Additionally, if the lwb is NULL, the waiter
* will soon be signaled and marked done via
* zil_clean() and zil_itxg_clean(), so no timeout
* is required.
*/
IMPLY(lwb != NULL,
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
}
}
mutex_exit(&zcw->zcw_lock);
}
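/*
 * For example (with hypothetical numbers): if zfs_commit_timeout_pct is
 * at its usual default of 5 and the last lwb latency was 400us, the
 * waiter above sleeps for at most 400us * 5 / 100 = 20us before
 * zil_commit_waiter_timeout() issues the still-open lwb itself.
 */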
static zil_commit_waiter_t *
zil_alloc_commit_waiter(void)
{
zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
zcw->zcw_zio_error = 0;
return (zcw);
}
static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3B(zcw->zcw_done, ==, B_TRUE);
mutex_destroy(&zcw->zcw_lock);
cv_destroy(&zcw->zcw_cv);
kmem_cache_free(zil_zcw_cache, zcw);
}
/*
* This function is used to create a TX_COMMIT itx and assign it. This
* way, it will be linked into the ZIL's list of synchronous itxs, and
* then later committed to an lwb (or skipped) when
* zil_process_commit_list() is called.
*/
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
/*
* Since we are not going to create any new dirty data, and we
* can even help with clearing the existing dirty data, we
* should not be subject to the dirty data based delays. We
* use TXG_NOTHROTTLE to bypass the delay mechanism.
*/
VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
itx->itx_sync = B_TRUE;
itx->itx_private = zcw;
zil_itx_assign(zilog, itx, tx);
dmu_tx_commit(tx);
}
/*
* Commit ZFS Intent Log transactions (itxs) to stable storage.
*
* When writing ZIL transactions to the on-disk representation of the
* ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
* itxs can be committed to a single lwb. Once a lwb is written and
* committed to stable storage (i.e. the lwb is written, and vdevs have
* been flushed), each itx that was committed to that lwb is also
* considered to be committed to stable storage.
*
* When an itx is committed to an lwb, the log record (lr_t) contained
* by the itx is copied into the lwb's zio buffer, and once this buffer
* is written to disk, it becomes an on-disk ZIL block.
*
* As itxs are generated, they're inserted into the ZIL's queue of
* uncommitted itxs. The semantics of zil_commit() are such that it will
* block until all itxs that were in the queue when it was called, are
* committed to stable storage.
*
* If "foid" is zero, this means all "synchronous" and "asynchronous"
* itxs, for all objects in the dataset, will be committed to stable
* storage prior to zil_commit() returning. If "foid" is non-zero, all
* "synchronous" itxs for all objects, but only "asynchronous" itxs
* that correspond to the foid passed in, will be committed to stable
* storage prior to zil_commit() returning.
*
* Generally speaking, when zil_commit() is called, the consumer doesn't
* actually care about _all_ of the uncommitted itxs. Instead, they're
* simply trying to wait for a specific itx to be committed to disk,
* but the interface(s) for interacting with the ZIL don't allow such
* fine-grained communication. A better interface would allow a consumer
* to create and assign an itx, and then pass a reference to this itx to
* zil_commit(); such that zil_commit() would return as soon as that
* specific itx was committed to disk (instead of waiting for _all_
* itxs to be committed).
*
* When a thread calls zil_commit() a special "commit itx" will be
* generated, along with a corresponding "waiter" for this commit itx.
* zil_commit() will wait on this waiter's CV, such that when the waiter
* is marked done, and signaled, zil_commit() will return.
*
* This commit itx is inserted into the queue of uncommitted itxs. This
* provides an easy mechanism for determining which itxs were in the
* queue prior to zil_commit() having been called, and which itxs were
* added after zil_commit() was called.
*
* The commit itx is special; it doesn't have any on-disk representation.
* When a commit itx is "committed" to an lwb, the waiter associated
* with it is linked onto the lwb's list of waiters. Then, when that lwb
* completes, each waiter on the lwb's list is marked done and signaled
* -- allowing the thread waiting on the waiter to return from zil_commit().
*
* It's important to point out a few critical factors that allow us
* to make use of the commit itxs, commit waiters, per-lwb lists of
* commit waiters, and zio completion callbacks like we're doing:
*
* 1. The list of waiters for each lwb is traversed, and each commit
* waiter is marked "done" and signaled, in the zio completion
* callback of the lwb's zio[*].
*
* * Actually, the waiters are signaled in the zio completion
* callback of the root zio for the DKIOCFLUSHWRITECACHE commands
* that are sent to the vdevs upon completion of the lwb zio.
*
* 2. When the itxs are inserted into the ZIL's queue of uncommitted
* itxs, the order in which they are inserted is preserved[*]; as
* itxs are added to the queue, they are added to the tail of
* in-memory linked lists.
*
* When committing the itxs to lwbs (to be written to disk), they
* are committed in the same order in which the itxs were added to
* the uncommitted queue's linked list(s); i.e. the linked list of
* itxs to commit is traversed from head to tail, and each itx is
* committed to an lwb in that order.
*
* * To clarify:
*
* - the order of "sync" itxs is preserved w.r.t. other
* "sync" itxs, regardless of the corresponding objects.
* - the order of "async" itxs is preserved w.r.t. other
* "async" itxs corresponding to the same object.
* - the order of "async" itxs is *not* preserved w.r.t. other
* "async" itxs corresponding to different objects.
* - the order of "sync" itxs w.r.t. "async" itxs (or vice
* versa) is *not* preserved, even for itxs that correspond
* to the same object.
*
* For more details, see: zil_itx_assign(), zil_async_to_sync(),
* zil_get_commit_list(), and zil_process_commit_list().
*
* 3. The lwbs represent a linked list of blocks on disk. Thus, any
* lwb cannot be considered committed to stable storage, until its
* "previous" lwb is also committed to stable storage. This fact,
* coupled with the fact described above, means that itxs are
* committed in (roughly) the order in which they were generated.
* This is essential because itxs are dependent on prior itxs.
* Thus, we *must not* deem an itx as being committed to stable
* storage, until *all* prior itxs have also been committed to
* stable storage.
*
* To enforce this ordering of lwb zio's, while still leveraging as
* much of the underlying storage performance as possible, we rely
* on two fundamental concepts:
*
* 1. The creation and issuance of lwb zio's is protected by
* the zilog's "zl_issuer_lock", which ensures only a single
* thread is creating and/or issuing lwb's at a time
* 2. The "previous" lwb is a child of the "current" lwb
* (leveraging the zio parent-child dependency graph)
*
* By relying on this parent-child zio relationship, we can have
* many lwb zio's concurrently issued to the underlying storage,
* but the order in which they complete will be the same order in
* which they were created.
*/
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
/*
* We should never attempt to call zil_commit on a snapshot for
* a couple of reasons:
*
* 1. A snapshot may never be modified, thus it cannot have any
* in-flight itxs that would have modified the dataset.
*
* 2. By design, when zil_commit() is called, a commit itx will
* be assigned to this zilog; as a result, the zilog will be
* dirtied. We must not dirty the zilog of a snapshot; there's
* checks in the code that enforce this invariant, and will
* cause a panic if it's not upheld.
*/
ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return;
if (!spa_writeable(zilog->zl_spa)) {
/*
* If the SPA is not writable, there should never be any
* pending itxs waiting to be committed to disk. If that
* weren't true, we'd skip writing those itxs out, and
* would break the semantics of zil_commit(); thus, we're
* verifying that truth before we return to the caller.
*/
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
for (int i = 0; i < TXG_SIZE; i++)
ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
return;
}
/*
* If the ZIL is suspended, we don't want to dirty it by calling
* zil_commit_itx_assign() below, nor can we write out
* lwbs as would be done in zil_commit_writer(). Thus, we
* simply rely on txg_wait_synced() to maintain the necessary
* semantics, and avoid calling those functions altogether.
*/
if (zilog->zl_suspend > 0) {
txg_wait_synced(zilog->zl_dmu_pool, 0);
return;
}
zil_commit_impl(zilog, foid);
}
void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
ZIL_STAT_BUMP(zilog, zil_commit_count);
/*
* Move the "async" itxs for the specified foid to the "sync"
* queues, such that they will be later committed (or skipped)
* to an lwb when zil_process_commit_list() is called.
*
* Since these "async" itxs must be committed prior to this
* call to zil_commit returning, we must perform this operation
* before we call zil_commit_itx_assign().
*/
zil_async_to_sync(zilog, foid);
/*
* We allocate a new "waiter" structure which will initially be
* linked to the commit itx using the itx's "itx_private" field.
* Since the commit itx doesn't represent any on-disk state,
* when it's committed to an lwb, rather than copying its
* lr_t into the lwb's buffer, the commit itx's "waiter" will be
* added to the lwb's list of waiters. Then, when the lwb is
* committed to stable storage, each waiter in the lwb's list of
* waiters will be marked "done", and signalled.
*
* We must create the waiter and assign the commit itx prior to
* calling zil_commit_writer(), or else our specific commit itx
* is not guaranteed to be committed to an lwb prior to calling
* zil_commit_waiter().
*/
zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
zil_commit_itx_assign(zilog, zcw);
zil_commit_writer(zilog, zcw);
zil_commit_waiter(zilog, zcw);
if (zcw->zcw_zio_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fallback to
* relying on spa_sync() to write out the data this
* thread is waiting on. Obviously this has performance
* implications, but the expectation is for this to be
* an exceptional case, and shouldn't occur often.
*/
DTRACE_PROBE2(zil__commit__io__error,
zilog_t *, zilog, zil_commit_waiter_t *, zcw);
txg_wait_synced(zilog->zl_dmu_pool, 0);
}
zil_free_commit_waiter(zcw);
}
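/*
 * A minimal, hypothetical caller sketch: an fsync-style consumer logs
 * its itxs while its transactions are open and, once they are
 * committed, simply calls
 *
 *	zil_commit(zilog, foid);
 *
 * where foid is the object it cares about (or 0 to cover all objects,
 * per the comment above zil_commit()).  The call blocks until every
 * itx queued at the time of the call is on stable storage, either via
 * lwbs or, on error, via txg_wait_synced() as shown above.
 */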
/*
* Called in syncing context to free committed log blocks and update log header.
*/
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
zil_header_t *zh = zil_header_in_syncing_context(zilog);
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = zilog->zl_spa;
uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
lwb_t *lwb;
/*
* We don't zero out zl_destroy_txg, so make sure we don't try
* to destroy it twice.
*/
if (spa_sync_pass(spa) != 1)
return;
zil_lwb_flush_wait_all(zilog, txg);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_stop_sync == 0);
if (*replayed_seq != 0) {
ASSERT(zh->zh_replay_seq < *replayed_seq);
zh->zh_replay_seq = *replayed_seq;
*replayed_seq = 0;
}
if (zilog->zl_destroy_txg == txg) {
blkptr_t blk = zh->zh_log;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
memset(zh, 0, sizeof (zil_header_t));
memset(zilog->zl_replayed_seq, 0,
sizeof (zilog->zl_replayed_seq));
if (zilog->zl_keep_first) {
/*
* If this block was part of log chain that couldn't
* be claimed because a device was missing during
* zil_claim(), but that device later returns,
* then this block could erroneously appear valid.
* To guard against this, assign a new GUID to the new
* log chain so it doesn't matter what blk points to.
*/
zil_init_log_chain(zilog, &blk);
zh->zh_log = blk;
} else {
/*
* A destroyed ZIL chain can't contain any TX_SETSAXATTR
* records. So, deactivate the feature for this dataset.
* We activate it again when we start a new ZIL chain.
*/
if (dsl_dataset_feature_is_active(ds,
SPA_FEATURE_ZILSAXATTR))
dsl_dataset_deactivate_feature(ds,
SPA_FEATURE_ZILSAXATTR, tx);
}
}
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
zh->zh_log = lwb->lwb_blk;
if (lwb->lwb_state != LWB_STATE_FLUSH_DONE ||
lwb->lwb_max_txg > txg)
break;
list_remove(&zilog->zl_lwb_list, lwb);
zio_free(spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
/*
* If we don't have anything left in the lwb list then
* we've had an allocation failure and we need to zero
* out the zil_header blkptr so that we don't end
* up freeing the same block twice.
*/
if (list_is_empty(&zilog->zl_lwb_list))
BP_ZERO(&zh->zh_log);
}
/*
* Remove fastwrite on any blocks that have been pre-allocated for
* the next commit. This prevents fastwrite counter pollution by
* unused, long-lived LWBs.
*/
for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) {
if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) {
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
lwb->lwb_fastwrite = 0;
}
}
mutex_exit(&zilog->zl_lock);
}
static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
(void) unused, (void) kmflag;
lwb_t *lwb = vbuf;
list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
static void
zil_lwb_dest(void *vbuf, void *unused)
{
(void) unused;
lwb_t *lwb = vbuf;
mutex_destroy(&lwb->lwb_vdev_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
}
void
zil_init(void)
{
zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zil_sums_init(&zil_sums_global);
zil_kstats_global = kstat_create("zfs", 0, "zil", "misc",
KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zil_kstats_global != NULL) {
zil_kstats_global->ks_data = &zil_stats;
zil_kstats_global->ks_update = zil_kstats_global_update;
zil_kstats_global->ks_private = NULL;
kstat_install(zil_kstats_global);
}
}
void
zil_fini(void)
{
kmem_cache_destroy(zil_zcw_cache);
kmem_cache_destroy(zil_lwb_cache);
if (zil_kstats_global != NULL) {
kstat_delete(zil_kstats_global);
zil_kstats_global = NULL;
}
zil_sums_fini(&zil_sums_global);
}
void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
zilog->zl_sync = sync;
}
void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
zilog->zl_logbias = logbias;
}
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
zilog->zl_header = zh_phys;
zilog->zl_os = os;
zilog->zl_spa = dmu_objset_spa(os);
zilog->zl_dmu_pool = dmu_objset_pool(os);
zilog->zl_destroy_txg = TXG_INITIAL - 1;
zilog->zl_logbias = dmu_objset_logbias(os);
zilog->zl_sync = dmu_objset_syncprop(os);
zilog->zl_dirty_max_txg = 0;
zilog->zl_last_lwb_opened = NULL;
zilog->zl_last_lwb_latency = 0;
zilog->zl_max_block_size = zil_maxblocksize;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL);
for (int i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL);
return (zilog);
}
void
zil_free(zilog_t *zilog)
{
int i;
zilog->zl_stop_sync = 1;
ASSERT0(zilog->zl_suspend);
ASSERT0(zilog->zl_suspending);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
for (i = 0; i < TXG_SIZE; i++) {
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
* callback to remove the entry. We remove those here.
*
* Also free up the ziltest itxs.
*/
if (zilog->zl_itxg[i].itxg_itxs)
zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
}
mutex_destroy(&zilog->zl_issuer_lock);
mutex_destroy(&zilog->zl_lock);
mutex_destroy(&zilog->zl_lwb_io_lock);
cv_destroy(&zilog->zl_cv_suspend);
cv_destroy(&zilog->zl_lwb_io_cv);
kmem_free(zilog, sizeof (zilog_t));
}
/*
* Open an intent log.
*/
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums)
{
zilog_t *zilog = dmu_objset_zil(os);
ASSERT3P(zilog->zl_get_data, ==, NULL);
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
zilog->zl_get_data = get_data;
zilog->zl_sums = zil_sums;
return (zilog);
}
/*
* Close an intent log.
*/
void
zil_close(zilog_t *zilog)
{
lwb_t *lwb;
uint64_t txg;
if (!dmu_objset_is_snapshot(zilog->zl_os)) {
zil_commit(zilog, 0);
} else {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT0(zilog->zl_dirty_max_txg);
ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
}
mutex_enter(&zilog->zl_lock);
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL)
txg = zilog->zl_dirty_max_txg;
else
txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg);
mutex_exit(&zilog->zl_lock);
/*
* zl_lwb_max_issued_txg may be larger than lwb_max_txg. It depends
* on the time when the dmu_tx transaction is assigned in
- * zil_lwb_write_issue().
+ * zil_lwb_write_close().
*/
mutex_enter(&zilog->zl_lwb_io_lock);
txg = MAX(zilog->zl_lwb_max_issued_txg, txg);
mutex_exit(&zilog->zl_lwb_io_lock);
/*
* We need to use txg_wait_synced() to wait until that txg is synced.
* zil_sync() will guarantee all lwbs up to that txg have been
* written out, flushed, and cleaned.
*/
if (txg != 0)
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (zilog_is_dirty(zilog))
zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
(u_longlong_t)txg);
if (txg < spa_freeze_txg(zilog->zl_spa))
VERIFY(!zilog_is_dirty(zilog));
zilog->zl_get_data = NULL;
/*
* We should have only one lwb left on the list; remove it now.
*/
mutex_enter(&zilog->zl_lock);
lwb = list_remove_head(&zilog->zl_lwb_list);
if (lwb != NULL) {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
}
static const char *suspend_tag = "zil suspending";
/*
* Suspend an intent log. While in suspended mode, we still honor
* synchronous semantics, but we rely on txg_wait_synced() to do it.
* On old version pools, we suspend the log briefly when taking a
* snapshot so that it will have an empty intent log.
*
* Long holds are not really intended to be used the way we do here --
* held for such a short time. A concurrent caller of dsl_dataset_long_held()
* could fail. Therefore we take pains to only put a long hold if it is
* actually necessary. Fortunately, it will only be necessary if the
* objset is currently mounted (or the ZVOL equivalent). In that case it
* will already have a long hold, so we are not really making things any worse.
*
* Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
* zvol_state_t), and use their mechanism to prevent their hold from being
* dropped (e.g. VFS_HOLD()). However, that would be even more pain for
* very little gain.
*
* if cookiep == NULL, this does both the suspend & resume.
* Otherwise, it returns with the dataset "long held", and the cookie
* should be passed into zil_resume().
*/
int
zil_suspend(const char *osname, void **cookiep)
{
objset_t *os;
zilog_t *zilog;
const zil_header_t *zh;
int error;
error = dmu_objset_hold(osname, suspend_tag, &os);
if (error != 0)
return (error);
zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
zh = zilog->zl_header;
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (SET_ERROR(EBUSY));
}
/*
* Don't put a long hold in the cases where we can avoid it. This
* is when there is no cookie so we are doing a suspend & resume
* (i.e. called from zil_vdev_offline()), and there's nothing to do
* for the suspend because it's already suspended, or there's no ZIL.
*/
if (cookiep == NULL && !zilog->zl_suspending &&
(zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (0);
}
dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
zilog->zl_suspend++;
if (zilog->zl_suspend > 1) {
/*
* Someone else is already suspending it.
* Just wait for them to finish.
*/
while (zilog->zl_suspending)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
/*
* If there is no pointer to an on-disk block, this ZIL must not
* be active (e.g. filesystem not mounted), so there's nothing
* to clean up.
*/
if (BP_IS_HOLE(&zh->zh_log)) {
ASSERT(cookiep != NULL); /* fast path already handled */
*cookiep = os;
mutex_exit(&zilog->zl_lock);
return (0);
}
/*
* The ZIL has work to do. Ensure that the associated encryption
* key will remain mapped while we are committing the log by
* grabbing a reference to it. If the key isn't loaded we have no
* choice but to return an error until the wrapping key is loaded.
*/
if (os->os_encrypted &&
dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) {
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
return (SET_ERROR(EACCES));
}
zilog->zl_suspending = B_TRUE;
mutex_exit(&zilog->zl_lock);
/*
* We need to use zil_commit_impl to ensure we wait for all
* LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed
* to disk before proceeding. If we used zil_commit instead, it
* would just call txg_wait_synced(), because zl_suspend is set.
* txg_wait_synced() doesn't wait for these lwb's to be
* LWB_STATE_FLUSH_DONE before returning.
*/
zil_commit_impl(zilog, 0);
/*
* Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we
* use txg_wait_synced() to ensure the data from the zilog has
* migrated to the main pool before calling zil_destroy().
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zil_destroy(zilog, B_FALSE);
mutex_enter(&zilog->zl_lock);
zilog->zl_suspending = B_FALSE;
cv_broadcast(&zilog->zl_cv_suspend);
mutex_exit(&zilog->zl_lock);
if (os->os_encrypted)
dsl_dataset_remove_key_mapping(dmu_objset_ds(os));
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
void
zil_resume(void *cookie)
{
objset_t *os = cookie;
zilog_t *zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_suspend != 0);
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
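/*
 * For illustration, the cookie-based form described above, roughly as
 * a caller might use it (a minimal sketch; "pool/dataset" is just a
 * placeholder name):
 *
 *	void *cookie;
 *	int error = zil_suspend("pool/dataset", &cookie);
 *	if (error == 0) {
 *		... the dataset is long held and its ZIL stays suspended ...
 *		zil_resume(cookie);
 *	}
 *
 * Passing a NULL cookiep instead performs the suspend and resume in a
 * single call, as zil_reset() below does.
 */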
typedef struct zil_replay_arg {
zil_replay_func_t *const *zr_replay;
void *zr_arg;
boolean_t zr_byteswap;
char *zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog->zl_replaying_seq--; /* didn't actually replay this one */
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS replay transaction error %d, "
"dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
(u_longlong_t)lr->lrc_seq,
(u_longlong_t)(lr->lrc_txtype & ~TX_CI),
(lr->lrc_txtype & TX_CI) ? "CI" : "");
return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
uint64_t claim_txg)
{
zil_replay_arg_t *zr = zra;
const zil_header_t *zh = zilog->zl_header;
uint64_t reclen = lr->lrc_reclen;
uint64_t txtype = lr->lrc_txtype;
int error = 0;
zilog->zl_replaying_seq = lr->lrc_seq;
if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
return (0);
if (lr->lrc_txg < claim_txg) /* already committed */
return (0);
/* Strip case-insensitive bit, still present in log record */
txtype &= ~TX_CI;
if (txtype == 0 || txtype >= TX_MAX_TYPE)
return (zil_replay_error(zilog, lr, EINVAL));
/*
* If this record type can be logged out of order, the object
* (lr_foid) may no longer exist. That's legitimate, not an error.
*/
if (TX_OOO(txtype)) {
error = dmu_object_info(zilog->zl_os,
LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
if (error == ENOENT || error == EEXIST)
return (0);
}
/*
* Make a copy of the data so we can revise and extend it.
*/
memcpy(zr->zr_lr, lr, reclen);
/*
* If this is a TX_WRITE with a blkptr, suck in the data.
*/
if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
error = zil_read_log_data(zilog, (lr_write_t *)lr,
zr->zr_lr + reclen);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
/*
* The log block containing this lr may have been byteswapped
* so that we can easily examine common fields like lrc_txtype.
* However, the log is a mix of different record types, and only the
* replay vectors know how to byteswap their records. Therefore, if
* the lr was byteswapped, undo it before invoking the replay vector.
*/
if (zr->zr_byteswap)
byteswap_uint64_array(zr->zr_lr, reclen);
/*
* We must now do two things atomically: replay this log record,
* and update the log header sequence number to reflect the fact that
* we did so. At the end of each replay function the sequence number
* is updated if we are in replay mode.
*/
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
if (error != 0) {
/*
* The DMU's dnode layer doesn't see removes until the txg
* commits, so a subsequent claim can spuriously fail with
* EEXIST. So if we receive any error we try syncing out
* any removes then retry the transaction. Note that we
* specify B_FALSE for byteswap now, so we don't do it twice.
*/
txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
return (0);
}
static int
zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
{
(void) bp, (void) arg, (void) claim_txg;
zilog->zl_replay_blks++;
return (0);
}
/*
* If this dataset has a non-empty intent log, replay it and destroy it.
* Return B_TRUE if there were any entries to replay.
*/
boolean_t
zil_replay(objset_t *os, void *arg,
zil_replay_func_t *const replay_func[TX_MAX_TYPE])
{
zilog_t *zilog = dmu_objset_zil(os);
const zil_header_t *zh = zilog->zl_header;
zil_replay_arg_t zr;
if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
return (zil_destroy(zilog, B_TRUE));
}
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zilog->zl_replay = B_TRUE;
zilog->zl_replay_time = ddi_get_lbolt();
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg, B_TRUE);
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_replay = B_FALSE;
return (B_TRUE);
}
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return (B_TRUE);
if (zilog->zl_replay) {
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
zilog->zl_replaying_seq;
return (B_TRUE);
}
return (B_FALSE);
}
int
zil_reset(const char *osname, void *arg)
{
(void) arg;
int error = zil_suspend(osname, NULL);
/* EACCES means crypto key not loaded */
if ((error == EACCES) || (error == EBUSY))
return (SET_ERROR(error));
if (error != 0)
return (SET_ERROR(EEXIST));
return (0);
}
EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
EXPORT_SYMBOL(zil_sums_init);
EXPORT_SYMBOL(zil_sums_fini);
EXPORT_SYMBOL(zil_kstat_values_update);
ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW,
"ZIL block open timeout percentage");
ZFS_MODULE_PARAM(zfs_zil, zil_, min_commit_timeout, U64, ZMOD_RW,
"Minimum delay we care for ZIL block commit");
ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
"Disable intent logging replay");
ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
"Disable ZIL cache flushes");
ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW,
"Limit in bytes slog sync writes per commit");
ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,
"Limit in bytes of ZIL log block size");
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index c17ca5e1d651..d7b2217623e6 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -1,5167 +1,5154 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2022 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2021, Datto, Inc.
*/
#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>
/*
* ==========================================================================
* I/O type descriptions
* ==========================================================================
*/
const char *const zio_type_name[ZIO_TYPES] = {
/*
* Note: Linux kernel thread name length is limited
* so these names will differ from upstream open zfs.
*/
"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
};
int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;
/*
* ==========================================================================
* I/O kmem caches
* ==========================================================================
*/
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif
/* Mark IOs as "slow" if they take longer than 30 seconds */
static uint_t zio_slow_io_ms = (30 * MILLISEC);
#define BP_SPANB(indblkshift, level) \
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define COMPARE_META_LEVEL 0x80000000ul
/*
* The following actions directly affect the spa's sync-to-convergence logic.
* The values below define the sync pass when we start performing the action.
* Care should be taken when changing these values as they directly impact
* spa_sync() performance. Tuning these values may introduce subtle performance
* pathologies and should only be done in the context of performance analysis.
* These tunables will eventually be removed and replaced with #defines once
* enough analysis has been done to determine optimal values.
*
* The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
* regular blocks are not deferred.
*
* Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
* compression (including of metadata). In practice, we don't have this
* many sync passes, so this has no effect.
*
* The original intent was that disabling compression would help the sync
* passes to converge. However, in practice disabling compression increases
* the average number of sync passes, because when we turn compression off, a
* lot of blocks' sizes will change and thus we have to re-allocate (not
* overwrite) them. It also increases the number of 128KB allocations (e.g.
* for indirect blocks and spacemaps) because these will not be compressed.
* The 128K allocations are especially detrimental to performance on highly
* fragmented systems, which may have very few free segments of this size,
* and may need to load new metaslabs to satisfy 128K allocations.
*/
/* defer frees starting in this pass */
uint_t zfs_sync_pass_deferred_free = 2;
/* don't compress starting in this pass */
static uint_t zfs_sync_pass_dont_compress = 8;
/* rewrite new bps starting in this pass */
static uint_t zfs_sync_pass_rewrite = 2;
/*
* An allocating zio is one that either currently has the DVA allocate
* stage set or will have it later in its lifetime.
*/
#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
/*
* Enable smaller cores by excluding metadata
* allocations as well.
*/
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;
#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif
static inline void __zio_execute(zio_t *zio);
static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
void
zio_init(void)
{
size_t c;
zio_cache = kmem_cache_create("zio_cache",
sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
/*
* For small buffers, we want a cache for each multiple of
* SPA_MINBLOCKSIZE. For larger buffers, we want a cache
* for each quarter-power of 2.
*/
for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
size_t p2 = size;
size_t align = 0;
size_t data_cflags, cflags;
data_cflags = KMC_NODEBUG;
cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
KMC_NODEBUG : 0;
while (!ISP2(p2))
p2 &= p2 - 1;
#ifndef _KERNEL
/*
* If we are using watchpoints, put each buffer on its own page,
* to eliminate the performance overhead of trapping to the
* kernel when modifying a non-watched buffer that shares the
* page with a watched buffer.
*/
if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
continue;
/*
* Here's the problem - on 4K native devices in userland on
* Linux using O_DIRECT, buffers must be 4K aligned or I/O
* will fail with EINVAL, causing zdb (and others) to coredump.
* Since userland probably doesn't need optimized buffer caches,
* we just force 4K alignment on everything.
*/
align = 8 * SPA_MINBLOCKSIZE;
#else
if (size < PAGESIZE) {
align = SPA_MINBLOCKSIZE;
} else if (IS_P2ALIGNED(size, p2 >> 2)) {
align = PAGESIZE;
}
#endif
if (align != 0) {
char name[36];
if (cflags == data_cflags) {
/*
* Resulting kmem caches would be identical.
* Save memory by creating only one.
*/
(void) snprintf(name, sizeof (name),
"zio_buf_comb_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name,
size, align, NULL, NULL, NULL, NULL, NULL,
cflags);
zio_data_buf_cache[c] = zio_buf_cache[c];
continue;
}
(void) snprintf(name, sizeof (name), "zio_buf_%lu",
(ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, cflags);
(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
(ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, data_cflags);
}
}
while (--c != 0) {
ASSERT(zio_buf_cache[c] != NULL);
if (zio_buf_cache[c - 1] == NULL)
zio_buf_cache[c - 1] = zio_buf_cache[c];
ASSERT(zio_data_buf_cache[c] != NULL);
if (zio_data_buf_cache[c - 1] == NULL)
zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
}
zio_inject_init();
lz4_init();
}
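/*
 * A minimal illustrative sketch (not from the OpenZFS sources) of the sizing
 * rule used by zio_init() above, mirroring its _KERNEL branch.  With
 * SPA_MINBLOCKSIZE == 512 and PAGESIZE == 4096 this yields a dedicated cache
 * for every 512-byte multiple below one page, and above that only for sizes
 * that are a multiple of a quarter of the next-lower power of two (4K, 5K,
 * 6K, 7K, 8K, 10K, 12K, 14K, 16K, 20K, ...); the trailing fix-up loop in
 * zio_init() fills the remaining slots with the next larger cache.
 */
#if 0	/* illustrative only */
static boolean_t
example_has_own_cache(size_t size)
{
	size_t p2 = size;

	/* Reduce p2 to the largest power of two that is <= size. */
	while (!ISP2(p2))
		p2 &= p2 - 1;

	return (size < PAGESIZE || IS_P2ALIGNED(size, p2 >> 2));
}
#endif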
void
zio_fini(void)
{
size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
for (size_t i = 0; i < n; i++) {
if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
(void) printf("zio_fini: [%d] %llu != %llu\n",
(int)((i + 1) << SPA_MINBLOCKSHIFT),
(long long unsigned)zio_buf_cache_allocs[i],
(long long unsigned)zio_buf_cache_frees[i]);
}
#endif
/*
* The same kmem cache can show up multiple times in both zio_buf_cache
* and zio_data_buf_cache. Do a wasteful but trivially correct scan to
* sort it out.
*/
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_buf_cache[j])
zio_buf_cache[j] = NULL;
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_data_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
VERIFY3P(zio_buf_cache[i], ==, NULL);
VERIFY3P(zio_data_buf_cache[i], ==, NULL);
}
kmem_cache_destroy(zio_link_cache);
kmem_cache_destroy(zio_cache);
zio_inject_fini();
lz4_fini();
}
/*
* ==========================================================================
* Allocate and free I/O buffers
* ==========================================================================
*/
/*
* Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
* crashdump if the kernel panics, so use it judiciously. Obviously, it's
* useful to inspect ZFS metadata, but if possible, we should avoid keeping
* excess / transient data in-core during a crashdump.
*/
void *
zio_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif
return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
/*
* Use zio_data_buf_alloc to allocate data. The data will not appear in a
* crashdump if the kernel panics. This exists so that we will limit the amount
* of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
* of kernel heap dumped to disk when the kernel panics)
*/
void *
zio_data_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}
void
zio_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif
kmem_cache_free(zio_buf_cache[c], buf);
}
void
zio_data_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
kmem_cache_free(zio_data_buf_cache[c], buf);
}
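/*
 * A minimal illustrative sketch (not from the OpenZFS sources) of the
 * alloc/free pairs above.  Sizes must be multiples of SPA_MINBLOCKSIZE, no
 * larger than SPA_MAXBLOCKSIZE, and the size passed to the free routine must
 * match the one used for the allocation; the metadata buffer is the one that
 * would show up in a crash dump.
 */
#if 0	/* illustrative only */
static void
example_buffers(size_t size)
{
	void *meta = zio_buf_alloc(size);	/* included in crash dumps */
	void *data = zio_data_buf_alloc(size);	/* excluded from crash dumps */

	memset(meta, 0, size);
	memset(data, 0, size);
	/* ... use the buffers ... */
	zio_data_buf_free(data, size);
	zio_buf_free(meta, size);
}
#endif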
static void
zio_abd_free(void *abd, size_t size)
{
(void) size;
abd_free((abd_t *)abd);
}
/*
* ==========================================================================
* Push and pop I/O transform buffers
* ==========================================================================
*/
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
zio_transform_func_t *transform)
{
zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
zt->zt_orig_abd = zio->io_abd;
zt->zt_orig_size = zio->io_size;
zt->zt_bufsize = bufsize;
zt->zt_transform = transform;
zt->zt_next = zio->io_transform_stack;
zio->io_transform_stack = zt;
zio->io_abd = data;
zio->io_size = size;
}
void
zio_pop_transforms(zio_t *zio)
{
zio_transform_t *zt;
while ((zt = zio->io_transform_stack) != NULL) {
if (zt->zt_transform != NULL)
zt->zt_transform(zio,
zt->zt_orig_abd, zt->zt_orig_size);
if (zt->zt_bufsize != 0)
abd_free(zio->io_abd);
zio->io_abd = zt->zt_orig_abd;
zio->io_size = zt->zt_orig_size;
zio->io_transform_stack = zt->zt_next;
kmem_free(zt, sizeof (zio_transform_t));
}
}
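/*
 * A minimal illustrative sketch (not from the OpenZFS sources) of the
 * transform stack.  A transform callback receives the buffer and size that
 * were current before the matching zio_push_transform() call, and callbacks
 * run in LIFO order from zio_pop_transforms(); zio_subblock() and
 * zio_decompress() below are the real in-tree callbacks.  The example_*
 * names here are hypothetical.
 */
#if 0	/* illustrative only */
static void
example_copyback_transform(zio_t *zio, abd_t *orig_data, uint64_t orig_size)
{
	/* On read, propagate the transformed result back up the stack. */
	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(orig_data, zio->io_abd, MIN(orig_size, zio->io_size));
}

static void
example_push(zio_t *zio, uint64_t newsize)
{
	/* A non-zero bufsize tells zio_pop_transforms() to free this abd. */
	zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, newsize),
	    newsize, newsize, example_copyback_transform);
}
#endif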
/*
* ==========================================================================
* I/O transform callbacks for subblocks, decompression, and decryption
* ==========================================================================
*/
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
ASSERT(zio->io_size > size);
if (zio->io_type == ZIO_TYPE_READ)
abd_copy(data, zio->io_abd, size);
}
static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
if (zio->io_error == 0) {
void *tmp = abd_borrow_buf(data, size);
int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
zio->io_abd, tmp, zio->io_size, size,
&zio->io_prop.zp_complevel);
abd_return_buf_copy(data, tmp, size);
if (zio_injection_enabled && ret == 0)
ret = zio_handle_fault_injection(zio, EINVAL);
if (ret != 0)
zio->io_error = SET_ERROR(EIO);
}
}
static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
int ret;
void *tmp;
blkptr_t *bp = zio->io_bp;
spa_t *spa = zio->io_spa;
uint64_t dsobj = zio->io_bookmark.zb_objset;
uint64_t lsize = BP_GET_LSIZE(bp);
dmu_object_type_t ot = BP_GET_TYPE(bp);
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(size, !=, 0);
if (zio->io_error != 0)
return;
/*
* Verify the cksum of MACs stored in an indirect bp. It will always
* be possible to verify this since it does not require an encryption
* key.
*/
if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
zio_crypt_decode_mac_bp(bp, mac);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/*
* We haven't decompressed the data yet, but
* zio_crypt_do_indirect_mac_checksum() requires
* decompressed data to be able to parse out the MACs
* from the indirect block. We decompress it now and
* throw away the result after we are finished.
*/
tmp = zio_buf_alloc(lsize);
ret = zio_decompress_data(BP_GET_COMPRESS(bp),
zio->io_abd, tmp, zio->io_size, lsize,
&zio->io_prop.zp_complevel);
if (ret != 0) {
ret = SET_ERROR(EIO);
goto error;
}
ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
zio_buf_free(tmp, lsize);
} else {
ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
}
abd_copy(data, zio->io_abd, size);
if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
if (ret != 0)
goto error;
return;
}
/*
* If this is an authenticated block, just check the MAC. It would be
* nice to separate this out into its own flag, but when this was done,
* we had run out of bits in what is now zio_flag_t. Future cleanup
* could make this a flag bit.
*/
if (BP_IS_AUTHENTICATED(bp)) {
if (ot == DMU_OT_OBJSET) {
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
} else {
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
zio->io_abd, size, mac);
if (zio_injection_enabled && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
}
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
}
zio_crypt_decode_params_bp(bp, salt, iv);
if (ot == DMU_OT_INTENT_LOG) {
tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmp, mac);
abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, mac);
}
ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
zio->io_abd, &no_crypt);
if (no_crypt)
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
error:
/* assert that the key was found unless this was speculative */
ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
/*
* If there was a decryption / authentication error return EIO as
* the io_error. If this was not a speculative zio, create an ereport.
*/
if (ret == ECKSUM) {
zio->io_error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, &zio->io_bookmark,
&zio->io_bp->blk_birth);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, &zio->io_bookmark, zio, 0);
}
} else {
zio->io_error = ret;
}
}
/*
* ==========================================================================
* I/O parent/child relationships and pipeline interlocks
* ==========================================================================
*/
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
list_t *pl = &cio->io_parent_list;
*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_child == cio);
return ((*zl)->zl_parent);
}
zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
list_t *cl = &pio->io_child_list;
ASSERT(MUTEX_HELD(&pio->io_lock));
*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_parent == pio);
return ((*zl)->zl_child);
}
zio_t *
zio_unique_parent(zio_t *cio)
{
zio_link_t *zl = NULL;
zio_t *pio = zio_walk_parents(cio, &zl);
VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
return (pio);
}
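/*
 * A minimal illustrative sketch (not from the OpenZFS sources) of the walker
 * cursor used above: zl starts as NULL and each call advances it.  Note that
 * zio_unique_parent() walks the parent list without io_lock, whereas
 * zio_walk_children() asserts that the parent's io_lock is held.
 */
#if 0	/* illustrative only */
static int
example_count_parents(zio_t *cio)
{
	zio_link_t *zl = NULL;
	int count = 0;

	for (zio_t *pio = zio_walk_parents(cio, &zl); pio != NULL;
	    pio = zio_walk_parents(cio, &zl))
		count++;

	return (count);
}
#endif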
void
zio_add_child(zio_t *pio, zio_t *cio)
{
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
zl->zl_parent = pio;
zl->zl_child = cio;
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
list_insert_head(&cio->io_parent_list, zl);
pio->io_child_count++;
cio->io_parent_count++;
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
}
static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
ASSERT(zl->zl_parent == pio);
ASSERT(zl->zl_child == cio);
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
list_remove(&pio->io_child_list, zl);
list_remove(&cio->io_parent_list, zl);
pio->io_child_count--;
cio->io_parent_count--;
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
kmem_cache_free(zio_link_cache, zl);
}
static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
ASSERT(zio->io_stall == NULL);
for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
continue;
uint64_t *countp = &zio->io_children[c][wait];
if (*countp != 0) {
zio->io_stage >>= 1;
ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
zio->io_stall = countp;
waiting = B_TRUE;
break;
}
}
mutex_exit(&zio->io_lock);
return (waiting);
}
__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
zio_t **next_to_executep)
{
uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
int *errorp = &pio->io_child_error[zio->io_child_type];
mutex_enter(&pio->io_lock);
if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
*errorp = zio_worst_error(*errorp, zio->io_error);
pio->io_reexecute |= zio->io_reexecute;
ASSERT3U(*countp, >, 0);
(*countp)--;
if (*countp == 0 && pio->io_stall == countp) {
zio_taskq_type_t type =
pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
ZIO_TASKQ_INTERRUPT;
pio->io_stall = NULL;
mutex_exit(&pio->io_lock);
/*
* If we can tell the caller to execute this parent next, do
* so. We only do this if the parent's zio type matches the
* child's type. Otherwise dispatch the parent zio in its
* own taskq.
*
* Having the caller execute the parent when possible reduces
* locking on the zio taskq's, reduces context switch
* overhead, and has no recursion penalty. Note that one
* read from disk typically causes at least 3 zio's: a
* zio_null(), the logical zio_read(), and then a physical
* zio. When the physical ZIO completes, we are able to call
* zio_done() on all 3 of these zio's from one invocation of
* zio_execute() by returning the parent back to
* zio_execute(). Since the parent isn't executed until this
* thread returns back to zio_execute(), the caller should do
* so promptly.
*
* In other cases, dispatching the parent prevents
* overflowing the stack when we have deeply nested
* parent-child relationships, as we do with the "mega zio"
* of writes for spa_sync(), and the chain of ZIL blocks.
*/
if (next_to_executep != NULL && *next_to_executep == NULL &&
pio->io_type == zio->io_type) {
*next_to_executep = pio;
} else {
zio_taskq_dispatch(pio, type, B_FALSE);
}
} else {
mutex_exit(&pio->io_lock);
}
}
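/*
 * A simplified illustrative sketch (not from the OpenZFS sources) of the
 * "execute the parent next" hand-off described above: a completing child
 * passes &next to zio_notify_parent() and returns it to __zio_execute(),
 * which then runs the parent on this thread instead of re-dispatching it to
 * a taskq.  The real caller is zio_done(), which additionally removes each
 * parent/child link as it walks the list.
 */
#if 0	/* illustrative only */
static zio_t *
example_finish_stage(zio_t *zio)
{
	zio_t *next = NULL;
	zio_link_t *zl = NULL;

	for (zio_t *pio = zio_walk_parents(zio, &zl); pio != NULL;
	    pio = zio_walk_parents(zio, &zl))
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next);

	return (next);	/* NULL, or the parent to execute next */
}
#endif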
static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
if (zio->io_child_error[c] != 0 && zio->io_error == 0)
zio->io_error = zio->io_child_error[c];
}
int
zio_bookmark_compare(const void *x1, const void *x2)
{
const zio_t *z1 = x1;
const zio_t *z2 = x2;
if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
return (-1);
if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
return (1);
if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
return (-1);
if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
return (1);
if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
return (-1);
if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
return (1);
if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
return (-1);
if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
return (1);
if (z1 < z2)
return (-1);
if (z1 > z2)
return (1);
return (0);
}
/*
* ==========================================================================
* Create the various types of I/O (read, write, free, etc)
* ==========================================================================
*/
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
void *private, zio_type_t type, zio_priority_t priority,
zio_flag_t flags, vdev_t *vd, uint64_t offset,
const zbookmark_phys_t *zb, enum zio_stage stage,
enum zio_stage pipeline)
{
zio_t *zio;
IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
ASSERT(vd || stage == ZIO_STAGE_OPEN);
IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
memset(zio, 0, sizeof (zio_t));
mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
list_create(&zio->io_parent_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_parent_node));
list_create(&zio->io_child_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_child_node));
metaslab_trace_init(&zio->io_alloc_list);
if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV;
else if (flags & ZIO_FLAG_GANG_CHILD)
zio->io_child_type = ZIO_CHILD_GANG;
else if (flags & ZIO_FLAG_DDT_CHILD)
zio->io_child_type = ZIO_CHILD_DDT;
else
zio->io_child_type = ZIO_CHILD_LOGICAL;
if (bp != NULL) {
zio->io_bp = (blkptr_t *)bp;
zio->io_bp_copy = *bp;
zio->io_bp_orig = *bp;
if (type != ZIO_TYPE_WRITE ||
zio->io_child_type == ZIO_CHILD_DDT)
zio->io_bp = &zio->io_bp_copy; /* so caller can free */
if (zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_logical = zio;
if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
pipeline |= ZIO_GANG_STAGES;
}
zio->io_spa = spa;
zio->io_txg = txg;
zio->io_done = done;
zio->io_private = private;
zio->io_type = type;
zio->io_priority = priority;
zio->io_vd = vd;
zio->io_offset = offset;
zio->io_orig_abd = zio->io_abd = data;
zio->io_orig_size = zio->io_size = psize;
zio->io_lsize = lsize;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_orig_stage = zio->io_stage = stage;
zio->io_orig_pipeline = zio->io_pipeline = pipeline;
zio->io_pipeline_trace = ZIO_STAGE_OPEN;
zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
if (zb != NULL)
zio->io_bookmark = *zb;
if (pio != NULL) {
zio->io_metaslab_class = pio->io_metaslab_class;
if (zio->io_logical == NULL)
zio->io_logical = pio->io_logical;
if (zio->io_child_type == ZIO_CHILD_GANG)
zio->io_gang_leader = pio->io_gang_leader;
zio_add_child(pio, zio);
}
taskq_init_ent(&zio->io_tqent);
return (zio);
}
void
zio_destroy(zio_t *zio)
{
metaslab_trace_fini(&zio->io_alloc_list);
list_destroy(&zio->io_parent_list);
list_destroy(&zio->io_child_list);
mutex_destroy(&zio->io_lock);
cv_destroy(&zio->io_cv);
kmem_cache_free(zio_cache, zio);
}
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
void *private, zio_flag_t flags)
{
zio_t *zio;
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
return (zio);
}
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
{
return (zio_null(NULL, spa, NULL, done, private, flags));
}
static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
enum blk_verify_flag blk_verify, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("bad blkptr at %px: "
"DVA[0]=%#llx/%#llx "
"DVA[1]=%#llx/%#llx "
"DVA[2]=%#llx/%#llx "
"prop=%#llx "
"pad=%#llx,%#llx "
"phys_birth=%#llx "
"birth=%#llx "
"fill=%#llx "
"cksum=%#llx/%#llx/%#llx/%#llx",
bp,
(long long)bp->blk_dva[0].dva_word[0],
(long long)bp->blk_dva[0].dva_word[1],
(long long)bp->blk_dva[1].dva_word[0],
(long long)bp->blk_dva[1].dva_word[1],
(long long)bp->blk_dva[2].dva_word[0],
(long long)bp->blk_dva[2].dva_word[1],
(long long)bp->blk_prop,
(long long)bp->blk_pad[0],
(long long)bp->blk_pad[1],
(long long)bp->blk_phys_birth,
(long long)bp->blk_birth,
(long long)bp->blk_fill,
(long long)bp->blk_cksum.zc_word[0],
(long long)bp->blk_cksum.zc_word[1],
(long long)bp->blk_cksum.zc_word[2],
(long long)bp->blk_cksum.zc_word[3]);
switch (blk_verify) {
case BLK_VERIFY_HALT:
zfs_panic_recover("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_LOG:
zfs_dbgmsg("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_ONLY:
break;
}
return (1);
}
/*
* Verify the block pointer fields contain reasonable values. This means
* it only contains known object types, checksum/compression identifiers,
* block sizes within the maximum allowed limits, valid DVAs, etc.
*
* If everything checks out, B_TRUE is returned. The blk_verify argument
* controls the behavior when an invalid field is detected.
*
* Values for blk_verify_flag:
* BLK_VERIFY_ONLY: evaluate the block
* BLK_VERIFY_LOG: evaluate the block and log problems
* BLK_VERIFY_HALT: call zfs_panic_recover on error
*
* Values for blk_config_flag:
* BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
* BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
* obtained for reader
* BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
* performance
*/
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
{
int errors = 0;
if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid TYPE %llu",
bp, (longlong_t)BP_GET_TYPE(bp));
}
if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid CHECKSUM %llu",
bp, (longlong_t)BP_GET_CHECKSUM(bp));
}
if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid COMPRESS %llu",
bp, (longlong_t)BP_GET_COMPRESS(bp));
}
if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid LSIZE %llu",
bp, (longlong_t)BP_GET_LSIZE(bp));
}
if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid PSIZE %llu",
bp, (longlong_t)BP_GET_PSIZE(bp));
}
if (BP_IS_EMBEDDED(bp)) {
if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px has invalid ETYPE %llu",
bp, (longlong_t)BPE_GET_ETYPE(bp));
}
}
/*
* Do not verify individual DVAs if the config is not trusted. This
* will be done once the zio is executed in vdev_mirror_map_alloc.
*/
if (!spa->spa_trust_config)
return (errors == 0);
switch (blk_config) {
case BLK_CONFIG_HELD:
ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
break;
case BLK_CONFIG_NEEDED:
spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
break;
case BLK_CONFIG_SKIP:
return (errors == 0);
default:
panic("invalid blk_config %u", blk_config);
}
/*
* Pool-specific checks.
*
* Note: it would be nice to verify that the blk_birth and
* BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze()
* allows the birth time of log blocks (and dmu_sync()-ed blocks
* that are in the log) to be arbitrarily large.
*/
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
const dva_t *dva = &bp->blk_dva[i];
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_hole_ops) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has hole VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_missing_ops) {
/*
* "missing" vdevs are valid during import, but we
* don't have their detailed info (e.g. asize), so
* we can't perform any more checks on them.
*/
continue;
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %px DVA %u has invalid OFFSET %llu",
bp, i, (longlong_t)offset);
}
}
if (blk_config == BLK_CONFIG_NEEDED)
spa_config_exit(spa, SCL_VDEV, bp);
return (errors == 0);
}
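/*
 * A minimal illustrative sketch (not from the OpenZFS sources) of the flag
 * pairs documented above.  zio_free() and zio_claim() below call
 * zfs_blkptr_verify() with BLK_VERIFY_HALT; a best-effort consumer that only
 * wants a diagnostic might do something like this instead.
 */
#if 0	/* illustrative only */
static void
example_verify_bp(spa_t *spa, const blkptr_t *bp)
{
	/* No config lock held, so let SCL_VDEV be taken as reader. */
	if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_LOG))
		zfs_dbgmsg("block pointer failed verification");
}
#endif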
boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
(void) bp;
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children)
return (B_FALSE);
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL)
return (B_FALSE);
if (vd->vdev_ops == &vdev_hole_ops)
return (B_FALSE);
if (vd->vdev_ops == &vdev_missing_ops) {
return (B_FALSE);
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize)
return (B_FALSE);
return (B_TRUE);
}
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
data, size, size, done, private,
ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
return (zio);
}
zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *physdone, zio_done_func_t *done,
void *private, zio_priority_t priority, zio_flag_t flags,
const zbookmark_phys_t *zb)
{
zio_t *zio;
ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
zp->zp_compress >= ZIO_COMPRESS_OFF &&
zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
DMU_OT_IS_VALID(zp->zp_type) &&
zp->zp_level < 32 &&
zp->zp_copies > 0 &&
zp->zp_copies <= spa_max_replication(spa));
zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
zio->io_ready = ready;
zio->io_children_ready = children_ready;
zio->io_physdone = physdone;
zio->io_prop = *zp;
/*
* Data can be NULL if we are going to call zio_write_override() to
* provide the already-allocated BP. But we may need the data to
* verify a dedup hit (if requested). In this case, don't try to
* dedup (just take the already-allocated BP verbatim). Encrypted
* dedup blocks need data as well so we also disable dedup in this
* case.
*/
if (data == NULL &&
(zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
}
return (zio);
}
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
return (zio);
}
void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
boolean_t brtwrite)
{
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
ASSERT(!brtwrite || !nopwrite);
/*
* We must reset the io_prop to match the values that existed
* when the bp was first written by dmu_sync() keeping in mind
* that nopwrite and dedup are mutually exclusive.
*/
zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
zio->io_prop.zp_nopwrite = nopwrite;
zio->io_prop.zp_brtwrite = brtwrite;
zio->io_prop.zp_copies = copies;
zio->io_bp_override = bp;
}
void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
(void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
/*
* The check for EMBEDDED is a performance optimization. We
* process the free here (by ignoring it) rather than
* putting it on the list and then processing it in zio_free_sync().
*/
if (BP_IS_EMBEDDED(bp))
return;
/*
* Frees that are for the currently-syncing txg, are not going to be
* deferred, and which will not need to do a read (i.e. not GANG or
* DEDUP), can be processed immediately. Otherwise, put them on the
* in-memory list for later processing.
*
* Note that we only defer frees after zfs_sync_pass_deferred_free
* when the log space map feature is disabled. [see relevant comment
* in spa_sync_iterate_to_convergence()]
*/
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
txg != spa->spa_syncing_txg ||
(spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
brt_maybe_exists(spa, bp)) {
metaslab_check_free(spa, bp);
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
}
}
/*
* To improve performance, this function may return NULL if we were able
* to do the free immediately. This avoids the cost of creating a zio
* (and linking it to the parent, etc).
*/
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_flag_t flags)
{
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
if (BP_IS_EMBEDDED(bp))
return (NULL);
metaslab_check_free(spa, bp);
arc_freed(spa, bp);
dsl_scan_freed(spa, bp);
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
brt_maybe_exists(spa, bp)) {
/*
* GANG, DEDUP and BRT blocks can induce a read (for the gang
* block header, the DDT or the BRT), so issue them
* asynchronously so that this thread is not tied up.
*/
enum zio_stage stage =
ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), NULL, NULL,
ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
} else {
metaslab_free(spa, bp, txg, B_FALSE);
return (NULL);
}
}
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_done_func_t *done, void *private, zio_flag_t flags)
{
zio_t *zio;
(void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
if (BP_IS_EMBEDDED(bp))
return (zio_null(pio, spa, NULL, NULL, NULL, 0));
/*
* A claim is an allocation of a specific block. Claims are needed
* to support immediate writes in the intent log. The issue is that
* immediate writes contain committed data, but in a txg that was
* *not* committed. Upon opening the pool after an unclean shutdown,
* the intent log claims all blocks that contain immediate write data
* so that the SPA knows they're in use.
*
* All claims *must* be resolved in the first txg -- before the SPA
* starts allocating blocks -- so that nothing is allocated twice.
* If txg == 0 we just verify that the block is claimable.
*/
ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
spa_min_claim_txg(spa));
ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
ASSERT0(zio->io_queued_timestamp);
return (zio);
}
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *private, zio_flag_t flags)
{
zio_t *zio;
int c;
if (vd->vdev_children == 0) {
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
zio->io_cmd = cmd;
} else {
zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
for (c = 0; c < vd->vdev_children; c++)
zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
done, private, flags));
}
return (zio);
}
zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *private, zio_priority_t priority,
zio_flag_t flags, enum trim_flag trim_flags)
{
zio_t *zio;
ASSERT0(vd->vdev_children);
ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
ASSERT3U(size, !=, 0);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
zio->io_trim_flags = trim_flags;
return (zio);
}
zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
return (zio);
}
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, zio_flag_t flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
/*
* zec checksums are necessarily destructive -- they modify
* the end of the write buffer to hold the verifier/checksum.
* Therefore, we must make a local copy in case the data is
* being written to multiple places in parallel.
*/
abd_t *wbuf = abd_alloc_sametype(data, size);
abd_copy(wbuf, data, size);
zio_push_transform(zio, wbuf, size, size, NULL);
}
return (zio);
}
/*
* Create a child I/O to do some work for us.
*/
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
abd_t *data, uint64_t size, int type, zio_priority_t priority,
zio_flag_t flags, zio_done_func_t *done, void *private)
{
enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
zio_t *zio;
/*
* vdev child I/Os do not propagate their error to the parent.
* Therefore, for correct operation the caller *must* check for
* and handle the error in the child i/o's done callback.
* The only exceptions are i/os that we don't care about
* (OPTIONAL or REPAIR).
*/
ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
done != NULL);
if (type == ZIO_TYPE_READ && bp != NULL) {
/*
* If we have the bp, then the child should perform the
* checksum and the parent need not. This pushes error
* detection as close to the leaves as possible and
* eliminates redundant checksums in the interior nodes.
*/
pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
if (vd->vdev_ops->vdev_op_leaf) {
ASSERT0(vd->vdev_children);
offset += VDEV_LABEL_START_SIZE;
}
flags |= ZIO_VDEV_CHILD_FLAGS(pio);
/*
* If we've decided to do a repair, the write is not speculative --
* even if the original read was.
*/
if (flags & ZIO_FLAG_IO_REPAIR)
flags &= ~ZIO_FLAG_SPECULATIVE;
/*
* If we're creating a child I/O that is not associated with a
* top-level vdev, then the child zio is not an allocating I/O.
* If this is a retried I/O then we ignore it since we will
* have already processed the original allocating I/O.
*/
if (flags & ZIO_FLAG_IO_ALLOCATING &&
(vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
ASSERT(pio->io_metaslab_class != NULL);
ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
ASSERT(type == ZIO_TYPE_WRITE);
ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
pio->io_child_type == ZIO_CHILD_GANG);
flags &= ~ZIO_FLAG_IO_ALLOCATING;
}
zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
zio->io_physdone = pio->io_physdone;
if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
zio->io_logical->io_phys_children++;
return (zio);
}
zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
zio_type_t type, zio_priority_t priority, zio_flag_t flags,
zio_done_func_t *done, void *private)
{
zio_t *zio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
data, size, size, done, private, type, priority,
flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
vd, offset, NULL,
ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
return (zio);
}
void
zio_flush(zio_t *zio, vdev_t *vd)
{
zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
void
zio_shrink(zio_t *zio, uint64_t size)
{
ASSERT3P(zio->io_executor, ==, NULL);
ASSERT3U(zio->io_orig_size, ==, zio->io_size);
ASSERT3U(size, <=, zio->io_size);
/*
* We don't shrink for raidz because of problems with the
* reconstruction when reading back less than the block size.
* Note, BP_IS_RAIDZ() assumes no compression.
*/
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
if (!BP_IS_RAIDZ(zio->io_bp)) {
/* we are not doing a raw write */
ASSERT3U(zio->io_size, ==, zio->io_lsize);
zio->io_orig_size = zio->io_size = zio->io_lsize = size;
}
}
/*
* ==========================================================================
* Prepare to read and write logical blocks
* ==========================================================================
*/
static zio_t *
zio_read_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
uint64_t psize =
BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
zio->io_child_type == ZIO_CHILD_LOGICAL &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decompress);
}
if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
zio->io_child_type == ZIO_CHILD_LOGICAL) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decrypt);
}
if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
int psize = BPE_GET_PSIZE(bp);
void *data = abd_borrow_buf(zio->io_abd, psize);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
decode_embedded_bp_compressed(bp, data);
abd_return_buf_copy(zio->io_abd, data, psize);
} else {
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
}
- if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
- zio->io_flags |= ZIO_FLAG_DONT_CACHE;
-
- if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
- zio->io_flags |= ZIO_FLAG_DONT_CACHE;
-
if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
return (zio);
}
static zio_t *
zio_write_bp_init(zio_t *zio)
{
if (!IO_IS_ALLOCATING(zio))
return (zio);
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
if (zio->io_bp_override) {
blkptr_t *bp = zio->io_bp;
zio_prop_t *zp = &zio->io_prop;
ASSERT(bp->blk_birth != zio->io_txg);
*bp = *zio->io_bp_override;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zp->zp_brtwrite)
return (zio);
ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
if (BP_IS_EMBEDDED(bp))
return (zio);
/*
* If we've been overridden and nopwrite is set then
* set the flag accordingly to indicate that a nopwrite
* has already occurred.
*/
if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
ASSERT(!zp->zp_dedup);
ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
zio->io_flags |= ZIO_FLAG_NOPWRITE;
return (zio);
}
ASSERT(!zp->zp_nopwrite);
if (BP_IS_HOLE(bp) || !zp->zp_dedup)
return (zio);
ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
!zp->zp_encrypt) {
BP_SET_DEDUP(bp, 1);
zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
return (zio);
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
}
return (zio);
}
static zio_t *
zio_write_compress(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_prop_t *zp = &zio->io_prop;
enum zio_compress compress = zp->zp_compress;
blkptr_t *bp = zio->io_bp;
uint64_t lsize = zio->io_lsize;
uint64_t psize = zio->io_size;
uint32_t pass = 1;
/*
* If our children haven't all reached the ready stage,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
return (NULL);
}
if (!IO_IS_ALLOCATING(zio))
return (zio);
if (zio->io_children_ready != NULL) {
/*
* Now that all our children are ready, run the callback
* associated with this zio in case it wants to modify the
* data to be written.
*/
ASSERT3U(zp->zp_level, >, 0);
zio->io_children_ready(zio);
}
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
/*
* We're rewriting an existing block, which means we're
* working on behalf of spa_sync(). For spa_sync() to
* converge, it must eventually be the case that we don't
* have to allocate new blocks. But compression changes
* the blocksize, which forces a reallocate, and makes
* convergence take longer. Therefore, after the first
* few passes, stop compressing to ensure convergence.
*/
pass = spa_sync_pass(spa);
ASSERT(zio->io_txg == spa_syncing_txg(spa));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!BP_GET_DEDUP(bp));
if (pass >= zfs_sync_pass_dont_compress)
compress = ZIO_COMPRESS_OFF;
/* Make sure someone doesn't change their mind on overwrites */
ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
spa_max_replication(spa)) == BP_GET_NDVAS(bp));
}
/* If it's a compressed write that is not raw, compress the buffer. */
if (compress != ZIO_COMPRESS_OFF &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
void *cbuf = NULL;
psize = zio_compress_data(compress, zio->io_abd, &cbuf, lsize,
zp->zp_complevel);
if (psize == 0) {
compress = ZIO_COMPRESS_OFF;
} else if (psize >= lsize) {
compress = ZIO_COMPRESS_OFF;
if (cbuf != NULL)
zio_buf_free(cbuf, lsize);
} else if (!zp->zp_dedup && !zp->zp_encrypt &&
psize <= BPE_PAYLOAD_SIZE &&
zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
encode_embedded_bp_compressed(bp,
cbuf, compress, lsize, psize);
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
BP_SET_TYPE(bp, zio->io_prop.zp_type);
BP_SET_LEVEL(bp, zio->io_prop.zp_level);
zio_buf_free(cbuf, lsize);
bp->blk_birth = zio->io_txg;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
ASSERT(spa_feature_is_active(spa,
SPA_FEATURE_EMBEDDED_DATA));
return (zio);
} else {
/*
* Round compressed size up to the minimum allocation
* size of the smallest-ashift device, and zero the
* tail. This ensures that the compressed size of the
* BP (and thus compressratio property) are correct,
* in that we charge for the padding used to fill out
* the last sector.
*/
ASSERT3U(spa->spa_min_alloc, >=, SPA_MINBLOCKSHIFT);
size_t rounded = (size_t)roundup(psize,
spa->spa_min_alloc);
if (rounded >= lsize) {
compress = ZIO_COMPRESS_OFF;
zio_buf_free(cbuf, lsize);
psize = lsize;
} else {
abd_t *cdata = abd_get_from_buf(cbuf, lsize);
abd_take_ownership_of_buf(cdata, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, lsize, NULL);
}
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
zp->zp_type == DMU_OT_DNODE) {
/*
* The DMU actually relies on the zio layer's compression
* to free metadnode blocks that have had all contained
* dnodes freed. As a result, even when doing a raw
* receive, we must check whether the block can be compressed
* to a hole.
*/
psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
zio->io_abd, NULL, lsize, zp->zp_complevel);
if (psize == 0 || psize >= lsize)
compress = ZIO_COMPRESS_OFF;
} else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
!(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
/*
* If we are raw receiving an encrypted dataset we should not
* take this codepath because it will change the on-disk block
* and decryption will fail.
*/
size_t rounded = MIN((size_t)roundup(psize,
spa->spa_min_alloc), lsize);
if (rounded != psize) {
abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, rounded, NULL);
}
} else {
ASSERT3U(psize, !=, 0);
}
/*
* The final pass of spa_sync() must be all rewrites, but the first
* few passes offer a trade-off: allocating blocks defers convergence,
* but newly allocated blocks are sequential, so they can be written
* to disk faster. Therefore, we allow the first few passes of
* spa_sync() to allocate new blocks, but force rewrites after that.
* There should only be a handful of blocks after pass 1 in any case.
*/
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
BP_GET_PSIZE(bp) == psize &&
pass >= zfs_sync_pass_rewrite) {
VERIFY3U(psize, !=, 0);
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else {
BP_ZERO(bp);
zio->io_pipeline = ZIO_WRITE_PIPELINE;
}
if (psize == 0) {
if (zio->io_bp_orig.blk_birth != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_BIRTH(bp, zio->io_txg, 0);
}
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
} else {
ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, compress);
BP_SET_CHECKSUM(bp, zp->zp_checksum);
BP_SET_DEDUP(bp, zp->zp_dedup);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
if (zp->zp_dedup) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!zp->zp_encrypt ||
DMU_OT_IS_ENCRYPTED(zp->zp_type));
zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
}
if (zp->zp_nopwrite) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
}
}
return (zio);
}
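/*
 * A worked example (not from the OpenZFS sources) of the size rounding in
 * zio_write_compress() above: with a 4K smallest-ashift device, a 16K block
 * that compresses to 5000 bytes is charged 8K and has bytes 5000..8191
 * zeroed, while one that only compresses to 15000 bytes rounds up to 16K
 * (>= lsize) and is written uncompressed.
 */
#if 0	/* illustrative only */
static uint64_t
example_charged_psize(uint64_t lsize, uint64_t compressed, uint64_t min_alloc)
{
	uint64_t rounded = roundup(compressed, min_alloc);

	/* No space saved after padding: store the block uncompressed. */
	return (rounded >= lsize ? lsize : rounded);
}
#endif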
static zio_t *
zio_free_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
if (BP_GET_DEDUP(bp))
zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
}
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
return (zio);
}
/*
* ==========================================================================
* Execute the I/O pipeline
* ==========================================================================
*/
static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
int flags = (cutinline ? TQ_FRONT : 0);
/*
* If we're a config writer or a probe, the normal issue and
* interrupt threads may all be blocked waiting for the config lock.
* In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
*/
if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
t = ZIO_TYPE_NULL;
/*
* A similar issue exists for the L2ARC write thread until L2ARC 2.0.
*/
if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
t = ZIO_TYPE_NULL;
/*
* If this is a high priority I/O, then use the high priority taskq if
* available.
*/
if ((zio->io_priority == ZIO_PRIORITY_NOW ||
zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
q++;
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
/*
* NB: We are assuming that the zio can only be dispatched
* to a single taskq at a time. It would be a grievous error
* to dispatch the zio to another taskq at the same time.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
&zio->io_tqent);
}
static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
spa_t *spa = zio->io_spa;
taskq_t *tq = taskq_of_curthread();
for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t i;
for (i = 0; i < tqs->stqs_count; i++) {
if (tqs->stqs_taskq[i] == tq)
return (B_TRUE);
}
}
return (B_FALSE);
}
static zio_t *
zio_issue_async(zio_t *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
void
zio_interrupt(void *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}
void
zio_delay_interrupt(zio_t *zio)
{
/*
* The timeout_generic() function isn't defined in userspace, so
* rather than trying to implement the function, the zio delay
* functionality has been disabled for userspace builds.
*/
#ifdef _KERNEL
/*
* If io_target_timestamp is zero, then no delay has been registered
* for this IO, so jump to the end of this function and "skip" the
* delay, issuing it directly to the zio layer.
*/
if (zio->io_target_timestamp != 0) {
hrtime_t now = gethrtime();
if (now >= zio->io_target_timestamp) {
/*
* This IO has already taken longer than the target
* delay to complete, so we don't want to delay it
* any longer; we "miss" the delay and issue it
* directly to the zio layer. This is likely due to
* the target latency being set to a value less than
* the underlying hardware can satisfy (e.g. delay
* set to 1ms, but the disks take 10ms to complete an
* IO request).
*/
DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
hrtime_t, now);
zio_interrupt(zio);
} else {
taskqid_t tid;
hrtime_t diff = zio->io_target_timestamp - now;
clock_t expire_at_tick = ddi_get_lbolt() +
NSEC_TO_TICK(diff);
DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
hrtime_t, now, hrtime_t, diff);
if (NSEC_TO_TICK(diff) == 0) {
/* Our delay is less than a jiffy - just spin */
zfs_sleep_until(zio->io_target_timestamp);
zio_interrupt(zio);
} else {
/*
* Use taskq_dispatch_delay() in the place of
* OpenZFS's timeout_generic().
*/
tid = taskq_dispatch_delay(system_taskq,
zio_interrupt, zio, TQ_NOSLEEP,
expire_at_tick);
if (tid == TASKQID_INVALID) {
/*
* Couldn't allocate a task. Just
* finish the zio without a delay.
*/
zio_interrupt(zio);
}
}
}
return;
}
#endif
DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
zio_interrupt(zio);
}
static void
zio_deadman_impl(zio_t *pio, int ziodepth)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
vdev_t *vd = pio->io_vd;
if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
zbookmark_phys_t *zb = &pio->io_bookmark;
uint64_t delta = gethrtime() - pio->io_timestamp;
uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
"delta=%llu queued=%llu io=%llu "
"path=%s "
"last=%llu type=%d "
"priority=%d flags=0x%llx stage=0x%x "
"pipeline=0x%x pipeline-trace=0x%x "
"objset=%llu object=%llu "
"level=%llu blkid=%llu "
"offset=%llu size=%llu "
"error=%d",
ziodepth, pio, pio->io_timestamp,
(u_longlong_t)delta, pio->io_delta, pio->io_delay,
vd ? vd->vdev_path : "NULL",
vq ? vq->vq_io_complete_ts : 0, pio->io_type,
pio->io_priority, (u_longlong_t)pio->io_flags,
pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
(u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
(u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
pio->io_error);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
pio->io_spa, vd, zb, pio, 0);
if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
taskq_empty_ent(&pio->io_tqent)) {
zio_interrupt(pio);
}
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_deadman_impl(cio, ziodepth + 1);
}
mutex_exit(&pio->io_lock);
}
/*
* Log the critical information describing this zio and all of its children
* using the zfs_dbgmsg() interface then post deadman event for the ZED.
*/
void
zio_deadman(zio_t *pio, const char *tag)
{
spa_t *spa = pio->io_spa;
char *name = spa_name(spa);
if (!zfs_deadman_enabled || spa_suspended(spa))
return;
zio_deadman_impl(pio, 0);
switch (spa_get_deadman_failmode(spa)) {
case ZIO_FAILURE_MODE_WAIT:
zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_CONTINUE:
zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_PANIC:
fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
break;
}
}
/*
* Execute the I/O pipeline until one of the following occurs:
* (1) the I/O completes; (2) the pipeline stalls waiting for
* dependent child I/Os; (3) the I/O issues, so we're waiting
* for an I/O completion interrupt; (4) the I/O is delegated by
* vdev-level caching or aggregation; (5) the I/O is deferred
* due to vdev-level queueing; (6) the I/O is handed off to
* another thread. In all cases, the pipeline stops whenever
* there's no CPU work; it never burns a thread in cv_wait_io().
*
* There's no locking on io_stage because there's no legitimate way
* for multiple threads to be attempting to process the same I/O.
*/
static zio_pipe_stage_t *zio_pipeline[];
/*
* zio_execute() is a wrapper around the static function
* __zio_execute() so that we can force __zio_execute() to be
* inlined. This reduces stack overhead which is important
* because __zio_execute() is called recursively in several zio
* code paths. zio_execute() itself cannot be inlined because
* it is externally visible.
*/
void
zio_execute(void *zio)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
__zio_execute(zio);
spl_fstrans_unmark(cookie);
}
/*
* Used to determine if in the current context the stack is sized large
* enough to allow zio_execute() to be called recursively. A minimum
* stack size of 16K is required to avoid needing to re-dispatch the zio.
*/
static boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)
dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
/* Executing in txg_sync_thread() context. */
if (dp && curthread == dp->dp_tx.tx_sync_thread)
return (B_TRUE);
/* Pool initialization outside of zio_taskq context. */
if (dp && spa_is_initializing(dp->dp_spa) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
return (B_TRUE);
#else
(void) zio;
#endif /* HAVE_LARGE_STACKS */
return (B_FALSE);
}
__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
{
ASSERT3U(zio->io_queued_timestamp, >, 0);
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
zio->io_executor = curthread;
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
ASSERT(zio->io_stall == NULL);
do {
stage <<= 1;
} while ((stage & pipeline) == 0);
ASSERT(stage <= ZIO_STAGE_DONE);
/*
* If we are in interrupt context and this pipeline stage
* will grab a config lock that is held across I/O,
* or may wait for an I/O that needs an interrupt thread
* to complete, issue async to avoid deadlock.
*
* For VDEV_IO_START, we cut in line so that the io will
* be sent to disk promptly.
*/
if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
/*
* If the current context doesn't have large enough stacks
* the zio must be issued asynchronously to prevent overflow.
*/
if (zio_execute_stack_check(zio)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
zio->io_stage = stage;
zio->io_pipeline_trace |= zio->io_stage;
/*
* The zio pipeline stage returns the next zio to execute
* (typically the same as this one), or NULL if we should
* stop.
*/
zio = zio_pipeline[highbit64(stage) - 1](zio);
if (zio == NULL)
return;
}
}
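/*
 * A minimal illustrative sketch (not from the OpenZFS sources) of the
 * stage-advance loop in __zio_execute() above, taken in isolation: each
 * stage is a single bit and the pipeline is a mask of the stages this zio
 * will visit, so shifting the current stage left until it lands on a set bit
 * yields the next stage.  ZIO_STAGE_DONE is always part of the pipeline, so
 * the loop terminates.
 */
#if 0	/* illustrative only */
static enum zio_stage
example_next_stage(enum zio_stage cur, enum zio_stage pipeline)
{
	enum zio_stage stage = cur;

	ASSERT(ISP2(stage));
	do {
		stage <<= 1;
	} while ((stage & pipeline) == 0);

	return (stage);
}
#endif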
/*
* ==========================================================================
* Initiate I/O, either sync or async
* ==========================================================================
*/
int
zio_wait(zio_t *zio)
{
/*
* Some routines, like zio_free_sync(), may return a NULL zio
* to avoid the performance overhead of creating and then destroying
* an unneeded zio. For the callers' simplicity, we accept a NULL
* zio and ignore it.
*/
if (zio == NULL)
return (0);
long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
int error;
ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
ASSERT3P(zio->io_executor, ==, NULL);
zio->io_waiter = curthread;
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
mutex_enter(&zio->io_lock);
while (zio->io_executor != NULL) {
error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
ddi_get_lbolt() + timeout);
if (zfs_deadman_enabled && error == -1 &&
gethrtime() - zio->io_queued_timestamp >
spa_deadman_ziotime(zio->io_spa)) {
mutex_exit(&zio->io_lock);
timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
zio_deadman(zio, FTAG);
mutex_enter(&zio->io_lock);
}
}
mutex_exit(&zio->io_lock);
error = zio->io_error;
zio_destroy(zio);
return (error);
}
void
zio_nowait(zio_t *zio)
{
/*
* See comment in zio_wait().
*/
if (zio == NULL)
return;
ASSERT3P(zio->io_executor, ==, NULL);
if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
list_is_empty(&zio->io_parent_list)) {
zio_t *pio;
/*
* This is a logical async I/O with no parent to wait for it.
* We add it to the spa_async_zio_root "Godfather" I/O, which
* will ensure it completes prior to unloading the pool.
*/
spa_t *spa = zio->io_spa;
pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
zio_add_child(pio, zio);
}
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
}
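/*
* Illustrative usage sketch (schematic, with hypothetical arguments): a
* typical caller builds a root zio, hangs asynchronous children off it
* with zio_nowait(), and then blocks on the root with zio_wait(), which
* waits for the root and all of its children:
*
*   zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
*   zio_nowait(zio_read(rio, spa, bp, abd, size, done_cb, priv,
*       ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
*   int error = zio_wait(rio);
*/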
/*
* ==========================================================================
* Reexecute, cancel, or suspend/resume failed I/O
* ==========================================================================
*/
static void
zio_reexecute(void *arg)
{
zio_t *pio = arg;
zio_t *cio, *cio_next;
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
ASSERT(pio->io_gang_leader == NULL);
ASSERT(pio->io_gang_tree == NULL);
pio->io_flags = pio->io_orig_flags;
pio->io_stage = pio->io_orig_stage;
pio->io_pipeline = pio->io_orig_pipeline;
pio->io_reexecute = 0;
pio->io_flags |= ZIO_FLAG_REEXECUTED;
pio->io_pipeline_trace = 0;
pio->io_error = 0;
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_state[w] = 0;
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
pio->io_child_error[c] = 0;
if (IO_IS_ALLOCATING(pio))
BP_ZERO(pio->io_bp);
/*
* As we reexecute pio's children, new children could be created.
* New children go to the head of pio's io_child_list, however,
* so we will (correctly) not reexecute them. The key is that
* the remainder of pio's io_child_list, from 'cio_next' onward,
* cannot be affected by any side effects of reexecuting 'cio'.
*/
zio_link_t *zl = NULL;
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w]++;
mutex_exit(&pio->io_lock);
zio_reexecute(cio);
mutex_enter(&pio->io_lock);
}
mutex_exit(&pio->io_lock);
/*
* Now that all children have been reexecuted, execute the parent.
* We don't reexecute "The Godfather" I/O here as it's the
* responsibility of the caller to wait on it.
*/
if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
pio->io_queued_timestamp = gethrtime();
__zio_execute(pio);
}
}
void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
fm_panic("Pool '%s' has encountered an uncorrectable I/O "
"failure and the failure mode property for this pool "
"is set to panic.", spa_name(spa));
cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
"failure and has been suspended.\n", spa_name(spa));
(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
NULL, NULL, 0);
mutex_enter(&spa->spa_suspend_lock);
if (spa->spa_suspend_zio_root == NULL)
spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
spa->spa_suspended = reason;
if (zio != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
ASSERT(zio != spa->spa_suspend_zio_root);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio_unique_parent(zio) == NULL);
ASSERT(zio->io_stage == ZIO_STAGE_DONE);
zio_add_child(spa->spa_suspend_zio_root, zio);
}
mutex_exit(&spa->spa_suspend_lock);
}
int
zio_resume(spa_t *spa)
{
zio_t *pio;
/*
* Reexecute all previously suspended i/o.
*/
mutex_enter(&spa->spa_suspend_lock);
spa->spa_suspended = ZIO_SUSPEND_NONE;
cv_broadcast(&spa->spa_suspend_cv);
pio = spa->spa_suspend_zio_root;
spa->spa_suspend_zio_root = NULL;
mutex_exit(&spa->spa_suspend_lock);
if (pio == NULL)
return (0);
zio_reexecute(pio);
return (zio_wait(pio));
}
void
zio_resume_wait(spa_t *spa)
{
mutex_enter(&spa->spa_suspend_lock);
while (spa_suspended(spa))
cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
mutex_exit(&spa->spa_suspend_lock);
}
/*
* ==========================================================================
* Gang blocks.
*
* A gang block is a collection of small blocks that looks to the DMU
* like one large block. When zio_dva_allocate() cannot find a block
* of the requested size, due to either severe fragmentation or the pool
* being nearly full, it calls zio_write_gang_block() to construct the
* block from smaller fragments.
*
* A gang block consists of a gang header (zio_gbh_phys_t) and up to
* three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
* an indirect block: it's an array of block pointers. It consumes
* only one sector and hence is allocatable regardless of fragmentation.
* The gang header's bps point to its gang members, which hold the data.
*
* Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
* as the verifier to ensure uniqueness of the SHA256 checksum.
* Critically, the gang block bp's blk_cksum is the checksum of the data,
* not the gang header. This ensures that data block signatures (needed for
* deduplication) are independent of how the block is physically stored.
*
* Gang blocks can be nested: a gang member may itself be a gang block.
* Thus every gang block is a tree in which the root and all interior nodes are
* gang headers, and the leaves are normal blocks that contain user data.
* The root of the gang tree is called the gang leader.
*
* To perform any operation (read, rewrite, free, claim) on a gang block,
* zio_gang_assemble() first assembles the gang tree (minus data leaves)
* in the io_gang_tree field of the original logical i/o by recursively
* reading the gang leader and all gang headers below it. This yields
* an in-core tree containing the contents of every gang header and the
* bps for every constituent of the gang block.
*
* With the gang tree now assembled, zio_gang_issue() just walks the gang tree
* and invokes a callback on each bp. To free a gang block, zio_gang_issue()
* calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
* zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
* zio_read_gang() is a wrapper around zio_read() that omits reading gang
* headers, since we already have those in io_gang_tree. zio_rewrite_gang()
* performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
* of the gang header plus zio_checksum_compute() of the data to update the
* gang header's blk_cksum as described above.
*
* The two-phase assemble/issue model solves the problem of partial failure --
* what if you'd freed part of a gang block but then couldn't read the
* gang header for another part? Assembling the entire gang tree first
* ensures that all the necessary gang header I/O has succeeded before
* starting the actual work of free, claim, or write. Once the gang tree
* is assembled, free and claim are in-memory operations that cannot fail.
*
* In the event that a gang write fails, zio_dva_unallocate() walks the
* gang tree to immediately free (i.e. insert back into the space map)
* everything we've allocated. This ensures that we don't get ENOSPC
* errors during repeated suspend/resume cycles due to a flaky device.
*
* Gang rewrites only happen during sync-to-convergence. If we can't assemble
* the gang tree, we won't modify the block, so we can safely defer the free
* (knowing that the block is still intact). If we *can* assemble the gang
* tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
* each constituent bp and we can allocate a new block on the next sync pass.
*
* In all cases, the gang tree allows complete recovery from partial failure.
* ==========================================================================
*/
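/*
* Illustrative example (hypothetical sizes): a 384K write that cannot be
* allocated contiguously might become a gang block whose one-sector gang
* header points at three 128K gang members:
*
*   gang header (SPA_GANGBLOCKSIZE)
*     +- blkptr[0] -> 128K data
*     +- blkptr[1] -> 128K data
*     +- blkptr[2] -> 128K data
*
* If one of those members cannot be allocated either, it becomes a gang
* block of its own and the tree grows another level.
*/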
static void
zio_gang_issue_func_done(zio_t *zio)
{
abd_free(zio->io_abd);
}
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
if (gn != NULL)
return (pio);
return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
BP_GET_PSIZE(bp), zio_gang_issue_func_done,
NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark));
}
static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
zio_t *zio;
if (gn != NULL) {
abd_t *gbh_abd =
abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark);
/*
* As we rewrite each gang header, the pipeline will compute
* a new gang block header checksum for it; but no one will
* compute a new data checksum, so we do that here. The one
* exception is the gang leader: the pipeline already computed
* its data checksum because that stage precedes gang assembly.
* (Presently, nothing actually uses interior data checksums;
* this is just good hygiene.)
*/
if (gn != pio->io_gang_leader->io_gang_tree) {
abd_t *buf = abd_get_offset(data, offset);
zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
buf, BP_GET_PSIZE(bp));
abd_free(buf);
}
/*
* If we are here to damage data for testing purposes,
* leave the GBH alone so that we can detect the damage.
*/
if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
} else {
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
abd_get_offset(data, offset), BP_GET_PSIZE(bp),
zio_gang_issue_func_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
}
return (zio);
}
static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
(void) gn, (void) data, (void) offset;
zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
ZIO_GANG_CHILD_FLAGS(pio));
if (zio == NULL) {
zio = zio_null(pio, pio->io_spa,
NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
}
return (zio);
}
static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
(void) gn, (void) data, (void) offset;
return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
NULL,
zio_read_gang,
zio_rewrite_gang,
zio_free_gang,
zio_claim_gang,
NULL
};
static void zio_gang_tree_assemble_done(zio_t *zio);
static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn;
ASSERT(*gnpp == NULL);
gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
*gnpp = gn;
return (gn);
}
static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
ASSERT(gn->gn_child[g] == NULL);
zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
kmem_free(gn, sizeof (*gn));
*gnpp = NULL;
}
static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
if (gn == NULL)
return;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
zio_gang_tree_free(&gn->gn_child[g]);
zio_gang_node_free(gnpp);
}
static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
ASSERT(gio->io_gang_leader == gio);
ASSERT(BP_IS_GANG(bp));
zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_gang_tree_assemble_done, gn, gio->io_priority,
ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}
static void
zio_gang_tree_assemble_done(zio_t *zio)
{
zio_t *gio = zio->io_gang_leader;
zio_gang_node_t *gn = zio->io_private;
blkptr_t *bp = zio->io_bp;
ASSERT(gio == zio_unique_parent(zio));
ASSERT(zio->io_child_count == 0);
if (zio->io_error)
return;
/* this ABD was created from a linear buf in zio_gang_tree_assemble */
if (BP_SHOULD_BYTESWAP(bp))
byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
abd_free(zio->io_abd);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (!BP_IS_GANG(gbp))
continue;
zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
}
}
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
uint64_t offset)
{
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
ASSERT(BP_IS_GANG(bp) == !!gn);
ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
/*
* If you're a gang header, your data is in gn->gn_gbh.
* If you're a gang member, your data is in 'data' and gn == NULL.
*/
zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
if (gn != NULL) {
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (BP_IS_HOLE(gbp))
continue;
zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
offset);
offset += BP_GET_PSIZE(gbp);
}
}
if (gn == gio->io_gang_tree)
ASSERT3U(gio->io_size, ==, offset);
if (zio != pio)
zio_nowait(zio);
}
static zio_t *
zio_gang_assemble(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
return (zio);
}
static zio_t *
zio_gang_issue(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
0);
else
zio_gang_tree_free(&zio->io_gang_tree);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
static void
zio_write_gang_member_ready(zio_t *zio)
{
zio_t *pio = zio_unique_parent(zio);
dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize;
zio_t *gio __maybe_unused = zio->io_gang_leader;
if (BP_IS_HOLE(zio->io_bp))
return;
ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
mutex_enter(&pio->io_lock);
for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
ASSERT(DVA_GET_GANG(&pdva[d]));
asize = DVA_GET_ASIZE(&pdva[d]);
asize += DVA_GET_ASIZE(&cdva[d]);
DVA_SET_ASIZE(&pdva[d], asize);
}
mutex_exit(&pio->io_lock);
}
static void
zio_write_gang_done(zio_t *zio)
{
/*
* The io_abd field will be NULL for a zio with no data. The io_flags
* will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
* check for it here as it is cleared in zio_ready.
*/
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
static zio_t *
zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
{
spa_t *spa = pio->io_spa;
blkptr_t *bp = pio->io_bp;
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
zio_gang_node_t *gn, **gnpp;
zio_gbh_phys_t *gbh;
abd_t *gbh_abd;
uint64_t txg = pio->io_txg;
uint64_t resid = pio->io_size;
uint64_t lsize;
int copies = gio->io_prop.zp_copies;
zio_prop_t zp;
int error;
boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
/*
* If one copy was requested, store 2 copies of the GBH, so that we
* can still traverse all the data (e.g. to free or scrub) even if a
* block is damaged. Note that we can't store 3 copies of the GBH in
* all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
*/
int gbh_copies = copies;
if (gbh_copies == 1) {
gbh_copies = MIN(2, spa_max_replication(spa));
}
int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
flags |= METASLAB_ASYNC_ALLOC;
VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
mca_alloc_slots, pio));
/*
* The logical zio has already placed a reservation for
* 'copies' allocation slots but gang blocks may require
* additional copies. These additional copies
* (i.e. gbh_copies - copies) are guaranteed to succeed
* since metaslab_class_throttle_reserve() always allows
* additional reservations for gang blocks.
*/
VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
pio->io_allocator, pio, flags));
}
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
&pio->io_alloc_list, pio, pio->io_allocator);
if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* If we failed to allocate the gang block header then
* we remove any additional allocation reservations that
* we placed here. The original reservation will
* be removed when the logical I/O goes to the ready
* stage.
*/
metaslab_class_throttle_unreserve(mc,
gbh_copies - copies, pio->io_allocator, pio);
}
pio->io_error = error;
return (pio);
}
if (pio == gio) {
gnpp = &gio->io_gang_tree;
} else {
gnpp = pio->io_private;
ASSERT(pio->io_ready == zio_write_gang_member_ready);
}
gn = zio_gang_node_alloc(gnpp);
gbh = gn->gn_gbh;
memset(gbh, 0, SPA_GANGBLOCKSIZE);
gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
/*
* Create the gang header.
*/
zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_write_gang_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
/*
* Create and nowait the gang children.
*/
for (int g = 0; resid != 0; resid -= lsize, g++) {
lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
SPA_MINBLOCKSIZE);
ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
zp.zp_checksum = gio->io_prop.zp_checksum;
zp.zp_compress = ZIO_COMPRESS_OFF;
zp.zp_complevel = gio->io_prop.zp_complevel;
zp.zp_type = DMU_OT_NONE;
zp.zp_level = 0;
zp.zp_copies = gio->io_prop.zp_copies;
zp.zp_dedup = B_FALSE;
zp.zp_dedup_verify = B_FALSE;
zp.zp_nopwrite = B_FALSE;
zp.zp_encrypt = gio->io_prop.zp_encrypt;
zp.zp_byteorder = gio->io_prop.zp_byteorder;
memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
has_data ? abd_get_offset(pio->io_abd, pio->io_size -
resid) : NULL, lsize, lsize, &zp,
zio_write_gang_member_ready, NULL, NULL,
zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* Gang children won't throttle but we should
* account for their work, so reserve an allocation
* slot for them here.
*/
VERIFY(metaslab_class_throttle_reserve(mc,
zp.zp_copies, cio->io_allocator, cio, flags));
}
zio_nowait(cio);
}
/*
* Set pio's pipeline to just wait for zio to finish.
*/
pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
/*
* We didn't allocate this bp, so make sure it doesn't get unmarked.
*/
pio->io_flags &= ~ZIO_FLAG_FASTWRITE;
zio_nowait(zio);
return (pio);
}
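/*
* Worked example (hypothetical numbers) of the child-splitting loop in
* zio_write_gang_block() above, with SPA_GBH_NBLKPTRS == 3 and
* SPA_MINBLOCKSIZE == 512:
*
*   resid = 300K:  g = 0  lsize = P2ROUNDUP(300K / 3, 512) = 100K
*   resid = 200K:  g = 1  lsize = P2ROUNDUP(200K / 2, 512) = 100K
*   resid = 100K:  g = 2  lsize = P2ROUNDUP(100K / 1, 512) = 100K
*
* Each pass takes roughly an equal share of what remains, rounded up to
* the minimum block size, so a 512-aligned residual is always consumed
* within SPA_GBH_NBLKPTRS passes.
*/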
/*
* The zio_nop_write stage in the pipeline determines if allocating a
* new bp is necessary. The nopwrite feature can handle writes in
* either syncing or open context (i.e. zil writes) and as a result is
* mutually exclusive with dedup.
*
* By leveraging a cryptographically secure checksum, such as SHA256, we
* can compare the checksums of the new data and the old to determine if
* allocating a new block is required. Note that our requirements for
* cryptographic strength are fairly weak: there can't be any accidental
* hash collisions, but we don't need to be secure against intentional
* (malicious) collisions. To trigger a nopwrite, you have to be able
* to write the file to begin with, and triggering an incorrect (hash
* collision) nopwrite is no worse than simply writing to the file.
* That said, there are no known attacks against the checksum algorithms
* used for nopwrite, assuming that the salt and the checksums
* themselves remain secret.
*/
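/*
* Illustrative example (assumptions noted, not a code change): with a
* dataset using a nopwrite-capable checksum (e.g. sha256) and compression
* enabled, rewriting a block with byte-identical contents yields
*
*   ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum) == B_TRUE
*
* so zio_nop_write() below resets the pipeline to ZIO_INTERLOCK_PIPELINE,
* sets ZIO_FLAG_NOPWRITE, and no new block is allocated.
*/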
static zio_t *
zio_nop_write(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_IS_HOLE(bp));
ASSERT(BP_GET_LEVEL(bp) == 0);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(zp->zp_nopwrite);
ASSERT(!zp->zp_dedup);
ASSERT(zio->io_bp_override == NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Check to see if the original bp and the new bp have matching
* characteristics (i.e. same checksum, compression algorithms, etc).
* If they don't then just continue with the pipeline which will
* allocate a new bp.
*/
if (BP_IS_HOLE(bp_orig) ||
!(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) ||
BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
zp->zp_copies != BP_GET_NDVAS(bp_orig))
return (zio);
/*
* If the checksums match then reset the pipeline so that we
* avoid allocating a new bp and issuing any I/O.
*/
if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
/*
* If we're overwriting a block that is currently on an
* indirect vdev, then ignore the nopwrite request and
* allow a new block to be allocated on a concrete vdev.
*/
spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
vdev_t *tvd = vdev_lookup_top(zio->io_spa,
DVA_GET_VDEV(&bp_orig->blk_dva[d]));
if (tvd->vdev_ops == &vdev_indirect_ops) {
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
return (zio);
}
}
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
*bp = *bp_orig;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
zio->io_flags |= ZIO_FLAG_NOPWRITE;
}
return (zio);
}
/*
* ==========================================================================
* Block Reference Table
* ==========================================================================
*/
static zio_t *
zio_brt_free(zio_t *zio)
{
blkptr_t *bp;
bp = zio->io_bp;
if (BP_GET_LEVEL(bp) > 0 ||
BP_IS_METADATA(bp) ||
!brt_maybe_exists(zio->io_spa, bp)) {
return (zio);
}
if (!brt_entry_decref(zio->io_spa, bp)) {
/*
* This isn't the last reference, so we cannot free
* the data yet.
*/
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
}
return (zio);
}
/*
* ==========================================================================
* Dedup
* ==========================================================================
*/
static void
zio_ddt_child_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp;
zio_t *pio = zio_unique_parent(zio);
mutex_enter(&pio->io_lock);
ddp = ddt_phys_select(dde, bp);
if (zio->io_error == 0)
ddt_phys_clear(ddp); /* this ddp doesn't need repair */
if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
dde->dde_repair_abd = zio->io_abd;
else
abd_free(zio->io_abd);
mutex_exit(&pio->io_lock);
}
static zio_t *
zio_ddt_read_start(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = ddt_repair_start(ddt, bp);
ddt_phys_t *ddp = dde->dde_phys;
ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
blkptr_t blk;
ASSERT(zio->io_vsd == NULL);
zio->io_vsd = dde;
if (ddp_self == NULL)
return (zio);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
continue;
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
&blk);
zio_nowait(zio_read(zio, zio->io_spa, &blk,
abd_alloc_for_io(zio->io_size, B_TRUE),
zio->io_size, zio_ddt_child_read_done, dde,
zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
}
return (zio);
}
zio_nowait(zio_read(zio, zio->io_spa, bp,
zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
return (zio);
}
static zio_t *
zio_ddt_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = zio->io_vsd;
if (ddt == NULL) {
ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
return (zio);
}
if (dde == NULL) {
zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
if (dde->dde_repair_abd != NULL) {
abd_copy(zio->io_abd, dde->dde_repair_abd,
zio->io_size);
zio->io_child_error[ZIO_CHILD_DDT] = 0;
}
ddt_repair_done(ddt, dde);
zio->io_vsd = NULL;
}
ASSERT(zio->io_vsd == NULL);
return (zio);
}
static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
spa_t *spa = zio->io_spa;
boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
ASSERT(!(zio->io_bp_override && do_raw));
/*
* Note: we compare the original data, not the transformed data,
* because when zio->io_bp is an override bp, we will not have
* pushed the I/O transforms. That's an important optimization
* because otherwise we'd compress/encrypt all dmu_sync() data twice.
* However, we should never get a raw, override zio so in these
* cases we can compare the io_abd directly. This is useful because
* it allows us to do dedup verification even if we don't have access
* to the original data (for instance, if the encryption keys aren't
* loaded).
*/
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
zio_t *lio = dde->dde_lead_zio[p];
if (lio != NULL && do_raw) {
return (lio->io_size != zio->io_size ||
abd_cmp(zio->io_abd, lio->io_abd) != 0);
} else if (lio != NULL) {
return (lio->io_orig_size != zio->io_orig_size ||
abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
}
}
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
ddt_phys_t *ddp = &dde->dde_phys[p];
if (ddp->ddp_phys_birth != 0 && do_raw) {
blkptr_t blk = *zio->io_bp;
uint64_t psize;
abd_t *tmpabd;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
psize = BP_GET_PSIZE(&blk);
if (psize != zio->io_size)
return (B_TRUE);
ddt_exit(ddt);
tmpabd = abd_alloc_for_io(psize, B_TRUE);
error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_RAW, &zio->io_bookmark));
if (error == 0) {
if (abd_cmp(tmpabd, zio->io_abd) != 0)
error = SET_ERROR(ENOENT);
}
abd_free(tmpabd);
ddt_enter(ddt);
return (error != 0);
} else if (ddp->ddp_phys_birth != 0) {
arc_buf_t *abuf = NULL;
arc_flags_t aflags = ARC_FLAG_WAIT;
blkptr_t blk = *zio->io_bp;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
return (B_TRUE);
ddt_exit(ddt);
error = arc_read(NULL, spa, &blk,
arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&aflags, &zio->io_bookmark);
if (error == 0) {
if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
zio->io_orig_size) != 0)
error = SET_ERROR(ENOENT);
arc_buf_destroy(abuf, &abuf);
}
ddt_enter(ddt);
return (error != 0);
}
}
return (B_FALSE);
}
static void
zio_ddt_child_write_ready(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
zio_t *pio;
if (zio->io_error)
return;
ddt_enter(ddt);
ASSERT(dde->dde_lead_zio[p] == zio);
ddt_phys_fill(ddp, zio->io_bp);
zio_link_t *zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
ddt_exit(ddt);
}
static void
zio_ddt_child_write_done(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
ddt_enter(ddt);
ASSERT(ddp->ddp_refcnt == 0);
ASSERT(dde->dde_lead_zio[p] == zio);
dde->dde_lead_zio[p] = NULL;
if (zio->io_error == 0) {
zio_link_t *zl = NULL;
while (zio_walk_parents(zio, &zl) != NULL)
ddt_phys_addref(ddp);
} else {
ddt_phys_clear(ddp);
}
ddt_exit(ddt);
}
static zio_t *
zio_ddt_write(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t txg = zio->io_txg;
zio_prop_t *zp = &zio->io_prop;
int p = zp->zp_copies;
zio_t *cio = NULL;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_TRUE);
ddp = &dde->dde_phys[p];
if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
/*
* If we're using a weak checksum, upgrade to a strong checksum
* and try again. If we're already using a strong checksum,
* we can't resolve it, so just convert to an ordinary write.
* (And automatically e-mail a paper to Nature?)
*/
if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP)) {
zp->zp_checksum = spa_dedup_checksum(spa);
zio_pop_transforms(zio);
zio->io_stage = ZIO_STAGE_OPEN;
BP_ZERO(bp);
} else {
zp->zp_dedup = B_FALSE;
BP_SET_DEDUP(bp, B_FALSE);
}
ASSERT(!BP_GET_DEDUP(bp));
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}
if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
if (ddp->ddp_phys_birth != 0)
ddt_bp_fill(ddp, bp, txg);
if (dde->dde_lead_zio[p] != NULL)
zio_add_child(zio, dde->dde_lead_zio[p]);
else
ddt_phys_addref(ddp);
} else if (zio->io_bp_override) {
ASSERT(bp->blk_birth == txg);
ASSERT(BP_EQUAL(bp, zio->io_bp_override));
ddt_phys_fill(ddp, bp);
ddt_phys_addref(ddp);
} else {
cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
zio->io_orig_size, zio->io_orig_size, zp,
zio_ddt_child_write_ready, NULL, NULL,
zio_ddt_child_write_done, dde, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
dde->dde_lead_zio[p] = cio;
}
ddt_exit(ddt);
zio_nowait(cio);
return (zio);
}
static ddt_entry_t *freedde; /* for debugging */
static zio_t *
zio_ddt_free(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ddt_enter(ddt);
freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
if (dde) {
ddp = ddt_phys_select(dde, bp);
if (ddp)
ddt_phys_decref(ddp);
}
ddt_exit(ddt);
return (zio);
}
/*
* ==========================================================================
* Allocate and free blocks
* ==========================================================================
*/
static zio_t *
zio_io_to_allocate(spa_t *spa, int allocator)
{
zio_t *zio;
ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
if (zio == NULL)
return (NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Try to place a reservation for this zio. If we're unable to
* reserve then we throttle.
*/
ASSERT3U(zio->io_allocator, ==, allocator);
if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
zio->io_prop.zp_copies, allocator, zio, 0)) {
return (NULL);
}
avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
return (zio);
}
static zio_t *
zio_dva_throttle(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_t *nio;
metaslab_class_t *mc;
/* locate an appropriate allocation class */
mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
!mc->mc_alloc_throttle_enabled ||
zio->io_child_type == ZIO_CHILD_GANG ||
zio->io_flags & ZIO_FLAG_NODATA) {
return (zio);
}
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
ASSERT3U(zio->io_queued_timestamp, >, 0);
ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
zbookmark_phys_t *bm = &zio->io_bookmark;
/*
* We want to try to use as many allocators as possible to help improve
* performance, but we also want logically adjacent IOs to be physically
* adjacent to improve sequential read performance. We chunk each object
* into 2^20 block regions, and then hash based on the objset, object,
* level, and region to accomplish both of these goals.
*/
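/*
* Worked example (hypothetical values): with spa_alloc_count == 4, writes
* to block IDs 100 and 1000 of the same objset/object/level share
* (zb_blkid >> 20) == 0, hash identically, and land on the same allocator;
* a write to block ID 2000000 falls in region 1 and may hash to a
* different allocator.
*/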
int allocator = (uint_t)cityhash4(bm->zb_objset, bm->zb_object,
bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
zio->io_allocator = allocator;
zio->io_metaslab_class = mc;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
nio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
return (nio);
}
static void
zio_allocate_dispatch(spa_t *spa, int allocator)
{
zio_t *zio;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
zio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
if (zio == NULL)
return;
ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
ASSERT0(zio->io_error);
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
}
static zio_t *
zio_dva_allocate(zio_t *zio)
{
spa_t *spa = zio->io_spa;
metaslab_class_t *mc;
blkptr_t *bp = zio->io_bp;
int error;
int flags = 0;
if (zio->io_gang_leader == NULL) {
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
}
ASSERT(BP_IS_HOLE(bp));
ASSERT0(BP_GET_NDVAS(bp));
ASSERT3U(zio->io_prop.zp_copies, >, 0);
ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0;
if (zio->io_flags & ZIO_FLAG_NODATA)
flags |= METASLAB_DONT_THROTTLE;
if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
flags |= METASLAB_GANG_CHILD;
if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
flags |= METASLAB_ASYNC_ALLOC;
/*
* if not already chosen, locate an appropriate allocation class
*/
mc = zio->io_metaslab_class;
if (mc == NULL) {
mc = spa_preferred_class(spa, zio->io_size,
zio->io_prop.zp_type, zio->io_prop.zp_level,
zio->io_prop.zp_zpl_smallblk);
zio->io_metaslab_class = mc;
}
/*
* Try allocating the block in the usual metaslab class.
* If that's full, allocate it in the normal class.
* If that's full, allocate as a gang block,
* and if all are full, the allocation fails (which shouldn't happen).
*
* Note that we do not fall back on embedded slog (ZIL) space, to
* preserve unfragmented slog space, which is critical for decent
* sync write performance. If a log allocation fails, we will fall
* back to spa_sync(), which is abysmal for performance.
*/
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
/*
* Fall back to the normal class when an alloc class is full
*/
if (error == ENOSPC && mc != spa_normal_class(spa)) {
/*
* If throttling, transfer reservation over to normal class.
* The io_allocator slot can remain the same even though we
* are switching classes.
*/
if (mc->mc_alloc_throttle_enabled &&
(zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
metaslab_class_throttle_unreserve(mc,
zio->io_prop.zp_copies, zio->io_allocator, zio);
zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
VERIFY(metaslab_class_throttle_reserve(
spa_normal_class(spa),
zio->io_prop.zp_copies, zio->io_allocator, zio,
flags | METASLAB_MUST_RESERVE));
}
zio->io_metaslab_class = mc = spa_normal_class(spa);
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying normal class: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
}
if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying ganging: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
return (zio_write_gang_block(zio, mc));
}
if (error != 0) {
if (error != ENOSPC ||
(zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
"size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
zio->io_error = error;
}
return (zio);
}
static zio_t *
zio_dva_free(zio_t *zio)
{
metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
return (zio);
}
static zio_t *
zio_dva_claim(zio_t *zio)
{
int error;
error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
if (error)
zio->io_error = error;
return (zio);
}
/*
* Undo an allocation. This is used by zio_done() when an I/O fails
* and we want to give back the block we just allocated.
* This handles both normal blocks and gang blocks.
*/
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp))
metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
if (gn != NULL) {
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
zio_dva_unallocate(zio, gn->gn_child[g],
&gn->gn_gbh->zg_blkptr[g]);
}
}
}
/*
* Try to allocate an intent log block. Return 0 on success, errno on failure.
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
uint64_t size, boolean_t *slog)
{
int error = 1;
zio_alloc_list_t io_alloc_list;
ASSERT(txg > spa_syncing_txg(spa));
metaslab_trace_init(&io_alloc_list);
/*
* Block pointer fields are useful to metaslabs for stats and debugging.
* Fill in the obvious ones before calling into metaslab_alloc().
*/
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_PSIZE(new_bp, size);
BP_SET_LEVEL(new_bp, 0);
/*
* When allocating a zil block, we don't have information about
* the final destination of the block except the objset it's part
* of, so we just hash the objset ID to pick the allocator to get
* some parallelism.
*/
int flags = METASLAB_FASTWRITE | METASLAB_ZIL;
int allocator = (uint_t)cityhash4(0, 0, 0,
os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
txg, NULL, flags, &io_alloc_list, NULL, allocator);
*slog = (error == 0);
if (error != 0) {
error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
if (error != 0) {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
BP_SET_PSIZE(new_bp, size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_LEVEL(new_bp, 0);
BP_SET_DEDUP(new_bp, 0);
BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
/*
* encrypted blocks will require an IV and salt. We generate
* these now since we will not be rewriting the bp at
* rewrite time.
*/
if (os->os_encrypted) {
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t salt[ZIO_DATA_SALT_LEN];
BP_SET_CRYPT(new_bp, B_TRUE);
VERIFY0(spa_crypt_get_salt(spa,
dmu_objset_id(os), salt));
VERIFY0(zio_crypt_generate_iv(iv));
zio_crypt_encode_params_bp(new_bp, salt, iv);
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
"size %llu, error %d", spa_name(spa), (u_longlong_t)size,
error);
}
return (error);
}
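/*
* Illustrative usage sketch (schematic; the real caller lives in the ZIL
* code): allocate an intent log block and note whether it came from the
* dedicated log class:
*
*   blkptr_t new_bp;
*   boolean_t slog;
*   int err = zio_alloc_zil(spa, os, txg, &new_bp, size, &slog);
*
* On success, new_bp describes the block; slog is B_TRUE only when the
* first (dedicated log class) allocation succeeded, before the fallback
* to the embedded log and normal classes above.
*/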
/*
* ==========================================================================
* Read and write to physical devices
* ==========================================================================
*/
/*
* Issue an I/O to the underlying vdev. Typically the issue pipeline
* stops after this stage and will resume upon I/O completion.
* However, there are instances where the vdev layer may need to
* continue the pipeline when an I/O was not issued. Since the I/O
* that was sent to the vdev layer might be different than the one
* currently active in the pipeline (see vdev_queue_io()), we explicitly
* force the underlying vdev layers to call either zio_execute() or
* zio_interrupt() to ensure that the pipeline continues with the correct I/O.
*/
static zio_t *
zio_vdev_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
uint64_t align;
spa_t *spa = zio->io_spa;
zio->io_delay = 0;
ASSERT(zio->io_error == 0);
ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
if (vd == NULL) {
if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
/*
* The mirror_ops handle multiple DVAs in a single BP.
*/
vdev_mirror_ops.vdev_op_io_start(zio);
return (NULL);
}
ASSERT3P(zio->io_logical, !=, zio);
if (zio->io_type == ZIO_TYPE_WRITE) {
ASSERT(spa->spa_trust_config);
/*
* Note: the code can handle other kinds of writes,
* but we don't expect them.
*/
if (zio->io_vd->vdev_noalloc) {
ASSERT(zio->io_flags &
(ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
}
}
align = 1ULL << vd->vdev_top->vdev_ashift;
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
P2PHASE(zio->io_size, align) != 0) {
/* Transform logical writes to be a full physical block size. */
uint64_t asize = P2ROUNDUP(zio->io_size, align);
abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
ASSERT(vd == vd->vdev_top);
if (zio->io_type == ZIO_TYPE_WRITE) {
abd_copy(abuf, zio->io_abd, zio->io_size);
abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
}
zio_push_transform(zio, abuf, asize, asize, zio_subblock);
}
/*
* If this is not a physical io, make sure that it is properly aligned
* before proceeding.
*/
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
ASSERT0(P2PHASE(zio->io_offset, align));
ASSERT0(P2PHASE(zio->io_size, align));
} else {
/*
* For physical writes, we allow 512b aligned writes and assume
* the device will perform a read-modify-write as necessary.
*/
ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
}
VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
/*
* If this is a repair I/O, and there's no self-healing involved --
* that is, we're just resilvering what we expect to resilver --
* then don't do the I/O unless zio's txg is actually in vd's DTL.
* This prevents spurious resilvering.
*
* There are a few ways that we can end up creating these spurious
* resilver i/os:
*
* 1. A resilver i/o will be issued if any DVA in the BP has a
* dirty DTL. The mirror code will issue resilver writes to
* each DVA, including the one(s) that are not on vdevs with dirty
* DTLs.
*
* 2. With nested replication, which happens when we have a
* "replacing" or "spare" vdev that's a child of a mirror or raidz.
* For example, given mirror(replacing(A+B), C), it's likely that
* only A is out of date (it's the new device). In this case, we'll
* read from C, then use the data to resilver A+B -- but we don't
* actually want to resilver B, just A. The top-level mirror has no
* way to know this, so instead we just discard unnecessary repairs
* as we work our way down the vdev tree.
*
* 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
* The same logic applies to any form of nested replication: ditto
* + mirror, RAID-Z + replacing, etc.
*
* However, indirect vdevs point off to other vdevs which may have
* DTL's, so we never bypass them. The child i/os on concrete vdevs
* will be properly bypassed instead.
*
* Leaf DTL_PARTIAL can be empty when a legitimate write comes from
* a dRAID spare vdev. For example, when a dRAID spare is first
* used, its spare blocks need to be written to, but the leaf vdevs
* of such blocks can have an empty DTL_PARTIAL.
*
* There seemed to be no clean way to allow such writes while bypassing
* spurious ones. At this point, just avoid all bypassing for dRAID
* for correctness.
*/
if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
!(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
zio->io_txg != 0 && /* not a delegated i/o */
vd->vdev_ops != &vdev_indirect_ops &&
vd->vdev_top->vdev_ops != &vdev_draid_ops &&
!vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
zio_vdev_io_bypass(zio);
return (zio);
}
/*
* Select the next best leaf I/O to process. Distributed spares are
* excluded since they dispatch the I/O directly to a leaf vdev after
* applying the dRAID mapping.
*/
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops &&
(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE ||
zio->io_type == ZIO_TYPE_TRIM)) {
- if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
- return (zio);
-
if ((zio = vdev_queue_io(zio)) == NULL)
return (NULL);
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return (NULL);
}
zio->io_delay = gethrtime();
}
vd->vdev_ops->vdev_op_io_start(zio);
return (NULL);
}
static zio_t *
zio_vdev_io_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
boolean_t unexpected_error = B_FALSE;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM);
if (zio->io_delay)
zio->io_delay = gethrtime() - zio->io_delay;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
vdev_queue_io_done(zio);
- if (zio->io_type == ZIO_TYPE_WRITE)
- vdev_cache_write(zio);
-
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_device_injections(vd, zio,
EIO, EILSEQ);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_label_injection(zio, EIO);
if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
unexpected_error = B_TRUE;
}
}
}
ops->vdev_op_io_done(zio);
if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
VERIFY(vdev_probe(vd, zio) == NULL);
return (zio);
}
/*
* This function is used to change the priority of an existing zio that is
* currently in-flight. This is used by the arc to upgrade priority in the
* event that a demand read is made for a block that is currently queued
* as a scrub or async read IO. Otherwise, the high priority read request
* would end up having to wait for the lower priority IO.
*/
void
zio_change_priority(zio_t *pio, zio_priority_t priority)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
vdev_queue_change_io_priority(pio, priority);
} else {
pio->io_priority = priority;
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_change_priority(cio, priority);
}
mutex_exit(&pio->io_lock);
}
/*
* For non-raidz ZIOs, we can just copy aside the bad data read from the
* disk, and use that to finish the checksum ereport later.
*/
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
const abd_t *good_buf)
{
/* no processing needed */
zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
{
void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
abd_copy(abd, zio->io_abd, zio->io_size);
zcr->zcr_cbinfo = zio->io_size;
zcr->zcr_cbdata = abd;
zcr->zcr_finish = zio_vsd_default_cksum_finish;
zcr->zcr_free = zio_abd_free;
}
static zio_t *
zio_vdev_io_assess(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_exit(zio->io_spa, SCL_ZIO, zio);
if (zio->io_vsd != NULL) {
zio->io_vsd_ops->vsd_free(zio);
zio->io_vsd = NULL;
}
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_fault_injection(zio, EIO);
/*
* If the I/O failed, determine whether we should attempt to retry it.
*
* On retry, we cut in line in the issue queue, since we don't want
* compression/checksumming/etc. work to prevent our (cheap) IO reissue.
*/
if (zio->io_error && vd == NULL &&
!(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
zio->io_error = 0;
- zio->io_flags |= ZIO_FLAG_IO_RETRY |
- ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
+ zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
zio_requeue_io_start_cut_in_line);
return (NULL);
}
/*
* If we got an error on a leaf device, convert it to ENXIO
* if the device is not accessible at all.
*/
if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
!vdev_accessible(vd, zio))
zio->io_error = SET_ERROR(ENXIO);
/*
* If we can't write to an interior vdev (mirror or RAID-Z),
* set vdev_cant_write so that we stop trying to allocate from it.
*/
if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
"cant_write=TRUE due to write failure with ENXIO",
zio);
vd->vdev_cant_write = B_TRUE;
}
/*
* If a cache flush returns ENOTSUP or ENOTTY, we know that no future
* attempts will ever succeed. In this case we set a persistent
* boolean flag so that we don't bother with it in the future.
*/
if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
zio->io_type == ZIO_TYPE_IOCTL &&
zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
vd->vdev_nowritecache = B_TRUE;
if (zio->io_error)
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
zio->io_physdone != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
zio->io_physdone(zio->io_logical);
}
return (zio);
}
void
zio_vdev_io_reissue(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_stage >>= 1;
}
void
zio_vdev_io_redone(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
zio->io_stage >>= 1;
}
void
zio_vdev_io_bypass(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}
/*
* ==========================================================================
* Encrypt and store encryption parameters
* ==========================================================================
*/
/*
* This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
* managing the storage of encryption parameters and passing them to the
* lower-level encryption functions.
*/
static zio_t *
zio_encrypt(zio_t *zio)
{
zio_prop_t *zp = &zio->io_prop;
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_GET_PSIZE(bp);
uint64_t dsobj = zio->io_bookmark.zb_objset;
dmu_object_type_t ot = BP_GET_TYPE(bp);
void *enc_buf = NULL;
abd_t *eabd = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/* the root zio already encrypted the data */
if (zio->io_child_type == ZIO_CHILD_GANG)
return (zio);
/* only ZIL blocks are re-encrypted on rewrite */
if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
return (zio);
if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
BP_SET_CRYPT(bp, B_FALSE);
return (zio);
}
/* if we are doing raw encryption set the provided encryption params */
if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
ASSERT0(BP_GET_LEVEL(bp));
BP_SET_CRYPT(bp, B_TRUE);
BP_SET_BYTEORDER(bp, zp->zp_byteorder);
if (ot != DMU_OT_OBJSET)
zio_crypt_encode_mac_bp(bp, zp->zp_mac);
/* dnode blocks must be written out in the provided byteorder */
if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
ot == DMU_OT_DNODE) {
void *bswap_buf = zio_buf_alloc(psize);
abd_t *babd = abd_get_from_buf(bswap_buf, psize);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
psize);
abd_take_ownership_of_buf(babd, B_TRUE);
zio_push_transform(zio, babd, psize, psize, NULL);
}
if (DMU_OT_IS_ENCRYPTED(ot))
zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
return (zio);
}
/* indirect blocks only maintain a cksum of the lower level MACs */
if (BP_GET_LEVEL(bp) > 0) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Objset blocks are a special case since they have 2 256-bit MACs
* embedded within them.
*/
if (ot == DMU_OT_OBJSET) {
ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
return (zio);
}
/* unencrypted object types are only authenticated with a MAC */
if (!DMU_OT_IS_ENCRYPTED(ot)) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Later passes of sync-to-convergence may decide to rewrite data
* in place to avoid more disk reallocations. This presents a problem
* for encryption because this constitutes rewriting the new data with
* the same encryption key and IV. However, this only applies to blocks
* in the MOS (particularly the spacemaps) and we do not encrypt the
* MOS. We assert that the zio is allocating or an intent log write
* to enforce this.
*/
ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
ASSERT3U(psize, !=, 0);
enc_buf = zio_buf_alloc(psize);
eabd = abd_get_from_buf(enc_buf, psize);
abd_take_ownership_of_buf(eabd, B_TRUE);
/*
* For an explanation of what encryption parameters are stored
* where, see the block comment in zio_crypt.c.
*/
if (ot == DMU_OT_INTENT_LOG) {
zio_crypt_decode_params_bp(bp, salt, iv);
} else {
BP_SET_CRYPT(bp, B_TRUE);
}
/* Perform the encryption. This should not fail */
VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
/* encode encryption metadata into the bp */
if (ot == DMU_OT_INTENT_LOG) {
/*
* ZIL blocks store the MAC in the embedded checksum, so the
* transform must always be applied.
*/
zio_crypt_encode_mac_zil(enc_buf, mac);
zio_push_transform(zio, eabd, psize, psize, NULL);
} else {
BP_SET_CRYPT(bp, B_TRUE);
zio_crypt_encode_params_bp(bp, salt, iv);
zio_crypt_encode_mac_bp(bp, mac);
if (no_crypt) {
ASSERT3U(ot, ==, DMU_OT_DNODE);
abd_free(eabd);
} else {
zio_push_transform(zio, eabd, psize, psize, NULL);
}
}
return (zio);
}
/*
* ==========================================================================
* Generate and verify checksums
* ==========================================================================
*/
static zio_t *
zio_checksum_generate(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum;
if (bp == NULL) {
/*
* This is zio_write_phys().
* We're either generating a label checksum, or none at all.
*/
checksum = zio->io_prop.zp_checksum;
if (checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT(checksum == ZIO_CHECKSUM_LABEL);
} else {
if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
ASSERT(!IO_IS_ALLOCATING(zio));
checksum = ZIO_CHECKSUM_GANG_HEADER;
} else {
checksum = BP_GET_CHECKSUM(bp);
}
}
zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
return (zio);
}
static zio_t *
zio_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t info;
blkptr_t *bp = zio->io_bp;
int error;
ASSERT(zio->io_vd != NULL);
if (bp == NULL) {
/*
* This is zio_read_phys().
* We're either verifying a label checksum, or nothing at all.
*/
if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
}
if ((error = zio_checksum_error(zio, &info)) != 0) {
zio->io_error = error;
if (error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_start_checksum(zio->io_spa,
zio->io_vd, &zio->io_bookmark, zio,
zio->io_offset, zio->io_size, &info);
}
}
return (zio);
}
/*
* Called by RAID-Z to ensure we don't compute the checksum twice.
*/
void
zio_checksum_verified(zio_t *zio)
{
zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
/*
* ==========================================================================
* Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
* An error of 0 indicates success. ENXIO indicates whole-device failure,
* which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
* indicate errors that are specific to one I/O, and most likely permanent.
* Any other error is presumed to be worse because we weren't expecting it.
* ==========================================================================
*/
int
zio_worst_error(int e1, int e2)
{
static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
int r1, r2;
for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
if (e1 == zio_error_rank[r1])
break;
for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
if (e2 == zio_error_rank[r2])
break;
return (r1 > r2 ? e1 : e2);
}
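/*
* For example, zio_worst_error(ENXIO, ECKSUM) evaluates to ECKSUM, while an
* unexpected errno such as EINVAL falls off the end of zio_error_rank[] and
* is therefore treated as worse than any ranked error.
*/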
/*
* ==========================================================================
* I/O completion
* ==========================================================================
*/
static zio_t *
zio_ready(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
ZIO_WAIT_READY)) {
return (NULL);
}
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
zio->io_ready(zio);
}
if (bp != NULL && bp != &zio->io_bp_copy)
zio->io_bp_copy = *bp;
if (zio->io_error != 0) {
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_metaslab_class != NULL);
/*
* We were unable to allocate anything, unreserve and
* issue the next I/O to allocate.
*/
metaslab_class_throttle_unreserve(
zio->io_metaslab_class, zio->io_prop.zp_copies,
zio->io_allocator, zio);
zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
}
}
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_READY] = 1;
pio = zio_walk_parents(zio, &zl);
mutex_exit(&zio->io_lock);
/*
* As we notify zio's parents, new parents could be added.
* New parents go to the head of zio's io_parent_list, however,
* so we will (correctly) not notify them. The remainder of zio's
* io_parent_list, from 'pio_next' onward, cannot change because
* all parents must wait for us to be done before they can be done.
*/
for (; pio != NULL; pio = pio_next) {
pio_next = zio_walk_parents(zio, &zl);
zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
}
if (zio->io_flags & ZIO_FLAG_NODATA) {
if (bp != NULL && BP_IS_GANG(bp)) {
zio->io_flags &= ~ZIO_FLAG_NODATA;
} else {
ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
}
}
if (zio_injection_enabled &&
zio->io_spa->spa_syncing_txg == zio->io_txg)
zio_handle_ignored_writes(zio);
return (zio);
}
/*
* Update the allocation throttle accounting.
*/
static void
zio_dva_throttle_done(zio_t *zio)
{
zio_t *lio __maybe_unused = zio->io_logical;
zio_t *pio = zio_unique_parent(zio);
vdev_t *vd = zio->io_vd;
int flags = METASLAB_ASYNC_ALLOC;
ASSERT3P(zio->io_bp, !=, NULL);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
ASSERT(vd != NULL);
ASSERT3P(vd, ==, vd->vdev_top);
ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
/*
* Parents of gang children can have two flavors -- ones that
* allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
* and ones that allocated the constituent blocks. The allocation
* throttle needs to know the allocating parent zio so we must find
* it here.
*/
if (pio->io_child_type == ZIO_CHILD_GANG) {
/*
* If our parent is a rewrite gang child then our grandparent
* would have been the one that performed the allocation.
*/
if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
pio = zio_unique_parent(pio);
flags |= METASLAB_GANG_CHILD;
}
ASSERT(IO_IS_ALLOCATING(pio));
ASSERT3P(zio, !=, zio->io_logical);
ASSERT(zio->io_logical != NULL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
ASSERT(zio->io_metaslab_class != NULL);
mutex_enter(&pio->io_lock);
metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
pio->io_allocator, B_TRUE);
mutex_exit(&pio->io_lock);
metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
pio->io_allocator, pio);
/*
* Call into the pipeline to see if there is more work that
* needs to be done. If there is work to be done it will be
* dispatched to another taskq thread.
*/
zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
static zio_t *
zio_done(zio_t *zio)
{
/*
* Always attempt to keep stack usage minimal here since
* we can be called recursively up to 19 levels deep.
*/
const uint64_t psize = zio->io_size;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
/*
* If our children haven't all completed,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
return (NULL);
}
/*
* If the allocation throttle is enabled, then update the accounting.
* We only track child I/Os that are part of an allocating async
* write. We must do this since the allocation is performed
* by the logical I/O but the actual write is done by child I/Os.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
zio->io_child_type == ZIO_CHILD_VDEV) {
ASSERT(zio->io_metaslab_class != NULL);
ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
zio_dva_throttle_done(zio);
}
/*
* If the allocation throttle is enabled, verify that
* we have decremented the refcounts for every I/O that was throttled.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_bp != NULL);
metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
zio->io_allocator);
VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
}
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
ASSERT(zio->io_children[c][w] == 0);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(zio->io_bp->blk_pad[0] == 0);
ASSERT(zio->io_bp->blk_pad[1] == 0);
ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
sizeof (blkptr_t)) == 0 ||
(zio->io_bp == zio_unique_parent(zio)->io_bp));
if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
zio->io_bp_override == NULL &&
!(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
ASSERT3U(zio->io_prop.zp_copies, <=,
BP_GET_NDVAS(zio->io_bp));
ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
(BP_COUNT_GANG(zio->io_bp) ==
BP_GET_NDVAS(zio->io_bp)));
}
if (zio->io_flags & ZIO_FLAG_NOPWRITE)
VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
}
/*
* If there were child vdev/gang/ddt errors, they apply to us now.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
/*
* If the I/O on the transformed data was successful, generate any
* checksum reports now while we still have the transformed data.
*/
if (zio->io_error == 0) {
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
uint64_t align = zcr->zcr_align;
uint64_t asize = P2ROUNDUP(psize, align);
abd_t *adata = zio->io_abd;
if (adata != NULL && asize != psize) {
adata = abd_alloc(asize, B_TRUE);
abd_copy(adata, zio->io_abd, psize);
abd_zero_off(adata, psize, asize - psize);
}
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, adata);
zfs_ereport_free_checksum(zcr);
if (adata != NULL && asize != psize)
abd_free(adata);
}
}
zio_pop_transforms(zio); /* note: may set zio->io_error */
vdev_stat_update(zio, psize);
/*
* If this I/O is attached to a particular vdev and is slow, exceeding
* zio_slow_io_ms (30 seconds by default) to complete, post an error
* describing the I/O delay.
* We ignore these errors if the device is currently unavailable.
*/
if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
/*
* We want to only increment our slow IO counters if
* the IO is valid (i.e. not if the drive is removed).
*
* zfs_ereport_post() will also do these checks, but
* it can also ratelimit and have other failures, so we
* need to increment the slow_io counters independent
* of it.
*/
if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, zio)) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_slow_ios++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, &zio->io_bookmark,
zio, 0);
}
}
}
if (zio->io_error) {
/*
* If this I/O is attached to a particular vdev,
* generate an error message describing the I/O failure
* at the block level. We ignore these errors if the
* device is currently unavailable.
*/
if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
!vdev_is_dead(zio->io_vd)) {
int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
if (ret != EALREADY) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
if (zio->io_type == ZIO_TYPE_READ)
zio->io_vd->vdev_stat.vs_read_errors++;
else if (zio->io_type == ZIO_TYPE_WRITE)
zio->io_vd->vdev_stat.vs_write_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
}
}
if ((zio->io_error == EIO || !(zio->io_flags &
(ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
zio == zio->io_logical) {
/*
* For logical I/O requests, tell the SPA to log the
* error and generate a logical data ereport.
*/
spa_log_error(zio->io_spa, &zio->io_bookmark,
&zio->io_bp->blk_birth);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
}
}
if (zio->io_error && zio == zio->io_logical) {
/*
* Determine whether zio should be reexecuted. This will
* propagate all the way to the root via zio_notify_parent().
*/
ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (IO_IS_ALLOCATING(zio) &&
!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
if (zio->io_error != ENOSPC)
zio->io_reexecute |= ZIO_REEXECUTE_NOW;
else
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
}
if ((zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_FREE) &&
!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
zio->io_error == ENXIO &&
spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
/*
* Here is a possibly good place to attempt to do
* either combinatorial reconstruction or error correction
* based on checksums. It also might be a good place
* to send out preliminary ereports before we suspend
* processing.
*/
}
/*
* If there were logical child errors, they apply to us now.
* We defer this until now to avoid conflating logical child
* errors with errors that happened to the zio itself when
* updating vdev stats and reporting FMA events above.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
if ((zio->io_error || zio->io_reexecute) &&
IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
!(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
zio_gang_tree_free(&zio->io_gang_tree);
/*
* Godfather I/Os should never suspend.
*/
if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
if (zio->io_reexecute) {
/*
* This is a logical I/O that wants to reexecute.
*
* Reexecute is top-down. When an i/o fails, if it's not
* the root, it simply notifies its parent and sticks around.
* The parent, seeing that it still has children in zio_done(),
* does the same. This percolates all the way up to the root.
* The root i/o will reexecute or suspend the entire tree.
*
* This approach ensures that zio_reexecute() honors
* all the original i/o dependency relationships, e.g.
* parents not executing until children are ready.
*/
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
zio->io_gang_leader = NULL;
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* "The Godfather" I/O monitors its children but is
* not a true parent to them. It will track them through
* the pipeline but severs its ties whenever they get into
* trouble (e.g. suspended). This allows "The Godfather"
* I/O to return status without blocking.
*/
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL;
pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
zio_remove_child(pio, zio, remove_zl);
/*
* This is a rare code path, so we don't
* bother with "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
NULL);
}
}
if ((pio = zio_unique_parent(zio)) != NULL) {
/*
* We're not a root i/o, so there's nothing to do
* but notify our parent. Don't propagate errors
* upward since we haven't permanently failed yet.
*/
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
/*
* This is a rare code path, so we don't bother with
* "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
/*
* We'd fail again if we reexecuted now, so suspend
* until conditions improve (e.g. device comes online).
*/
zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
} else {
/*
* Reexecution is potentially a huge amount of work.
* Hand it off to the otherwise-unused claim taskq.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(zio->io_spa,
ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
zio_reexecute, zio, 0, &zio->io_tqent);
}
return (NULL);
}
ASSERT(zio->io_child_count == 0);
ASSERT(zio->io_reexecute == 0);
ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
/*
* Report any checksum errors, since the I/O is complete.
*/
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, NULL);
zfs_ereport_free_checksum(zcr);
}
if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp &&
!BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) &&
!(zio->io_flags & ZIO_FLAG_NOPWRITE)) {
metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp);
}
/*
* It is the responsibility of the done callback to ensure that this
* particular zio is no longer discoverable for adoption, and as
* such, cannot acquire any new parents.
*/
if (zio->io_done)
zio->io_done(zio);
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* We are done executing this zio. We may want to execute a parent
* next. See the comment in zio_notify_parent().
*/
zio_t *next_to_execute = NULL;
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
zio_remove_child(pio, zio, remove_zl);
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
}
if (zio->io_waiter != NULL) {
mutex_enter(&zio->io_lock);
zio->io_executor = NULL;
cv_broadcast(&zio->io_cv);
mutex_exit(&zio->io_lock);
} else {
zio_destroy(zio);
}
return (next_to_execute);
}
/*
* ==========================================================================
* I/O pipeline definition
* ==========================================================================
*/
static zio_pipe_stage_t *zio_pipeline[] = {
NULL,
zio_read_bp_init,
zio_write_bp_init,
zio_free_bp_init,
zio_issue_async,
zio_write_compress,
zio_encrypt,
zio_checksum_generate,
zio_nop_write,
zio_brt_free,
zio_ddt_read_start,
zio_ddt_read_done,
zio_ddt_write,
zio_ddt_free,
zio_gang_assemble,
zio_gang_issue,
zio_dva_throttle,
zio_dva_allocate,
zio_dva_free,
zio_dva_claim,
zio_ready,
zio_vdev_io_start,
zio_vdev_io_done,
zio_vdev_io_assess,
zio_checksum_verify,
zio_done
};
/*
* Compare two zbookmark_phys_t's to see which we would reach first in a
* pre-order traversal of the object tree.
*
* This is simple in every case aside from the meta-dnode object. For all other
* objects, we traverse them in order (object 1 before object 2, and so on).
* However, all of these objects are traversed while traversing object 0, since
* the data it points to is the list of objects. Thus, we need to convert to a
* canonical representation so we can compare meta-dnode bookmarks to
* non-meta-dnode bookmarks.
*
* We do this by calculating "equivalents" for each field of the zbookmark.
* zbookmarks outside of the meta-dnode use their own object and level, and
* calculate the level 0 equivalent (the first L0 blkid that is contained in the
* blocks this bookmark refers to) by multiplying their blkid by their span
* (the number of L0 blocks contained within one block at their level).
* zbookmarks inside the meta-dnode calculate their object equivalent
* (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
* level + 1<<31 (any value larger than a level could ever be) for their level.
* This causes them to always compare before a bookmark in their object
* equivalent, compare appropriately to bookmarks in other objects, and to
* compare appropriately to other bookmarks in the meta-dnode.
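*
* For example (a sketch assuming 512-byte dnodes and 16K meta-dnode data
* blocks, i.e. 32 dnodes per block), the meta-dnode L0 bookmark with blkid 3
* covers objects 96-127; it gets an object equivalent of 96, an L0 equivalent
* of 0, and a level far above any real indirection level, so it sorts before
* every bookmark inside object 96 and after every bookmark in lower-numbered
* objects.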
*/
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
/*
* These variables represent the "equivalent" values for the zbookmark,
* after converting zbookmarks inside the meta dnode to their
* normal-object equivalents.
*/
uint64_t zb1obj, zb2obj;
uint64_t zb1L0, zb2L0;
uint64_t zb1level, zb2level;
if (zb1->zb_object == zb2->zb_object &&
zb1->zb_level == zb2->zb_level &&
zb1->zb_blkid == zb2->zb_blkid)
return (0);
IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
/*
* BP_SPANB calculates the span in blocks.
*/
zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb1L0 = 0;
zb1level = zb1->zb_level + COMPARE_META_LEVEL;
} else {
zb1obj = zb1->zb_object;
zb1level = zb1->zb_level;
}
if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb2L0 = 0;
zb2level = zb2->zb_level + COMPARE_META_LEVEL;
} else {
zb2obj = zb2->zb_object;
zb2level = zb2->zb_level;
}
/* Now that we have a canonical representation, do the comparison. */
if (zb1obj != zb2obj)
return (zb1obj < zb2obj ? -1 : 1);
else if (zb1L0 != zb2L0)
return (zb1L0 < zb2L0 ? -1 : 1);
else if (zb1level != zb2level)
return (zb1level > zb2level ? -1 : 1);
/*
* This can (theoretically) happen if the bookmarks have the same object
* and level but different blkids, when the block sizes are not the same.
* There is presently no way to change the indirect block sizes.
*/
return (0);
}
/*
* This function answers the following question: given that last_block is the
* place our traversal stopped last time, does that guarantee that we've
* visited every node under subtree_root? We can't answer this with the raw
* output of zbookmark_compare(); instead we pass in a modified version of
* subtree_root. By incrementing its block id and then checking whether
* last_block is at or past that point, we can tell whether having visited
* last_block implies that all of subtree_root's children have been visited.
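*
* For example, if subtree_root is <level 1, blkid 2>, we compare last_block
* against <level 1, blkid 3>; a level-0 last_block at or past the start of
* blkid 3's range implies that everything under blkid 2 has been visited.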
*/
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
zbookmark_phys_t mod_zb = *subtree_root;
mod_zb.zb_blkid++;
ASSERT0(last_block->zb_level);
/* The objset_phys_t isn't before anything. */
if (dnp == NULL)
return (B_FALSE);
/*
* We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
* data block size in sectors, because that variable is only used if
* the bookmark refers to a block in the meta-dnode. Since we don't
* know without examining it what object it refers to, and there's no
* harm in passing in this value in other cases, we always pass it in.
*
* We pass in 0 for the indirect block size shift because zb2 must be
* level 0. The indirect block size is only used to calculate the span
* of the bookmark, but since the bookmark must be level 0, the span is
* always 1, so the math works out.
*
* If you make changes to how the zbookmark_compare() code works, be sure
* to verify that this code still works afterwards.
*/
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
last_block) <= 0);
}
/*
* This function is similar to zbookmark_subtree_completed(), but returns true
* if subtree_root is equal to or ahead of last_block, i.e. still to be done.
*/
boolean_t
zbookmark_subtree_tbd(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
ASSERT0(last_block->zb_level);
if (dnp == NULL)
return (B_FALSE);
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
last_block) >= 0);
}
EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);
ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
"Max I/O completion time (milliseconds) before marking it as slow");
ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
"Prioritize requeued I/O");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
"Defer frees starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
"Don't compress starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
"Rewrite new bps starting in this pass");
ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
"Throttle block allocations in the ZIO pipeline");
ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
"Log all slow ZIOs, not just those with vdevs");
diff --git a/sys/contrib/openzfs/module/zfs/zvol.c b/sys/contrib/openzfs/module/zfs/zvol.c
index 06bc75c634a6..cd4e6f0c7558 100644
--- a/sys/contrib/openzfs/module/zfs/zvol.c
+++ b/sys/contrib/openzfs/module/zfs/zvol.c
@@ -1,1794 +1,1791 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
*
* ZFS volume emulation driver.
*
* Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
* Volumes are accessed through the symbolic links named:
*
* /dev/<pool_name>/<dataset_name>
*
* Volumes are persistent through reboot and module load. No user command
* needs to be run before opening and using a device.
*
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
/*
* Note on locking of zvol state structures.
*
* These structures are used to maintain internal state used to emulate block
* devices on top of zvols. In particular, management of device minor number
* operations - create, remove, rename, and set_snapdev - involves access to
* these structures. The zvol_state_lock is primarily used to protect the
* zvol_state_list. The zv->zv_state_lock is used to protect the contents
* of the zvol_state_t structures, as well as to make sure that when the
* time comes to remove the structure from the list, it is not in use, and
* therefore, it can be taken off zvol_state_list and freed.
*
* The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
* e.g. for the duration of receive and rollback operations. This lock can be
* held for significant periods of time. Given that it is undesirable to hold
* mutexes for long periods of time, the following lock ordering applies:
* - take zvol_state_lock if necessary, to protect zvol_state_list
* - take zv_suspend_lock if necessary, by the code path in question
* - take zv_state_lock to protect zvol_state_t
*
* The minor operations are issued to spa->spa_zvol_taskq queues, which are
* single-threaded (to preserve order of minor operations), and are executed
* through the zvol_task_cb that dispatches the specific operations. Therefore,
* these operations are serialized per pool. Consequently, we can be certain
* that for a given zvol, there is only one operation at a time in progress.
* That is why one can be sure that the zvol_state_t for a given zvol is first
* allocated and placed on zvol_state_list, and that subsequent minor
* operations for this zvol proceed in the order of issue.
*
*/
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_volmode = ZFS_VOLMODE_GEOM;
struct hlist_head *zvol_htable;
static list_t zvol_state_list;
krwlock_t zvol_state_lock;
typedef enum {
ZVOL_ASYNC_REMOVE_MINORS,
ZVOL_ASYNC_RENAME_MINORS,
ZVOL_ASYNC_SET_SNAPDEV,
ZVOL_ASYNC_SET_VOLMODE,
ZVOL_ASYNC_MAX
} zvol_async_op_t;
typedef struct {
zvol_async_op_t op;
char name1[MAXNAMELEN];
char name2[MAXNAMELEN];
uint64_t value;
} zvol_task_t;
uint64_t
zvol_name_hash(const char *name)
{
int i;
uint64_t crc = -1ULL;
const uint8_t *p = (const uint8_t *)name;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
for (i = 0; i < MAXNAMELEN - 1 && *p; i++, p++) {
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (*p)) & 0xFF];
}
return (crc);
}
/*
* Find a zvol_state_t given the name and hash generated by zvol_name_hash.
* If found, return with zv_suspend_lock and zv_state_lock taken, otherwise,
* return (NULL) without taking the locks. The zv_suspend_lock is always taken
* before zv_state_lock. The mode argument indicates the mode (including none)
* for zv_suspend_lock to be taken.
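* On success, the caller is responsible for dropping zv_state_lock (and
* zv_suspend_lock, when a mode other than RW_NONE was requested) once it is
* done with the zvol, as zvol_set_volsize() does below.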
*/
zvol_state_t *
zvol_find_by_name_hash(const char *name, uint64_t hash, int mode)
{
zvol_state_t *zv;
struct hlist_node *p = NULL;
rw_enter(&zvol_state_lock, RW_READER);
hlist_for_each(p, ZVOL_HT_HEAD(hash)) {
zv = hlist_entry(p, zvol_state_t, zv_hlink);
mutex_enter(&zv->zv_state_lock);
if (zv->zv_hash == hash &&
strncmp(zv->zv_name, name, MAXNAMELEN) == 0) {
/*
* this is the right zvol, take the locks in the
* right order
*/
if (mode != RW_NONE &&
!rw_tryenter(&zv->zv_suspend_lock, mode)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, mode);
mutex_enter(&zv->zv_state_lock);
/*
* zvol cannot be renamed as we continue
* to hold zvol_state_lock
*/
ASSERT(zv->zv_hash == hash &&
strncmp(zv->zv_name, name, MAXNAMELEN)
== 0);
}
rw_exit(&zvol_state_lock);
return (zv);
}
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (NULL);
}
/*
* Find a zvol_state_t given the name.
* If found, return with zv_suspend_lock and zv_state_lock taken, otherwise,
* return (NULL) without taking the locks. The zv_suspend_lock is always taken
* before zv_state_lock. The mode argument indicates the mode (including none)
* for zv_suspend_lock to be taken.
*/
static zvol_state_t *
zvol_find_by_name(const char *name, int mode)
{
return (zvol_find_by_name_hash(name, zvol_name_hash(name), mode));
}
/*
* ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
*/
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
zfs_creat_t *zct = arg;
nvlist_t *nvprops = zct->zct_props;
int error;
uint64_t volblocksize, volsize;
VERIFY(nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
if (nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
/*
* These properties must be removed from the list so the generic
* property setting step won't apply to them.
*/
VERIFY(nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
(void) nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
ASSERT(error == 0);
}
/*
* ZFS_IOC_OBJSET_STATS entry point.
*/
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
int error;
dmu_object_info_t *doi;
uint64_t val;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
if (error)
return (SET_ERROR(error));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
doi->doi_data_block_size);
}
kmem_free(doi, sizeof (dmu_object_info_t));
return (SET_ERROR(error));
}
/*
* Sanity check volume size.
*/
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
if (volsize == 0)
return (SET_ERROR(EINVAL));
if (volsize % blocksize != 0)
return (SET_ERROR(EINVAL));
#ifdef _ILP32
if (volsize - 1 > SPEC_MAXOFFSET_T)
return (SET_ERROR(EOVERFLOW));
#endif
return (0);
}
/*
* Ensure the zap is flushed then inform the VFS of the capacity change.
*/
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
dmu_tx_t *tx;
int error;
uint64_t txg;
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (SET_ERROR(error));
}
txg = dmu_tx_get_txg(tx);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
&volsize, tx);
dmu_tx_commit(tx);
txg_wait_synced(dmu_objset_pool(os), txg);
if (error == 0)
error = dmu_free_long_range(os,
ZVOL_OBJ, volsize, DMU_OBJECT_END);
return (error);
}
/*
* ZFS_PROP_VOLSIZE set entry point. Note that modifying the volume
* size will result in a udev "change" event being generated.
*/
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
objset_t *os = NULL;
uint64_t readonly;
int error;
boolean_t owned = B_FALSE;
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
if (error != 0)
return (SET_ERROR(error));
if (readonly)
return (SET_ERROR(EROFS));
zvol_state_t *zv = zvol_find_by_name(name, RW_READER);
ASSERT(zv == NULL || (MUTEX_HELD(&zv->zv_state_lock) &&
RW_READ_HELD(&zv->zv_suspend_lock)));
if (zv == NULL || zv->zv_objset == NULL) {
if (zv != NULL)
rw_exit(&zv->zv_suspend_lock);
if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE, B_TRUE,
FTAG, &os)) != 0) {
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
return (SET_ERROR(error));
}
owned = B_TRUE;
if (zv != NULL)
zv->zv_objset = os;
} else {
os = zv->zv_objset;
}
dmu_object_info_t *doi = kmem_alloc(sizeof (*doi), KM_SLEEP);
if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
(error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
goto out;
error = zvol_update_volsize(volsize, os);
if (error == 0 && zv != NULL) {
zv->zv_volsize = volsize;
zv->zv_changed = 1;
}
out:
kmem_free(doi, sizeof (dmu_object_info_t));
if (owned) {
dmu_objset_disown(os, B_TRUE, FTAG);
if (zv != NULL)
zv->zv_objset = NULL;
} else {
rw_exit(&zv->zv_suspend_lock);
}
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
if (error == 0 && zv != NULL)
zvol_os_update_volsize(zv, volsize);
return (SET_ERROR(error));
}
/*
* Sanity check volume block size.
*/
int
zvol_check_volblocksize(const char *name, uint64_t volblocksize)
{
/* Record sizes above 128k need the feature to be enabled */
if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
spa_t *spa;
int error;
if ((error = spa_open(name, &spa, FTAG)) != 0)
return (error);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
/*
* We don't allow setting the property above 1MB,
* unless the tunable has been changed.
*/
if (volblocksize > zfs_max_recordsize)
return (SET_ERROR(EDOM));
spa_close(spa, FTAG);
}
if (volblocksize < SPA_MINBLOCKSIZE ||
volblocksize > SPA_MAXBLOCKSIZE ||
!ISP2(volblocksize))
return (SET_ERROR(EDOM));
return (0);
}
/*
* Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
* implement DKIOCFREE/free-long-range.
*/
static int
zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
zvol_state_t *zv = arg1;
lr_truncate_t *lr = arg2;
uint64_t offset, length;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
int error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
(void) zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset,
length);
}
return (error);
}
/*
* Replay a TX_WRITE ZIL transaction that didn't get committed
* after a system failure.
*/
static int
zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
zvol_state_t *zv = arg1;
lr_write_t *lr = arg2;
objset_t *os = zv->zv_objset;
char *data = (char *)(lr + 1); /* data follows lr_write_t */
uint64_t offset, length;
dmu_tx_t *tx;
int error;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
}
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
(void) zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
}
return (error);
}
/*
* Replay a TX_CLONE_RANGE ZIL transaction that didn't get committed
* after a system failure.
*
* TODO: For now we drop block cloning transactions for ZVOLs as they are
* unsupported, but we still need to inform BRT about that as we
* claimed them during pool import.
* This situation can occur when we try to import a pool created by a ZFS
* version that supports block cloning for ZVOLs into a system running
* this ZFS version, which does not support block cloning for ZVOLs.
*/
static int
zvol_replay_clone_range(void *arg1, void *arg2, boolean_t byteswap)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
zvol_state_t *zv = arg1;
objset_t *os = zv->zv_objset;
lr_clone_range_t *lr = arg2;
blkptr_t *bp;
dmu_tx_t *tx;
spa_t *spa;
uint_t ii;
int error;
dmu_objset_name(os, name);
cmn_err(CE_WARN, "ZFS dropping block cloning transaction for %s.",
name);
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
tx = dmu_tx_create(os);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
spa = os->os_spa;
for (ii = 0; ii < lr->lr_nbps; ii++) {
bp = &lr->lr_bps[ii];
if (!BP_IS_HOLE(bp)) {
zio_free(spa, dmu_tx_get_txg(tx), bp);
}
}
(void) zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
return (0);
}
static int
zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
{
(void) arg1, (void) arg2, (void) byteswap;
return (SET_ERROR(ENOTSUP));
}
/*
* Callback vectors for replaying records.
* Only TX_WRITE and TX_TRUNCATE are needed for zvol.
*/
zil_replay_func_t *const zvol_replay_vector[TX_MAX_TYPE] = {
zvol_replay_err, /* no such transaction type */
zvol_replay_err, /* TX_CREATE */
zvol_replay_err, /* TX_MKDIR */
zvol_replay_err, /* TX_MKXATTR */
zvol_replay_err, /* TX_SYMLINK */
zvol_replay_err, /* TX_REMOVE */
zvol_replay_err, /* TX_RMDIR */
zvol_replay_err, /* TX_LINK */
zvol_replay_err, /* TX_RENAME */
zvol_replay_write, /* TX_WRITE */
zvol_replay_truncate, /* TX_TRUNCATE */
zvol_replay_err, /* TX_SETATTR */
zvol_replay_err, /* TX_ACL */
zvol_replay_err, /* TX_CREATE_ATTR */
zvol_replay_err, /* TX_CREATE_ACL_ATTR */
zvol_replay_err, /* TX_MKDIR_ACL */
zvol_replay_err, /* TX_MKDIR_ATTR */
zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
zvol_replay_err, /* TX_WRITE2 */
zvol_replay_err, /* TX_SETSAXATTR */
zvol_replay_err, /* TX_RENAME_EXCHANGE */
zvol_replay_err, /* TX_RENAME_WHITEOUT */
zvol_replay_clone_range /* TX_CLONE_RANGE */
};
/*
* zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
*
* We store data in the log buffers if it's small enough.
* Otherwise we will later flush the data out via dmu_sync().
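*
* Roughly speaking, WR_COPIED embeds the data in the log record itself,
* WR_INDIRECT syncs the data out via dmu_sync() and records only a block
* pointer, and WR_NEED_COPY defers the copy until the itx is committed.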
*/
static const ssize_t zvol_immediate_write_sz = 32768;
void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
uint64_t size, int sync)
{
uint32_t blocksize = zv->zv_volblocksize;
zilog_t *zilog = zv->zv_zilog;
itx_wr_state_t write_state;
uint64_t sz = size;
if (zil_replaying(zilog, tx))
return;
if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
write_state = WR_INDIRECT;
else if (!spa_has_slogs(zilog->zl_spa) &&
size >= blocksize && blocksize > zvol_immediate_write_sz)
write_state = WR_INDIRECT;
else if (sync)
write_state = WR_COPIED;
else
write_state = WR_NEED_COPY;
while (size) {
itx_t *itx;
lr_write_t *lr;
itx_wr_state_t wr_state = write_state;
ssize_t len = size;
if (wr_state == WR_COPIED && size > zil_max_copied_data(zilog))
wr_state = WR_NEED_COPY;
else if (wr_state == WR_INDIRECT)
len = MIN(blocksize - P2PHASE(offset, blocksize), size);
itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
(wr_state == WR_COPIED ? len : 0));
lr = (lr_write_t *)&itx->itx_lr;
if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
wr_state = WR_NEED_COPY;
}
itx->itx_wr_state = wr_state;
lr->lr_foid = ZVOL_OBJ;
lr->lr_offset = offset;
lr->lr_length = len;
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
itx->itx_private = zv;
itx->itx_sync = sync;
(void) zil_itx_assign(zilog, itx, tx);
offset += len;
size -= len;
}
if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
dsl_pool_wrlog_count(zilog->zl_dmu_pool, sz, tx->tx_txg);
}
}
/*
* Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
*/
void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
boolean_t sync)
{
itx_t *itx;
lr_truncate_t *lr;
zilog_t *zilog = zv->zv_zilog;
if (zil_replaying(zilog, tx))
return;
itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
lr = (lr_truncate_t *)&itx->itx_lr;
lr->lr_foid = ZVOL_OBJ;
lr->lr_offset = off;
lr->lr_length = len;
itx->itx_sync = sync;
zil_itx_assign(zilog, itx, tx);
}
static void
zvol_get_done(zgd_t *zgd, int error)
{
(void) error;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
zfs_rangelock_exit(zgd->zgd_lr);
kmem_free(zgd, sizeof (zgd_t));
}
/*
* Get data to generate a TX_WRITE intent log record.
*/
int
zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
zvol_state_t *zv = arg;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
dmu_buf_t *db;
zgd_t *zgd;
int error;
ASSERT3P(lwb, !=, NULL);
ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
/*
* Write records come in two flavors: immediate and indirect.
* For small writes it's cheaper to store the data with the
* log record (immediate); for large writes it's cheaper to
* sync the data and get a pointer to it (indirect) so that
* we don't have to write the data twice.
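* In the indirect case below we lock and sync the entire
* volblocksize-aligned block, since lr_blkptr ends up referencing the whole
* on-disk block.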
*/
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
size, RL_READER);
error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
DMU_READ_NO_PREFETCH);
} else { /* indirect write */
/*
* We have to lock the whole block to ensure that no one can change the
* data while it is being written out and its checksum is being
* calculated. Unlike zfs_get_data(), we need not re-check the blocksize
* after we take the lock because it cannot be changed.
*/
size = zv->zv_volblocksize;
offset = P2ALIGN_TYPED(offset, size, uint64_t);
zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
size, RL_READER);
error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db != NULL);
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
zvol_get_done, zgd);
if (error == 0)
return (0);
}
}
zvol_get_done(zgd, error);
return (SET_ERROR(error));
}
/*
* The zvol_state_t's are inserted into zvol_state_list and zvol_htable.
*/
void
zvol_insert(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zvol_state_lock));
list_insert_head(&zvol_state_list, zv);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
}
/*
* Simply remove the zvol from the list of zvols.
*/
static void
zvol_remove(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zvol_state_lock));
list_remove(&zvol_state_list, zv);
hlist_del(&zv->zv_hlink);
}
/*
* Set up zv right after we take ownership of zv->objset.
*/
static int
zvol_setup_zv(zvol_state_t *zv)
{
uint64_t volsize;
int error;
uint64_t ro;
objset_t *os = zv->zv_objset;
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_LOCK_HELD(&zv->zv_suspend_lock));
zv->zv_zilog = NULL;
zv->zv_flags &= ~ZVOL_WRITTEN_TO;
error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
if (error)
return (SET_ERROR(error));
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
return (SET_ERROR(error));
error = dnode_hold(os, ZVOL_OBJ, zv, &zv->zv_dn);
if (error)
return (SET_ERROR(error));
zvol_os_set_capacity(zv, volsize >> 9);
zv->zv_volsize = volsize;
if (ro || dmu_objset_is_snapshot(os) ||
!spa_writeable(dmu_objset_spa(os))) {
zvol_os_set_disk_ro(zv, 1);
zv->zv_flags |= ZVOL_RDONLY;
} else {
zvol_os_set_disk_ro(zv, 0);
zv->zv_flags &= ~ZVOL_RDONLY;
}
return (0);
}
/*
* Shut down everything related to zv_objset except zv_objset itself.
* This is the reverse of zvol_setup_zv().
*/
static void
zvol_shutdown_zv(zvol_state_t *zv)
{
ASSERT(MUTEX_HELD(&zv->zv_state_lock) &&
RW_LOCK_HELD(&zv->zv_suspend_lock));
if (zv->zv_flags & ZVOL_WRITTEN_TO) {
ASSERT(zv->zv_zilog != NULL);
zil_close(zv->zv_zilog);
}
zv->zv_zilog = NULL;
dnode_rele(zv->zv_dn, zv);
zv->zv_dn = NULL;
/*
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
if (zv->zv_flags & ZVOL_WRITTEN_TO)
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
(void) dmu_objset_evict_dbufs(zv->zv_objset);
}
/*
* return the proper tag for rollback and recv
*/
void *
zvol_tag(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
return (zv->zv_open_count > 0 ? zv : NULL);
}
/*
* Suspend the zvol for recv and rollback.
*/
zvol_state_t *
zvol_suspend(const char *name)
{
zvol_state_t *zv;
zv = zvol_find_by_name(name, RW_WRITER);
if (zv == NULL)
return (NULL);
/* block all I/O, release in zvol_resume. */
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
atomic_inc(&zv->zv_suspend_ref);
if (zv->zv_open_count > 0)
zvol_shutdown_zv(zv);
/*
* do not hold zv_state_lock across suspend/resume to
* avoid locking up zvol lookups
*/
mutex_exit(&zv->zv_state_lock);
/* zv_suspend_lock is released in zvol_resume() */
return (zv);
}
int
zvol_resume(zvol_state_t *zv)
{
int error = 0;
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
mutex_enter(&zv->zv_state_lock);
if (zv->zv_open_count > 0) {
VERIFY0(dmu_objset_hold(zv->zv_name, zv, &zv->zv_objset));
VERIFY3P(zv->zv_objset->os_dsl_dataset->ds_owner, ==, zv);
VERIFY(dsl_dataset_long_held(zv->zv_objset->os_dsl_dataset));
dmu_objset_rele(zv->zv_objset, zv);
error = zvol_setup_zv(zv);
}
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
/*
* We need this because we don't hold zvol_state_lock while releasing
* zv_suspend_lock. zvol_remove_minors_impl thus cannot check
* zv_suspend_lock to determine whether it is safe to free, because an
* rwlock is not inherently atomic.
*/
atomic_dec(&zv->zv_suspend_ref);
return (SET_ERROR(error));
}
int
zvol_first_open(zvol_state_t *zv, boolean_t readonly)
{
objset_t *os;
int error;
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(mutex_owned(&spa_namespace_lock));
boolean_t ro = (readonly || (strchr(zv->zv_name, '@') != NULL));
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
if (error)
return (SET_ERROR(error));
zv->zv_objset = os;
error = zvol_setup_zv(zv);
if (error) {
dmu_objset_disown(os, 1, zv);
zv->zv_objset = NULL;
}
return (error);
}
void
zvol_last_close(zvol_state_t *zv)
{
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
zvol_shutdown_zv(zv);
dmu_objset_disown(zv->zv_objset, 1, zv);
zv->zv_objset = NULL;
}
typedef struct minors_job {
list_t *list;
list_node_t link;
/* input */
char *name;
/* output */
int error;
} minors_job_t;
/*
* Prefetch zvol dnodes for the minors_job
*/
static void
zvol_prefetch_minors_impl(void *arg)
{
minors_job_t *job = arg;
char *dsname = job->name;
objset_t *os = NULL;
job->error = dmu_objset_own(dsname, DMU_OST_ZVOL, B_TRUE, B_TRUE,
FTAG, &os);
if (job->error == 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, 0, ZIO_PRIORITY_SYNC_READ);
dmu_objset_disown(os, B_TRUE, FTAG);
}
}
/*
* Mask errors to continue dmu_objset_find() traversal
*/
static int
zvol_create_snap_minor_cb(const char *dsname, void *arg)
{
minors_job_t *j = arg;
list_t *minors_list = j->list;
const char *name = j->name;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
/* skip the designated dataset */
if (name && strcmp(dsname, name) == 0)
return (0);
/* at this point, the dsname should name a snapshot */
if (strchr(dsname, '@') == 0) {
dprintf("zvol_create_snap_minor_cb(): "
"%s is not a snapshot name\n", dsname);
} else {
minors_job_t *job;
char *n = kmem_strdup(dsname);
if (n == NULL)
return (0);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
/* don't care if dispatch fails, because job->error is 0 */
taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
TQ_SLEEP);
}
return (0);
}
/*
* If spa_keystore_load_wkey() is called for an encrypted zvol,
* we need to look for any clones also using the key. This function
* is "best effort" - so we just skip over it if there are failures.
*/
static void
zvol_add_clones(const char *dsname, list_t *minors_list)
{
/* Also check if it has clones */
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = NULL;
if (dsl_pool_hold(dsname, FTAG, &dp) != 0)
return;
if (!spa_feature_is_enabled(dp->dp_spa,
SPA_FEATURE_ENCRYPTION))
goto out;
if (dsl_dir_hold(dp, dsname, FTAG, &dd, NULL) != 0)
goto out;
if (dsl_dir_phys(dd)->dd_clones == 0)
goto out;
zap_cursor_t *zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
zap_attribute_t *za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
objset_t *mos = dd->dd_pool->dp_meta_objset;
for (zap_cursor_init(zc, mos, dsl_dir_phys(dd)->dd_clones);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
dsl_dataset_t *clone;
minors_job_t *job;
if (dsl_dataset_hold_obj(dd->dd_pool,
za->za_first_integer, FTAG, &clone) == 0) {
char name[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(clone, name);
char *n = kmem_strdup(name);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
dsl_dataset_rele(clone, FTAG);
}
}
zap_cursor_fini(zc);
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));
out:
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
dsl_pool_rele(dp, FTAG);
}
/*
* Mask errors to continue dmu_objset_find() traversal
*/
static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
uint64_t snapdev;
int error;
list_t *minors_list = arg;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
if (error)
return (0);
/*
* Given the name and the 'snapdev' property, create device minor nodes
* with the linkages to zvols/snapshots as needed.
* If the name represents a zvol, create a minor node for the zvol, then
* check if its snapshots are 'visible', and if so, iterate over the
* snapshots and create device minor nodes for those.
*/
if (strchr(dsname, '@') == 0) {
minors_job_t *job;
char *n = kmem_strdup(dsname);
if (n == NULL)
return (0);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
/* don't care if dispatch fails, because job->error is 0 */
taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
TQ_SLEEP);
zvol_add_clones(dsname, minors_list);
if (snapdev == ZFS_SNAPDEV_VISIBLE) {
/*
* traverse snapshots only, do not traverse children,
* and skip the 'dsname'
*/
(void) dmu_objset_find(dsname,
zvol_create_snap_minor_cb, (void *)job,
DS_FIND_SNAPSHOTS);
}
} else {
dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
dsname);
}
return (0);
}
/*
* Create minors for the specified dataset, including children and snapshots.
* Pay attention to the 'snapdev' property and iterate over the snapshots
* only if they are 'visible'. This approach ensures that the snapshot
* metadata is read from disk only when it is needed.
*
* The name can represent a dataset to be recursively scanned for zvols and
* their snapshots, or a single zvol snapshot. If the name represents a
* dataset, the scan is performed in two nested stages:
* - scan the dataset for zvols, and
* - for each zvol, create a minor node, then check if the zvol's snapshots
* are 'visible', and only then iterate over the snapshots if needed
*
* If the name represents a snapshot, a check is performed to see whether
* the snapshot is 'visible' (which also verifies that the parent is a zvol),
* and if so, a minor node for that snapshot is created.
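*
* For example (using hypothetical dataset names), "tank/vols" is scanned
* recursively for zvols and their visible snapshots, while
* "tank/vols/vol1@snap" results in at most one minor node, created only if
* its snapdev property is 'visible'.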
*/
void
zvol_create_minors_recursive(const char *name)
{
list_t minors_list;
minors_job_t *job;
if (zvol_inhibit_dev)
return;
/*
* This is the list for prefetch jobs. Whenever we find a match during
* dmu_objset_find(), we insert a minors_job into the list and use
* taskq_dispatch() to prefetch the zvol dnodes in parallel. Note that we
* don't need any locks because all list operations are done on the
* current thread.
*
* We will use this list to call zvol_os_create_minor() after the prefetch
* so we don't have to traverse with dmu_objset_find() again.
*/
list_create(&minors_list, sizeof (minors_job_t),
offsetof(minors_job_t, link));
if (strchr(name, '@') != NULL) {
uint64_t snapdev;
int error = dsl_prop_get_integer(name, "snapdev",
&snapdev, NULL);
if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
(void) zvol_os_create_minor(name);
} else {
fstrans_cookie_t cookie = spl_fstrans_mark();
(void) dmu_objset_find(name, zvol_create_minors_cb,
&minors_list, DS_FIND_CHILDREN);
spl_fstrans_unmark(cookie);
}
taskq_wait_outstanding(system_taskq, 0);
/*
* Prefetch is completed, we can do zvol_os_create_minor
* sequentially.
*/
- while ((job = list_head(&minors_list)) != NULL) {
- list_remove(&minors_list, job);
+ while ((job = list_remove_head(&minors_list)) != NULL) {
if (!job->error)
(void) zvol_os_create_minor(job->name);
kmem_strfree(job->name);
kmem_free(job, sizeof (minors_job_t));
}
list_destroy(&minors_list);
}
void
zvol_create_minor(const char *name)
{
/*
* Note: the dsl_pool_config_lock must not be held.
* Minor node creation needs to obtain the zvol_state_lock.
* zvol_open() obtains the zvol_state_lock and then the dsl pool
* config lock. Therefore, we can't have the config lock now if
* we are going to wait for the zvol_state_lock, because it
* would be a lock order inversion which could lead to deadlock.
*/
if (zvol_inhibit_dev)
return;
if (strchr(name, '@') != NULL) {
uint64_t snapdev;
int error = dsl_prop_get_integer(name,
"snapdev", &snapdev, NULL);
if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
(void) zvol_os_create_minor(name);
} else {
(void) zvol_os_create_minor(name);
}
}
/*
* Remove minors for specified dataset including children and snapshots.
*/
static void
zvol_free_task(void *arg)
{
zvol_os_free(arg);
}
void
zvol_remove_minors_impl(const char *name)
{
zvol_state_t *zv, *zv_next;
int namelen = ((name) ? strlen(name) : 0);
taskqid_t t;
list_t free_list;
if (zvol_inhibit_dev)
return;
list_create(&free_list, sizeof (zvol_state_t),
offsetof(zvol_state_t, zv_next));
rw_enter(&zvol_state_lock, RW_WRITER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
(strncmp(zv->zv_name, name, namelen) == 0 &&
(zv->zv_name[namelen] == '/' ||
zv->zv_name[namelen] == '@'))) {
/*
* By holding zv_state_lock here, we guarantee that no
* one is currently using this zv
*/
/* If in use, leave alone */
if (zv->zv_open_count > 0 ||
atomic_read(&zv->zv_suspend_ref)) {
mutex_exit(&zv->zv_state_lock);
continue;
}
zvol_remove(zv);
/*
* Cleared while holding zvol_state_lock as a writer
* which will prevent zvol_open() from opening it.
*/
zvol_os_clear_private(zv);
/* Drop zv_state_lock before zvol_free() */
mutex_exit(&zv->zv_state_lock);
/* Try parallel zv_free; if dispatch fails, do it in place */
t = taskq_dispatch(system_taskq, zvol_free_task, zv,
TQ_SLEEP);
if (t == TASKQID_INVALID)
list_insert_head(&free_list, zv);
} else {
mutex_exit(&zv->zv_state_lock);
}
}
rw_exit(&zvol_state_lock);
/* Drop zvol_state_lock before calling zvol_free() */
- while ((zv = list_head(&free_list)) != NULL) {
- list_remove(&free_list, zv);
+ while ((zv = list_remove_head(&free_list)) != NULL)
zvol_os_free(zv);
- }
}
/* Remove minor for this specific volume only */
static void
zvol_remove_minor_impl(const char *name)
{
zvol_state_t *zv = NULL, *zv_next;
if (zvol_inhibit_dev)
return;
rw_enter(&zvol_state_lock, RW_WRITER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (strcmp(zv->zv_name, name) == 0) {
/*
* By holding zv_state_lock here, we guarantee that no
* one is currently using this zv
*/
/* If in use, leave alone */
if (zv->zv_open_count > 0 ||
atomic_read(&zv->zv_suspend_ref)) {
mutex_exit(&zv->zv_state_lock);
continue;
}
zvol_remove(zv);
zvol_os_clear_private(zv);
mutex_exit(&zv->zv_state_lock);
break;
} else {
mutex_exit(&zv->zv_state_lock);
}
}
/* Drop zvol_state_lock before calling zvol_os_free() */
rw_exit(&zvol_state_lock);
if (zv != NULL)
zvol_os_free(zv);
}
/*
* Rename minors for specified dataset including children and snapshots.
*/
static void
zvol_rename_minors_impl(const char *oldname, const char *newname)
{
zvol_state_t *zv, *zv_next;
int oldnamelen;
if (zvol_inhibit_dev)
return;
oldnamelen = strlen(oldname);
rw_enter(&zvol_state_lock, RW_READER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (strcmp(zv->zv_name, oldname) == 0) {
zvol_os_rename_minor(zv, newname);
} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
(zv->zv_name[oldnamelen] == '/' ||
zv->zv_name[oldnamelen] == '@')) {
char *name = kmem_asprintf("%s%c%s", newname,
zv->zv_name[oldnamelen],
zv->zv_name + oldnamelen + 1);
zvol_os_rename_minor(zv, name);
kmem_strfree(name);
}
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
}
typedef struct zvol_snapdev_cb_arg {
uint64_t snapdev;
} zvol_snapdev_cb_arg_t;
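/*
* dmu_objset_find() callback: create or remove the minor for a snapshot
* according to the requested "snapdev" visibility.
*/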
static int
zvol_set_snapdev_cb(const char *dsname, void *param)
{
zvol_snapdev_cb_arg_t *arg = param;
if (strchr(dsname, '@') == NULL)
return (0);
switch (arg->snapdev) {
case ZFS_SNAPDEV_VISIBLE:
(void) zvol_os_create_minor(dsname);
break;
case ZFS_SNAPDEV_HIDDEN:
(void) zvol_remove_minor_impl(dsname);
break;
}
return (0);
}
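/* Apply the given "snapdev" value to every snapshot of the named dataset. */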
static void
zvol_set_snapdev_impl(char *name, uint64_t snapdev)
{
zvol_snapdev_cb_arg_t arg = {snapdev};
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* zvol_set_snapdev_sync() sets snapdev appropriately throughout the
* dataset hierarchy; here we only scan snapshots.
*/
dmu_objset_find(name, zvol_set_snapdev_cb, &arg, DS_FIND_SNAPSHOTS);
spl_fstrans_unmark(cookie);
}
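/*
* Apply the given "volmode" to the named volume by removing its minor and,
* if the new mode requires one, recreating it.
*/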
static void
zvol_set_volmode_impl(char *name, uint64_t volmode)
{
fstrans_cookie_t cookie;
uint64_t old_volmode;
zvol_state_t *zv;
if (strchr(name, '@') != NULL)
return;
/*
* It's unfortunate we need to remove minors before we create new ones:
* this is necessary because our backing gendisk (zvol_state->zv_disk)
* could be different when we set, for instance, volmode from "geom"
* to "dev" (or vice versa).
*/
zv = zvol_find_by_name(name, RW_NONE);
if (zv == NULL && volmode == ZFS_VOLMODE_NONE)
return;
if (zv != NULL) {
old_volmode = zv->zv_volmode;
mutex_exit(&zv->zv_state_lock);
if (old_volmode == volmode)
return;
zvol_wait_close(zv);
}
cookie = spl_fstrans_mark();
switch (volmode) {
case ZFS_VOLMODE_NONE:
(void) zvol_remove_minor_impl(name);
break;
case ZFS_VOLMODE_GEOM:
case ZFS_VOLMODE_DEV:
(void) zvol_remove_minor_impl(name);
(void) zvol_os_create_minor(name);
break;
case ZFS_VOLMODE_DEFAULT:
(void) zvol_remove_minor_impl(name);
if (zvol_volmode == ZFS_VOLMODE_NONE)
break;
else /* if zvol_volmode is invalid it defaults to "geom" */
(void) zvol_os_create_minor(name);
break;
}
spl_fstrans_unmark(cookie);
}
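/*
* Allocate and initialize an asynchronous zvol task.  Returns NULL for
* hidden (temporary) dataset names, which begin with '$'.
*/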
static zvol_task_t *
zvol_task_alloc(zvol_async_op_t op, const char *name1, const char *name2,
uint64_t value)
{
zvol_task_t *task;
/* Never allow tasks on hidden names. */
if (name1[0] == '$')
return (NULL);
task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
task->op = op;
task->value = value;
strlcpy(task->name1, name1, MAXNAMELEN);
if (name2 != NULL)
strlcpy(task->name2, name2, MAXNAMELEN);
return (task);
}
static void
zvol_task_free(zvol_task_t *task)
{
kmem_free(task, sizeof (zvol_task_t));
}
/*
* The worker-thread function, invoked asynchronously from the zvol taskq.
*/
static void
zvol_task_cb(void *arg)
{
zvol_task_t *task = arg;
switch (task->op) {
case ZVOL_ASYNC_REMOVE_MINORS:
zvol_remove_minors_impl(task->name1);
break;
case ZVOL_ASYNC_RENAME_MINORS:
zvol_rename_minors_impl(task->name1, task->name2);
break;
case ZVOL_ASYNC_SET_SNAPDEV:
zvol_set_snapdev_impl(task->name1, task->value);
break;
case ZVOL_ASYNC_SET_VOLMODE:
zvol_set_volmode_impl(task->name1, task->value);
break;
default:
VERIFY(0);
break;
}
zvol_task_free(task);
}
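/*
* Arguments passed from the open-context property setters below to their
* corresponding DSL sync tasks.
*/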
typedef struct zvol_set_prop_int_arg {
const char *zsda_name;
uint64_t zsda_value;
zprop_source_t zsda_source;
dmu_tx_t *zsda_tx;
} zvol_set_prop_int_arg_t;
/*
* Sanity check the dataset for safe use by the sync task. No additional
* conditions are imposed.
*/
static int
zvol_set_snapdev_check(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
int error;
error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
if (error != 0)
return (error);
dsl_dir_rele(dd, FTAG);
return (error);
}
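/*
* dmu_objset_find_dp() callback: read the effective "snapdev" value for
* this dataset and dispatch an asynchronous task to apply it.
*/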
static int
zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) arg;
char dsname[MAXNAMELEN];
zvol_task_t *task;
uint64_t snapdev;
dsl_dataset_name(ds, dsname);
if (dsl_prop_get_int_ds(ds, "snapdev", &snapdev) != 0)
return (0);
task = zvol_task_alloc(ZVOL_ASYNC_SET_SNAPDEV, dsname, NULL, snapdev);
if (task == NULL)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
task, TQ_SLEEP);
return (0);
}
/*
* Traverse all child datasets and apply snapdev appropriately.
* We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
* dataset and read the effective "snapdev" on every child in the callback
* function: this is because the value is not guaranteed to be the same in the
* whole dataset hierarchy.
*/
static void
zvol_set_snapdev_sync(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
dsl_dataset_t *ds;
int error;
VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
zsda->zsda_tx = tx;
error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
if (error == 0) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_SNAPDEV),
zsda->zsda_source, sizeof (zsda->zsda_value), 1,
&zsda->zsda_value, zsda->zsda_tx);
dsl_dataset_rele(ds, FTAG);
}
dmu_objset_find_dp(dp, dd->dd_object, zvol_set_snapdev_sync_cb,
zsda, DS_FIND_CHILDREN);
dsl_dir_rele(dd, FTAG);
}
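/* Open-context entry point: set "snapdev" via a DSL sync task. */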
int
zvol_set_snapdev(const char *ddname, zprop_source_t source, uint64_t snapdev)
{
zvol_set_prop_int_arg_t zsda;
zsda.zsda_name = ddname;
zsda.zsda_source = source;
zsda.zsda_value = snapdev;
return (dsl_sync_task(ddname, zvol_set_snapdev_check,
zvol_set_snapdev_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}
/*
* Sanity check the dataset for safe use by the sync task. No additional
* conditions are imposed.
*/
static int
zvol_set_volmode_check(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
int error;
error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
if (error != 0)
return (error);
dsl_dir_rele(dd, FTAG);
return (error);
}
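/*
* dmu_objset_find_dp() callback: read the effective "volmode" value for
* this dataset and dispatch an asynchronous task to apply it.
*/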
static int
zvol_set_volmode_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
(void) arg;
char dsname[MAXNAMELEN];
zvol_task_t *task;
uint64_t volmode;
dsl_dataset_name(ds, dsname);
if (dsl_prop_get_int_ds(ds, "volmode", &volmode) != 0)
return (0);
task = zvol_task_alloc(ZVOL_ASYNC_SET_VOLMODE, dsname, NULL, volmode);
if (task == NULL)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
task, TQ_SLEEP);
return (0);
}
/*
* Traverse all child datasets and apply volmode appropriately.
* We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
* dataset and read the effective "volmode" on every child in the callback
* function: this is because the value is not guaranteed to be the same in the
* whole dataset hierarchy.
*/
static void
zvol_set_volmode_sync(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
dsl_dataset_t *ds;
int error;
VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
zsda->zsda_tx = tx;
error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
if (error == 0) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_VOLMODE),
zsda->zsda_source, sizeof (zsda->zsda_value), 1,
&zsda->zsda_value, zsda->zsda_tx);
dsl_dataset_rele(ds, FTAG);
}
dmu_objset_find_dp(dp, dd->dd_object, zvol_set_volmode_sync_cb,
zsda, DS_FIND_CHILDREN);
dsl_dir_rele(dd, FTAG);
}
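/* Open-context entry point: set "volmode" via a DSL sync task. */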
int
zvol_set_volmode(const char *ddname, zprop_source_t source, uint64_t volmode)
{
zvol_set_prop_int_arg_t zsda;
zsda.zsda_name = ddname;
zsda.zsda_source = source;
zsda.zsda_value = volmode;
return (dsl_sync_task(ddname, zvol_set_volmode_check,
zvol_set_volmode_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}
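/*
* Dispatch an asynchronous task to remove minors for the given dataset,
* including children and snapshots; wait for it unless async is set.
*/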
void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
zvol_task_t *task;
taskqid_t id;
task = zvol_task_alloc(ZVOL_ASYNC_REMOVE_MINORS, name, NULL, ~0ULL);
if (task == NULL)
return;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
}
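/*
* Dispatch an asynchronous task to rename minors from name1 to name2;
* wait for it unless async is set.
*/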
void
zvol_rename_minors(spa_t *spa, const char *name1, const char *name2,
boolean_t async)
{
zvol_task_t *task;
taskqid_t id;
task = zvol_task_alloc(ZVOL_ASYNC_RENAME_MINORS, name1, name2, ~0ULL);
if (task == NULL)
return;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
}
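/* Return B_TRUE if the given device name refers to a zvol. */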
boolean_t
zvol_is_zvol(const char *name)
{
return (zvol_os_is_zvol(name));
}
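/*
* Common zvol initialization: create the global state list, its lock,
* and the minor-name hash table.
*/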
int
zvol_init_impl(void)
{
int i;
list_create(&zvol_state_list, sizeof (zvol_state_t),
offsetof(zvol_state_t, zv_next));
rw_init(&zvol_state_lock, NULL, RW_DEFAULT, NULL);
zvol_htable = kmem_alloc(ZVOL_HT_SIZE * sizeof (struct hlist_head),
KM_SLEEP);
for (i = 0; i < ZVOL_HT_SIZE; i++)
INIT_HLIST_HEAD(&zvol_htable[i]);
return (0);
}
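/* Common zvol teardown: remove all minors and release the global state. */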
void
zvol_fini_impl(void)
{
zvol_remove_minors_impl(NULL);
/*
* The call to "zvol_remove_minors_impl" may dispatch entries to
* the system_taskq, but it doesn't wait for those entries to
* complete before it returns.  Thus, we must wait for all of the
* removals to finish before we can continue.
*/
taskq_wait_outstanding(system_taskq, 0);
kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
list_destroy(&zvol_state_list);
rw_destroy(&zvol_state_lock);
}
diff --git a/sys/contrib/openzfs/tests/runfiles/common.run b/sys/contrib/openzfs/tests/runfiles/common.run
index 9ed1a6d37a97..342f56d50d04 100644
--- a/sys/contrib/openzfs/tests/runfiles/common.run
+++ b/sys/contrib/openzfs/tests/runfiles/common.run
@@ -1,982 +1,983 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# This run file contains all of the common functional tests. When
# adding a new test consider also adding it to the sanity.run file
# if the new test runs to completion in only a few seconds.
#
# Approximate run time: 4-5 hours
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/off]
tests = ['dosmode', 'posixmode']
tags = ['functional', 'acl']
[tests/functional/alloc_class]
tests = ['alloc_class_001_pos', 'alloc_class_002_neg', 'alloc_class_003_pos',
'alloc_class_004_pos', 'alloc_class_005_pos', 'alloc_class_006_pos',
'alloc_class_007_pos', 'alloc_class_008_pos', 'alloc_class_009_pos',
'alloc_class_010_pos', 'alloc_class_011_neg', 'alloc_class_012_pos',
'alloc_class_013_pos', 'alloc_class_014_neg', 'alloc_class_015_pos']
tags = ['functional', 'alloc_class']
[tests/functional/append]
tests = ['file_append', 'threadsappend_001_pos']
tags = ['functional', 'append']
[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'dbufstats_003_pos',
'arcstats_runtime_tuning']
tags = ['functional', 'arc']
[tests/functional/atime]
tests = ['atime_001_pos', 'atime_002_neg', 'root_atime_off', 'root_atime_on']
tags = ['functional', 'atime']
[tests/functional/bootfs]
tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos',
'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
'bootfs_008_pos']
tags = ['functional', 'bootfs']
[tests/functional/btree]
tests = ['btree_positive', 'btree_negative']
tags = ['functional', 'btree']
pre =
post =
[tests/functional/cache]
tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
'cache_009_pos', 'cache_010_pos', 'cache_011_pos', 'cache_012_pos']
tags = ['functional', 'cache']
[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
'cachefile_004_pos']
tags = ['functional', 'cachefile']
[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure',
'sensitive_none_lookup', 'sensitive_none_delete',
'sensitive_formd_lookup', 'sensitive_formd_delete',
'insensitive_none_lookup', 'insensitive_none_delete',
'insensitive_formd_lookup', 'insensitive_formd_delete',
'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
tags = ['functional', 'casenorm']
[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']
[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
'tst.get_userquota', 'tst.get_written', 'tst.inherit', 'tst.list_bookmarks',
'tst.list_children', 'tst.list_clones', 'tst.list_holds',
'tst.list_snapshots', 'tst.list_system_props',
'tst.list_user_props', 'tst.parse_args_neg','tst.promote_conflict',
'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
'tst.rollback_one', 'tst.set_props', 'tst.snapshot_destroy', 'tst.snapshot_neg',
'tst.snapshot_recursive', 'tst.snapshot_rename', 'tst.snapshot_simple',
'tst.bookmark.create', 'tst.bookmark.copy',
'tst.terminate_by_signal'
]
tags = ['functional', 'channel_program', 'synctask_core']
[tests/functional/checksum]
tests = ['run_edonr_test', 'run_sha2_test', 'run_skein_test', 'run_blake3_test',
'filetest_001_pos', 'filetest_002_pos']
tags = ['functional', 'checksum']
[tests/functional/clean_mirror]
tests = [ 'clean_mirror_001_pos', 'clean_mirror_002_pos',
'clean_mirror_003_pos', 'clean_mirror_004_pos']
tags = ['functional', 'clean_mirror']
[tests/functional/cli_root/zdb]
tests = ['zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos',
'zdb_006_pos', 'zdb_args_neg', 'zdb_args_pos',
'zdb_block_size_histogram', 'zdb_checksum', 'zdb_decompress',
'zdb_display_block', 'zdb_encrypted', 'zdb_label_checksum',
'zdb_object_range_neg', 'zdb_object_range_pos', 'zdb_objset_id',
- 'zdb_decompress_zstd', 'zdb_recover', 'zdb_recover_2']
+ 'zdb_decompress_zstd', 'zdb_recover', 'zdb_recover_2', 'zdb_backup']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']
[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
'zfs_change-key_pbkdf2iters', 'zfs_change-key_clones']
tags = ['functional', 'cli_root', 'zfs_change-key']
[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
'zfs_clone_010_pos', 'zfs_clone_encrypted', 'zfs_clone_deeply_nested',
'zfs_clone_rm_nested']
tags = ['functional', 'cli_root', 'zfs_clone']
[tests/functional/cli_root/zfs_copies]
tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
tags = ['functional', 'cli_root', 'zfs_copies']
[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg',
'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos',
'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
'zfs_create_crypt_combos', 'zfs_create_dryrun', 'zfs_create_nomount',
'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']
[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_clone_livelist_condense_and_disable',
'zfs_clone_livelist_condense_races', 'zfs_clone_livelist_dedup',
'zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg',
'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos',
'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos',
'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
'zfs_destroy_016_pos', 'zfs_destroy_clone_livelist',
'zfs_destroy_dev_removal', 'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']
[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp',
'zfs_diff_types', 'zfs_diff_encrypted', 'zfs_diff_mangle']
tags = ['functional', 'cli_root', 'zfs_diff']
[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']
[tests/functional/cli_root/zfs_ids_to_path]
tests = ['zfs_ids_to_path_001_pos']
tags = ['functional', 'cli_root', 'zfs_ids_to_path']
[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos',
'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']
[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
'zfs_load-key_https', 'zfs_load-key_location', 'zfs_load-key_noop',
'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
'zfs_mount_012_pos', 'zfs_mount_all_001_pos', 'zfs_mount_encrypted',
'zfs_mount_remount', 'zfs_mount_all_fail', 'zfs_mount_all_mountpoints',
'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']
[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']
[tests/functional/cli_root/zfs_property]
tests = ['zfs_written_property_001_pos']
tags = ['functional', 'cli_root', 'zfs_property']
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
'zfs_receive_016_pos', 'receive-o-x_props_override',
'receive-o-x_props_aliases',
'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted',
'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e',
'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props',
'zfs_receive_-wR-encrypted-mix', 'zfs_receive_corrective',
'zfs_receive_compressed_corrective', 'zfs_receive_large_block_corrective']
tags = ['functional', 'cli_root', 'zfs_receive']
[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_006_pos',
'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_009_neg',
'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg',
'zfs_rename_013_pos', 'zfs_rename_014_neg', 'zfs_rename_encrypted_child',
'zfs_rename_to_encrypted', 'zfs_rename_mountpoint', 'zfs_rename_nounmount']
tags = ['functional', 'cli_root', 'zfs_rename']
[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']
[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']
[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos',
'zfs_send_007_pos', 'zfs_send_encrypted', 'zfs_send_encrypted_unloaded',
'zfs_send_raw', 'zfs_send_sparse', 'zfs_send-b', 'zfs_send_skip_missing']
tags = ['functional', 'cli_root', 'zfs_send']
[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos',
'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg',
'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos',
'mountpoint_003_pos', 'ro_props_001_pos', 'zfs_set_keylocation',
'zfs_set_feature_activation']
tags = ['functional', 'cli_root', 'zfs_set']
[tests/functional/cli_root/zfs_share]
tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos',
'zfs_share_004_pos', 'zfs_share_006_pos', 'zfs_share_008_neg',
'zfs_share_010_neg', 'zfs_share_011_pos', 'zfs_share_concurrent_shares']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg',
'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
'zfs_snapshot_009_pos']
tags = ['functional', 'cli_root', 'zfs_snapshot']
[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']
[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
'zfs_unmount_all_001_pos', 'zfs_unmount_nested', 'zfs_unmount_unload_keys']
tags = ['functional', 'cli_root', 'zfs_unmount']
[tests/functional/cli_root/zfs_unshare]
tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos',
'zfs_unshare_007_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg',
'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']
[tests/functional/cli_root/zfs_wait]
tests = ['zfs_wait_deleteq', 'zfs_wait_getsubopt']
tags = ['functional', 'cli_root', 'zfs_wait']
[tests/functional/cli_root/zhack]
tests = ['zhack_label_repair_001', 'zhack_label_repair_002',
'zhack_label_repair_003', 'zhack_label_repair_004']
pre =
post =
tags = ['functional', 'cli_root', 'zhack']
[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos', 'zpool_colors']
tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos',
'add-o_ashift', 'add_prop_ashift', 'zpool_add_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg', 'attach-o_ashift']
tags = ['functional', 'cli_root', 'zpool_attach']
[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg',
'zpool_clear_readonly']
tags = ['functional', 'cli_root', 'zpool_clear']
[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_005_pos',
'zpool_create_006_pos', 'zpool_create_007_neg', 'zpool_create_008_pos',
'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_011_neg',
'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg',
'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos',
'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos',
'zpool_create_023_neg', 'zpool_create_024_pos',
'zpool_create_encrypted', 'zpool_create_crypt_combos',
'zpool_create_draid_001_pos', 'zpool_create_draid_002_pos',
'zpool_create_draid_003_pos', 'zpool_create_draid_004_pos',
'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
'zpool_create_features_005_pos', 'zpool_create_features_006_pos',
'zpool_create_features_007_pos', 'zpool_create_features_008_pos',
'zpool_create_features_009_pos', 'create-o_ashift',
'zpool_create_tempname', 'zpool_create_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_create']
[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']
[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']
[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_cliargs', 'zpool_events_follow',
'zpool_events_poolname', 'zpool_events_errors', 'zpool_events_duplicates',
'zpool_events_clear_retained']
tags = ['functional', 'cli_root', 'zpool_events']
[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos',
'zpool_export_003_neg', 'zpool_export_004_pos']
tags = ['functional', 'cli_root', 'zpool_export']
[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
'zpool_get_004_neg', 'zpool_get_005_pos', 'vdev_get_001_pos']
tags = ['functional', 'cli_root', 'zpool_get']
[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']
[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos',
'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos',
'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg',
'zpool_import_012_pos', 'zpool_import_013_neg', 'zpool_import_014_pos',
'zpool_import_015_pos', 'zpool_import_016_pos', 'zpool_import_017_pos',
'zpool_import_features_001_pos', 'zpool_import_features_002_neg',
'zpool_import_features_003_pos', 'zpool_import_missing_001_pos',
'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
'zpool_import_rename_001_pos', 'zpool_import_all_001_pos',
'zpool_import_encrypted', 'zpool_import_encrypted_load',
'zpool_import_errata3', 'zpool_import_errata4',
'import_cachefile_device_added',
'import_cachefile_device_removed',
'import_cachefile_device_replaced',
'import_cachefile_mirror_attached',
'import_cachefile_mirror_detached',
'import_cachefile_paths_changed',
'import_cachefile_shared_device',
'import_devices_missing', 'import_log_missing',
'import_paths_changed',
'import_rewind_config_changed',
'import_rewind_device_replaced']
tags = ['functional', 'cli_root', 'zpool_import']
timeout = 1200
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
'zpool_labelclear_removed', 'zpool_labelclear_valid']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']
[tests/functional/cli_root/zpool_initialize]
tests = ['zpool_initialize_attach_detach_add_remove',
'zpool_initialize_fault_export_import_online',
'zpool_initialize_import_export',
'zpool_initialize_offline_export_import_online',
'zpool_initialize_online_offline',
'zpool_initialize_split',
'zpool_initialize_start_and_cancel_neg',
'zpool_initialize_start_and_cancel_pos',
'zpool_initialize_suspend_resume',
'zpool_initialize_uninit',
'zpool_initialize_unsupported_vdevs',
'zpool_initialize_verify_checksums',
'zpool_initialize_verify_initialized']
pre =
tags = ['functional', 'cli_root', 'zpool_initialize']
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
'zpool_offline_003_pos']
tags = ['functional', 'cli_root', 'zpool_offline']
[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']
[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']
[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']
tags = ['functional', 'cli_root', 'zpool_replace']
[tests/functional/cli_root/zpool_resilver]
-tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart']
+tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart',
+ 'zpool_resilver_concurrent']
tags = ['functional', 'cli_root', 'zpool_resilver']
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies',
'zpool_error_scrub_001_pos', 'zpool_error_scrub_002_pos',
'zpool_error_scrub_003_pos', 'zpool_error_scrub_004_pos']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
'zpool_set_ashift', 'zpool_set_features', 'vdev_set_001_pos',
'user_property_001_pos', 'user_property_002_neg']
tags = ['functional', 'cli_root', 'zpool_set']
[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
'zpool_split_encryption', 'zpool_split_props', 'zpool_split_vdevs',
'zpool_split_resilver', 'zpool_split_indirect',
'zpool_split_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos',
'zpool_status_003_pos', 'zpool_status_004_pos',
'zpool_status_005_pos', 'zpool_status_006_pos',
'zpool_status_007_pos', 'zpool_status_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_status']
[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']
[tests/functional/cli_root/zpool_trim]
tests = ['zpool_trim_attach_detach_add_remove',
'zpool_trim_fault_export_import_online',
'zpool_trim_import_export', 'zpool_trim_multiple', 'zpool_trim_neg',
'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline',
'zpool_trim_partial', 'zpool_trim_rate', 'zpool_trim_rate_neg',
'zpool_trim_secure', 'zpool_trim_split', 'zpool_trim_start_and_cancel_neg',
'zpool_trim_start_and_cancel_pos', 'zpool_trim_suspend_resume',
'zpool_trim_unsupported_vdevs', 'zpool_trim_verify_checksums',
'zpool_trim_verify_trimmed']
tags = ['functional', 'zpool_trim']
[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
'zpool_upgrade_009_neg', 'zpool_upgrade_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_upgrade']
[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_discard', 'zpool_wait_freeing',
'zpool_wait_initialize_basic', 'zpool_wait_initialize_cancel',
'zpool_wait_initialize_flag', 'zpool_wait_multiple',
'zpool_wait_no_activity', 'zpool_wait_remove', 'zpool_wait_remove_cancel',
'zpool_wait_trim_basic', 'zpool_wait_trim_cancel', 'zpool_wait_trim_flag',
'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_replace_cancel', 'zpool_wait_rebuild',
'zpool_wait_resilver', 'zpool_wait_scrub_cancel',
'zpool_wait_replace', 'zpool_wait_scrub_basic', 'zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg',
'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege',
'zilstat_001_pos']
user =
tags = ['functional', 'cli_user', 'misc']
[tests/functional/cli_user/zfs_list]
tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos',
'zfs_list_004_neg', 'zfs_list_005_neg', 'zfs_list_007_pos',
'zfs_list_008_neg']
user =
tags = ['functional', 'cli_user', 'zfs_list']
[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
'zpool_iostat_005_pos', 'zpool_iostat_-c_disable',
'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']
[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']
[tests/functional/cli_user/zpool_status]
tests = ['zpool_status_003_pos', 'zpool_status_-c_disable',
'zpool_status_-c_homedir', 'zpool_status_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_status']
[tests/functional/compression]
tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos',
'l2arc_compressed_arc', 'l2arc_compressed_arc_disabled',
'l2arc_encrypted', 'l2arc_encrypted_no_compressed_arc']
tags = ['functional', 'compression']
[tests/functional/cp_files]
tests = ['cp_files_001_pos']
tags = ['functional', 'cp_files']
[tests/functional/crtime]
tests = ['crtime_001_pos' ]
tags = ['functional', 'crtime']
[tests/functional/ctime]
tests = ['ctime_001_pos' ]
tags = ['functional', 'ctime']
[tests/functional/deadman]
tests = ['deadman_ratelimit', 'deadman_sync', 'deadman_zio']
pre =
post =
tags = ['functional', 'deadman']
[tests/functional/delegate]
tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', 'zfs_allow_003_pos',
'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
tags = ['functional', 'delegate']
[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']
[tests/functional/fallocate]
tests = ['fallocate_punch-hole']
tags = ['functional', 'fallocate']
[tests/functional/features/async_destroy]
tests = ['async_destroy_001_pos']
tags = ['functional', 'features', 'async_destroy']
[tests/functional/features/large_dnode]
tests = ['large_dnode_001_pos', 'large_dnode_003_pos', 'large_dnode_004_neg',
'large_dnode_005_pos', 'large_dnode_007_neg', 'large_dnode_009_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/grow]
pre =
post =
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']
[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
'history_004_pos', 'history_005_neg', 'history_006_neg',
'history_007_pos', 'history_008_pos', 'history_009_pos',
'history_010_pos']
tags = ['functional', 'history']
[tests/functional/hkdf]
pre =
post =
tests = ['hkdf_test']
tags = ['functional', 'hkdf']
[tests/functional/inheritance]
tests = ['inherit_001_pos']
pre =
tags = ['functional', 'inheritance']
[tests/functional/io]
tests = ['sync', 'psync', 'posixaio', 'mmap']
tags = ['functional', 'io']
[tests/functional/inuse]
tests = ['inuse_004_pos', 'inuse_005_pos', 'inuse_008_pos', 'inuse_009_pos']
post =
tags = ['functional', 'inuse']
[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']
[tests/functional/limits]
tests = ['filesystem_count', 'filesystem_limit', 'snapshot_count',
'snapshot_limit']
tags = ['functional', 'limits']
[tests/functional/link_count]
tests = ['link_count_001', 'link_count_root_inode']
tags = ['functional', 'link_count']
[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']
[tests/functional/mmap]
tests = ['mmap_mixed', 'mmap_read_001_pos', 'mmap_seek_001_pos',
'mmap_sync_001_pos', 'mmap_write_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mount]
tests = ['umount_001', 'umountall_001']
tags = ['functional', 'mount']
[tests/functional/mv_files]
tests = ['mv_files_001_pos', 'mv_files_002_pos', 'random_creation']
tags = ['functional', 'mv_files']
[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']
[tests/functional/no_space]
tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos',
'enospc_df', 'enospc_ganging', 'enospc_rm']
tags = ['functional', 'no_space']
[tests/functional/nopwrite]
tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
'nopwrite_varying_compression', 'nopwrite_volume']
tags = ['functional', 'nopwrite']
[tests/functional/online_offline]
tests = ['online_offline_001_pos', 'online_offline_002_neg',
'online_offline_003_neg']
tags = ['functional', 'online_offline']
[tests/functional/pool_checkpoint]
tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind',
'checkpoint_capacity', 'checkpoint_conf_change', 'checkpoint_discard',
'checkpoint_discard_busy', 'checkpoint_discard_many',
'checkpoint_indirect', 'checkpoint_invalid', 'checkpoint_lun_expsz',
'checkpoint_open', 'checkpoint_removal', 'checkpoint_rewind',
'checkpoint_ro_rewind', 'checkpoint_sm_scale', 'checkpoint_twice',
'checkpoint_vdev_add', 'checkpoint_zdb', 'checkpoint_zhack_feat']
tags = ['functional', 'pool_checkpoint']
timeout = 1800
[tests/functional/pool_names]
tests = ['pool_names_001_pos', 'pool_names_002_neg']
pre =
post =
tags = ['functional', 'pool_names']
[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']
[tests/functional/pyzfs]
tests = ['pyzfs_unittest']
pre =
post =
tags = ['functional', 'pyzfs']
[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
'quota_004_pos', 'quota_005_pos', 'quota_006_neg']
tags = ['functional', 'quota']
[tests/functional/redacted_send]
tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
'redacted_disabled_feature', 'redacted_embedded', 'redacted_holes',
'redacted_incrementals', 'redacted_largeblocks', 'redacted_many_clones',
'redacted_mixed_recsize', 'redacted_mounts', 'redacted_negative',
'redacted_origin', 'redacted_panic', 'redacted_props', 'redacted_resume',
'redacted_size', 'redacted_volume']
tags = ['functional', 'redacted_send']
[tests/functional/raidz]
tests = ['raidz_001_neg', 'raidz_002_pos', 'raidz_003_pos', 'raidz_004_pos']
tags = ['functional', 'raidz']
[tests/functional/redundancy]
tests = ['redundancy_draid', 'redundancy_draid1', 'redundancy_draid2',
'redundancy_draid3', 'redundancy_draid_damaged1',
'redundancy_draid_damaged2', 'redundancy_draid_spare1',
'redundancy_draid_spare2', 'redundancy_draid_spare3', 'redundancy_mirror',
'redundancy_raidz', 'redundancy_raidz1', 'redundancy_raidz2',
'redundancy_raidz3', 'redundancy_stripe']
tags = ['functional', 'redundancy']
timeout = 1200
[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg',
'refquota_007_neg', 'refquota_008_neg']
tags = ['functional', 'refquota']
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_004_pos', 'refreserv_005_pos', 'refreserv_multi_raidz',
'refreserv_raidz']
tags = ['functional', 'refreserv']
[tests/functional/removal]
pre =
tests = ['removal_all_vdev', 'removal_cancel', 'removal_check_space',
'removal_condense_export', 'removal_multiple_indirection',
'removal_nopwrite', 'removal_remap_deadlists',
'removal_resume_export', 'removal_sanity', 'removal_with_add',
'removal_with_create_fs', 'removal_with_dedup',
'removal_with_errors', 'removal_with_export', 'removal_with_indirect',
'removal_with_ganging', 'removal_with_faulted',
'removal_with_remove', 'removal_with_scrub', 'removal_with_send',
'removal_with_send_recv', 'removal_with_snapshot',
'removal_with_write', 'removal_with_zdb', 'remove_expanded',
'remove_mirror', 'remove_mirror_sanity', 'remove_raidz',
'remove_indirect', 'remove_attach_mirror', 'removal_reservation']
tags = ['functional', 'removal']
[tests/functional/rename_dirs]
tests = ['rename_dirs_001_pos']
tags = ['functional', 'rename_dirs']
[tests/functional/replacement]
tests = ['attach_import', 'attach_multiple', 'attach_rebuild',
'attach_resilver', 'detach', 'rebuild_disabled_feature',
'rebuild_multiple', 'rebuild_raidz', 'replace_import', 'replace_rebuild',
'replace_resilver', 'resilver_restart_001', 'resilver_restart_002',
'scrub_cancel']
tags = ['functional', 'replacement']
[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos',
'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
'reservation_022_pos']
tags = ['functional', 'reservation']
[tests/functional/rootpool]
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
tags = ['functional', 'rootpool']
[tests/functional/rsend]
tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos',
'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos',
'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos', 'rsend_009_pos',
'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', 'rsend_013_pos',
'rsend_014_pos', 'rsend_016_neg', 'rsend_019_pos', 'rsend_020_pos',
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos', 'rsend_025_pos',
'rsend_026_neg', 'rsend_027_pos', 'rsend_028_neg', 'rsend_029_neg',
'rsend_030_pos', 'rsend_031_pos', 'send-c_verify_ratio',
'send-c_verify_contents', 'send-c_props', 'send-c_incremental',
'send-c_volume', 'send-c_zstream_recompress', 'send-c_zstreamdump',
'send-c_lz4_disabled', 'send-c_recv_lz4_disabled',
'send-c_mixed_compression', 'send-c_stream_size_estimate',
'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
'send-c_recv_dedup', 'send-L_toggle', 'send_encrypted_incremental',
'send_encrypted_freeobjects', 'send_encrypted_hierarchy',
'send_encrypted_props', 'send_encrypted_truncated_files',
'send_freeobjects', 'send_realloc_files', 'send_realloc_encrypted_files',
'send_spill_block', 'send_holds', 'send_hole_birth', 'send_mixed_raw',
'send-wR_encrypted_zvol', 'send_partial_dataset', 'send_invalid',
'send_doall', 'send_raw_spill_block', 'send_raw_ashift',
'send_raw_large_blocks']
tags = ['functional', 'rsend']
[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
tags = ['functional', 'scrub_mirror']
[tests/functional/slog]
tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs_001',
'slog_replay_fs_002', 'slog_replay_volume', 'slog_016_pos']
tags = ['functional', 'slog']
[tests/functional/snapshot]
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
'snapshot_017_pos', 'snapshot_018_pos']
tags = ['functional', 'snapshot']
[tests/functional/snapused]
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
'snapused_004_pos', 'snapused_005_pos']
tags = ['functional', 'snapused']
[tests/functional/sparse]
tests = ['sparse_001_pos']
tags = ['functional', 'sparse']
[tests/functional/stat]
tests = ['stat_001_pos']
tags = ['functional', 'stat']
[tests/functional/suid]
tests = ['suid_write_to_suid', 'suid_write_to_sgid', 'suid_write_to_suid_sgid',
'suid_write_to_none', 'suid_write_zil_replay']
tags = ['functional', 'suid']
[tests/functional/trim]
tests = ['autotrim_integrity', 'autotrim_config', 'autotrim_trim_integrity',
'trim_integrity', 'trim_config', 'trim_l2arc']
tags = ['functional', 'trim']
[tests/functional/truncate]
tests = ['truncate_001_pos', 'truncate_002_pos', 'truncate_timestamps']
tags = ['functional', 'truncate']
[tests/functional/upgrade]
tests = ['upgrade_userobj_001_pos', 'upgrade_readonly_pool']
tags = ['functional', 'upgrade']
[tests/functional/userquota]
tests = [
'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos',
'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos',
'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos',
'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg',
'userspace_001_pos', 'userspace_002_pos', 'userspace_encrypted',
'userspace_send_encrypted', 'userspace_encrypted_13709']
tags = ['functional', 'userquota']
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos',
'vdev_zaps_007_pos']
tags = ['functional', 'vdev_zaps']
[tests/functional/write_dirs]
tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
tags = ['functional', 'write_dirs']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos', 'xattr_compat']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
tests = ['zvol_ENOSPC_001_pos']
tags = ['functional', 'zvol', 'zvol_ENOSPC']
[tests/functional/zvol/zvol_cli]
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
tags = ['functional', 'zvol', 'zvol_cli']
[tests/functional/zvol/zvol_misc]
tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
'zvol_misc_snapdev', 'zvol_misc_trim', 'zvol_misc_volmode', 'zvol_misc_zil']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/zvol/zvol_stress]
tests = ['zvol_stress']
tags = ['functional', 'zvol', 'zvol_stress']
[tests/functional/zvol/zvol_swap]
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_004_pos']
tags = ['functional', 'zvol', 'zvol_swap']
[tests/functional/libzfs]
tests = ['many_fds', 'libzfs_input']
tags = ['functional', 'libzfs']
[tests/functional/log_spacemap]
tests = ['log_spacemap_import_logs']
pre =
post =
tags = ['functional', 'log_spacemap']
[tests/functional/l2arc]
tests = ['l2arc_arcstats_pos', 'l2arc_mfuonly_pos', 'l2arc_l2miss_pos',
'persist_l2arc_001_pos', 'persist_l2arc_002_pos',
'persist_l2arc_003_neg', 'persist_l2arc_004_pos', 'persist_l2arc_005_pos']
tags = ['functional', 'l2arc']
[tests/functional/zpool_influxdb]
tests = ['zpool_influxdb']
tags = ['functional', 'zpool_influxdb']
diff --git a/sys/contrib/openzfs/tests/runfiles/freebsd.run b/sys/contrib/openzfs/tests/runfiles/freebsd.run
index c7ca1d769fc3..13696d645850 100644
--- a/sys/contrib/openzfs/tests/runfiles/freebsd.run
+++ b/sys/contrib/openzfs/tests/runfiles/freebsd.run
@@ -1,27 +1,32 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/cli_root/zfs_jail:FreeBSD]
tests = ['zfs_jail_001_pos']
tags = ['functional', 'cli_root', 'zfs_jail']
+
+[tests/functional/pam:FreeBSD]
+tests = ['pam_basic', 'pam_change_unmounted', 'pam_nounmount', 'pam_recursive',
+ 'pam_short_password']
+tags = ['functional', 'pam']
diff --git a/sys/contrib/openzfs/tests/runfiles/linux.run b/sys/contrib/openzfs/tests/runfiles/linux.run
index 4df770d61f07..618eeb934017 100644
--- a/sys/contrib/openzfs/tests/runfiles/linux.run
+++ b/sys/contrib/openzfs/tests/runfiles/linux.run
@@ -1,209 +1,210 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/posix:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix']
[tests/functional/acl/posix-sa:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix-sa']
[tests/functional/atime:Linux]
tests = ['atime_003_pos', 'root_relatime_on']
tags = ['functional', 'atime']
[tests/functional/chattr:Linux]
tests = ['chattr_001_pos', 'chattr_002_neg']
tags = ['functional', 'chattr']
[tests/functional/cli_root/zfs:Linux]
tests = ['zfs_003_neg']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_mount:Linux]
tests = ['zfs_mount_006_pos', 'zfs_mount_008_pos', 'zfs_mount_013_pos',
'zfs_mount_014_neg', 'zfs_multi_mount']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_share:Linux]
tests = ['zfs_share_005_pos', 'zfs_share_007_neg', 'zfs_share_009_neg',
'zfs_share_012_pos', 'zfs_share_013_pos']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_unshare:Linux]
tests = ['zfs_unshare_008_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_sysfs:Linux]
tests = ['zfeature_set_unsupported', 'zfs_get_unsupported',
'zfs_set_unsupported', 'zfs_sysfs_live', 'zpool_get_unsupported',
'zpool_set_unsupported']
tags = ['functional', 'cli_root', 'zfs_sysfs']
[tests/functional/cli_root/zpool_add:Linux]
tests = ['add_nested_replacing_spare']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_expand:Linux]
tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos',
'zpool_expand_003_neg', 'zpool_expand_004_pos', 'zpool_expand_005_pos']
tags = ['functional', 'cli_root', 'zpool_expand']
[tests/functional/cli_root/zpool_reopen:Linux]
tests = ['zpool_reopen_001_pos', 'zpool_reopen_002_pos',
'zpool_reopen_003_pos', 'zpool_reopen_004_pos', 'zpool_reopen_005_pos',
'zpool_reopen_006_neg', 'zpool_reopen_007_pos']
tags = ['functional', 'cli_root', 'zpool_reopen']
[tests/functional/cli_root/zpool_split:Linux]
tests = ['zpool_split_wholedisk']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/compression:Linux]
tests = ['compress_004_pos']
tags = ['functional', 'compression']
[tests/functional/devices:Linux]
tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos']
tags = ['functional', 'devices']
[tests/functional/events:Linux]
tests = ['events_001_pos', 'events_002_pos', 'zed_rc_filter', 'zed_fd_spill',
'zed_cksum_reported', 'zed_cksum_config', 'zed_io_config']
tags = ['functional', 'events']
[tests/functional/fadvise:Linux]
tests = ['fadvise_sequential']
tags = ['functional', 'fadvise']
[tests/functional/fallocate:Linux]
tests = ['fallocate_prealloc', 'fallocate_zero-range']
tags = ['functional', 'fallocate']
[tests/functional/fault:Linux]
tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_online_002_pos',
'auto_replace_001_pos', 'auto_spare_001_pos', 'auto_spare_002_pos',
'auto_spare_multiple', 'auto_spare_ashift', 'auto_spare_shared',
'decrypt_fault', 'decompress_fault', 'scrub_after_resilver',
'zpool_status_-s']
tags = ['functional', 'fault']
[tests/functional/features/large_dnode:Linux]
tests = ['large_dnode_002_pos', 'large_dnode_006_pos', 'large_dnode_008_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/io:Linux]
tests = ['libaio', 'io_uring']
tags = ['functional', 'io']
[tests/functional/largest_pool:Linux]
tests = ['largest_pool_001_pos']
pre =
post =
tags = ['functional', 'largest_pool']
[tests/functional/mmap:Linux]
tests = ['mmap_libaio_001_pos', 'mmap_sync_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mmp:Linux]
tests = ['mmp_on_thread', 'mmp_on_uberblocks', 'mmp_on_off', 'mmp_interval',
'mmp_active_import', 'mmp_inactive_import', 'mmp_exported_import',
'mmp_write_uberblocks', 'mmp_reset_interval', 'multihost_history',
'mmp_on_zdb', 'mmp_write_distribution', 'mmp_hostid']
tags = ['functional', 'mmp']
[tests/functional/mount:Linux]
tests = ['umount_unlinked_drain']
tags = ['functional', 'mount']
[tests/functional/pam:Linux]
-tests = ['pam_basic', 'pam_nounmount', 'pam_short_password']
+tests = ['pam_basic', 'pam_change_unmounted', 'pam_nounmount', 'pam_recursive',
+ 'pam_short_password']
tags = ['functional', 'pam']
[tests/functional/procfs:Linux]
tests = ['procfs_list_basic', 'procfs_list_concurrent_readers',
'procfs_list_stale_read', 'pool_state']
tags = ['functional', 'procfs']
[tests/functional/projectquota:Linux]
tests = ['projectid_001_pos', 'projectid_002_pos', 'projectid_003_pos',
'projectquota_001_pos', 'projectquota_002_pos', 'projectquota_003_pos',
'projectquota_004_neg', 'projectquota_005_pos', 'projectquota_006_pos',
'projectquota_007_pos', 'projectquota_008_pos', 'projectquota_009_pos',
'projectspace_001_pos', 'projectspace_002_pos', 'projectspace_003_pos',
'projectspace_004_pos',
'projecttree_001_pos', 'projecttree_002_pos', 'projecttree_003_neg']
tags = ['functional', 'projectquota']
[tests/functional/dos_attributes:Linux]
tests = ['read_dos_attrs_001', 'write_dos_attrs_001']
tags = ['functional', 'dos_attributes']
[tests/functional/renameat2:Linux]
tests = ['renameat2_noreplace', 'renameat2_exchange', 'renameat2_whiteout']
tags = ['functional', 'renameat2']
[tests/functional/rsend:Linux]
tests = ['send_realloc_dnode_size', 'send_encrypted_files']
tags = ['functional', 'rsend']
[tests/functional/simd:Linux]
pre =
post =
tests = ['simd_supported']
tags = ['functional', 'simd']
[tests/functional/snapshot:Linux]
tests = ['snapshot_015_pos', 'snapshot_016_pos']
tags = ['functional', 'snapshot']
[tests/functional/tmpfile:Linux]
tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos',
'tmpfile_stat_mode']
tags = ['functional', 'tmpfile']
[tests/functional/upgrade:Linux]
tests = ['upgrade_projectquota_001_pos']
tags = ['functional', 'upgrade']
[tests/functional/user_namespace:Linux]
tests = ['user_namespace_001', 'user_namespace_002', 'user_namespace_003',
'user_namespace_004']
tags = ['functional', 'user_namespace']
[tests/functional/userquota:Linux]
tests = ['groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos',
'userquota_013_pos', 'userspace_003_pos']
tags = ['functional', 'userquota']
[tests/functional/zvol/zvol_misc:Linux]
tests = ['zvol_misc_fua']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/idmap_mount:Linux]
tests = ['idmap_mount_001', 'idmap_mount_002', 'idmap_mount_003',
'idmap_mount_004', 'idmap_mount_005']
tags = ['functional', 'idmap_mount']
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
index 3f7498f5c6bf..9517ce8073a5 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
@@ -1,452 +1,457 @@
#!/usr/bin/env @PYTHON_SHEBANG@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
# This script must remain compatible with Python 3.6+.
#
import os
import re
import sys
import argparse
#
# This script parses the stdout of zfstest, which has this format:
#
# Test: /path/to/testa (run as root) [00:00] [PASS]
# Test: /path/to/testb (run as jkennedy) [00:00] [PASS]
# Test: /path/to/testc (run as root) [00:00] [FAIL]
# [...many more results...]
#
# Results Summary
# FAIL 22
# SKIP 32
# PASS 1156
#
# Running Time: 02:50:31
# Percent passed: 95.5%
# Log directory: /var/tmp/test_results/20180615T205926
#
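# Editorial illustration (not part of the original file): a self-contained
# check of how one line in the format above is reduced to a (test, result)
# pair by the regular expression used in process_results() further down.
# The install path in the sample line is hypothetical.
#
_sample_line = ('Test: /usr/share/zfs/zfs-tests/tests/functional/io/io_uring '
                '(run as root) [00:01] [SKIP]')
_sample_match = re.match(
    r'^Test(?:\s+\(\S+\))?:'
    r'\s*\S*/zfs-tests/tests/(?:functional|perf/regression)/(\S+)'
    r'\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]', _sample_line)
assert _sample_match.group(1) == 'io/io_uring'
assert _sample_match.group(4) == 'SKIP'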
#
# Common generic reasons for a test or test group to be skipped.
#
# Some test cases are known to fail in ways which are not harmful or dangerous.
# In these cases simply mark the test as a known failure until it can be
# updated and the issue resolved. Note that it's preferable to open a unique
# issue on the GitHub issue tracker for each test case failure.
#
known_reason = 'Known issue'
#
# Some tests require that a test user be able to execute the zfs utilities.
# This may not be possible when testing in-tree due to the default permissions
# on the user's home directory. When testing, this can be resolved by granting
# group read access.
#
# chmod 0750 $HOME
#
exec_reason = 'Test user execute permissions required for utilities'
#
# Some tests require that the kernel supports renameat2 syscall.
#
renameat2_reason = 'Kernel renameat2 support required'
#
# Some tests require the O_TMPFILE flag which was first introduced in the
# 3.11 kernel.
#
tmpfile_reason = 'Kernel O_TMPFILE support required'
#
# Some tests require the statx(2) system call on Linux which was first
# introduced in the 4.11 kernel.
#
statx_reason = 'Kernel statx(2) system call required on Linux'
#
# Some tests require that the lsattr utility support the project id feature.
#
project_id_reason = 'lsattr with set/show project ID required'
#
# Some tests require that the kernel support user namespaces.
#
user_ns_reason = 'Kernel user namespace support required'
#
# Some rewind tests can fail since nothing guarantees that old MOS blocks
# are not overwritten. Snapshots protect datasets and data files but not
# the MOS. Reasonable efforts are made in the test case to increase the
# odds that some txgs will have their MOS data left untouched, but it is
# never a sure thing.
#
rewind_reason = 'Arbitrary pool rewind is not guaranteed'
#
# Some tests require a minimum version of the fio benchmark utility.
# Older distributions such as CentOS 6.x only provide fio-2.0.13.
#
fio_reason = 'Fio v2.3 or newer required'
#
# Some tests require that the DISKS provided support the discard operation.
# Normally this is not an issue because loopback devices are used for DISKS
# and they support discard (TRIM/UNMAP).
#
trim_reason = 'DISKS must support discard (TRIM/UNMAP)'
#
# Some tests on FreeBSD require the fspacectl(2) system call and the
# truncate(1) utility supporting the -d option. The system call was first
# introduced in FreeBSD version 1400032.
#
fspacectl_reason = 'fspacectl(2) and truncate -d support required'
#
# Some tests are not applicable to a platform or need to be updated to operate
# in the manner required by the platform. Any tests which are skipped for this
# reason will be suppressed in the final analysis output.
#
na_reason = "Not applicable"
#
# Some test cases don't have all the requirements to run on GitHub Actions CI.
#
ci_reason = 'CI runner doesn\'t have all requirements'
#
# Idmapped mount is only supported in kernel version >= 5.12
#
idmap_reason = 'Idmapped mount needs kernel 5.12+'
#
# These tests are known to fail, thus we use this list to prevent these
# failures from failing the job as a whole; only unexpected failures
# bubble up to cause this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
known = {
'casenorm/mixed_none_lookup_ci': ['FAIL', 7633],
'casenorm/mixed_formd_lookup_ci': ['FAIL', 7633],
'cli_root/zpool_import/import_rewind_device_replaced':
['FAIL', rewind_reason],
'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason],
'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason],
+ 'pool_checkpoint/checkpoint_discard_busy': ['SKIP', 12053],
'privilege/setup': ['SKIP', na_reason],
'refreserv/refreserv_004_pos': ['FAIL', known_reason],
'rootpool/setup': ['SKIP', na_reason],
'rsend/rsend_008_pos': ['SKIP', 6066],
'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason],
}
if sys.platform.startswith('freebsd'):
known.update({
'cli_root/zfs_receive/receive-o-x_props_override':
['FAIL', known_reason],
+ 'cli_root/zpool_resilver/zpool_resilver_concurrent':
+ ['SKIP', na_reason],
'cli_root/zpool_wait/zpool_wait_trim_basic': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_cancel': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_flag': ['SKIP', trim_reason],
'cli_root/zfs_unshare/zfs_unshare_008_pos': ['SKIP', na_reason],
'link_count/link_count_001': ['SKIP', na_reason],
'casenorm/mixed_create_failure': ['FAIL', 13215],
'mmap/mmap_sync_001_pos': ['SKIP', na_reason],
})
elif sys.platform.startswith('linux'):
known.update({
'casenorm/mixed_formd_lookup': ['FAIL', 7633],
'casenorm/mixed_formd_delete': ['FAIL', 7633],
'casenorm/sensitive_formd_lookup': ['FAIL', 7633],
'casenorm/sensitive_formd_delete': ['FAIL', 7633],
'removal/removal_with_zdb': ['SKIP', known_reason],
'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason],
})
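# Editorial illustration (not part of the original file): how one entry of the
# table above is consumed. The first element is the expected non-PASS result;
# the second is either a GitHub issue number or one of the generic reason
# strings defined earlier, and numeric reasons are expanded to issue URLs when
# the report is printed. The lookup uses a real key from the table; the
# variable names are illustrative only.
_example_result, _example_reason = known['rsend/rsend_008_pos']  # ['SKIP', 6066]
if isinstance(_example_reason, int):
    _example_reason = f'https://github.com/openzfs/zfs/issues/{_example_reason}'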
#
# These tests may occasionally fail or be skipped. We want their failures
# to be reported but only unexpected failures should bubble up to cause
# this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
maybe = {
'append/threadsappend_001_pos': ['FAIL', 6136],
'chattr/setup': ['SKIP', exec_reason],
'crtime/crtime_001_pos': ['SKIP', statx_reason],
'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason],
'cli_root/zfs_destroy/zfs_destroy_dev_removal_condense':
['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', 5479],
'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', known_reason],
'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', known_reason],
'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason],
'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason],
'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason],
'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', 6145],
'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', 6839],
'cli_root/zpool_initialize/zpool_initialize_import_export':
['FAIL', 11948],
'cli_root/zpool_labelclear/zpool_labelclear_removed':
['FAIL', known_reason],
'cli_root/zpool_trim/setup': ['SKIP', trim_reason],
'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', 6141],
'delegate/setup': ['SKIP', exec_reason],
'fallocate/fallocate_punch-hole': ['SKIP', fspacectl_reason],
'history/history_004_pos': ['FAIL', 7026],
'history/history_005_neg': ['FAIL', 6680],
'history/history_006_neg': ['FAIL', 5657],
'history/history_008_pos': ['FAIL', known_reason],
'history/history_010_pos': ['SKIP', exec_reason],
'io/mmap': ['SKIP', fio_reason],
'largest_pool/largest_pool_001_pos': ['FAIL', known_reason],
'mmp/mmp_on_uberblocks': ['FAIL', known_reason],
'pam/setup': ['SKIP', "pamtester might not be available"],
'pool_checkpoint/checkpoint_discard_busy': ['FAIL', 11946],
'projectquota/setup': ['SKIP', exec_reason],
'removal/removal_condense_export': ['FAIL', known_reason],
'renameat2/setup': ['SKIP', renameat2_reason],
'reservation/reservation_008_pos': ['FAIL', 7741],
'reservation/reservation_018_pos': ['FAIL', 5642],
'snapshot/clone_001_pos': ['FAIL', known_reason],
'snapshot/snapshot_009_pos': ['FAIL', 7961],
'snapshot/snapshot_010_pos': ['FAIL', 7961],
'snapused/snapused_004_pos': ['FAIL', 5513],
'tmpfile/setup': ['SKIP', tmpfile_reason],
'trim/setup': ['SKIP', trim_reason],
'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason],
'user_namespace/setup': ['SKIP', user_ns_reason],
'userquota/setup': ['SKIP', exec_reason],
'vdev_zaps/vdev_zaps_004_pos': ['FAIL', known_reason],
'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', 5848],
}
if sys.platform.startswith('freebsd'):
maybe.update({
'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason],
'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_concurrent_shares':
['FAIL', known_reason],
'cli_root/zpool_import/zpool_import_012_pos': ['FAIL', known_reason],
'delegate/zfs_allow_003_pos': ['FAIL', known_reason],
'inheritance/inherit_001_pos': ['FAIL', 11829],
'resilver/resilver_restart_001': ['FAIL', known_reason],
'pool_checkpoint/checkpoint_big_rewind': ['FAIL', 12622],
'pool_checkpoint/checkpoint_indirect': ['FAIL', 12623],
'snapshot/snapshot_002_pos': ['FAIL', '14831'],
})
elif sys.platform.startswith('linux'):
maybe.update({
'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
'fault/auto_online_002_pos': ['FAIL', 11889],
'fault/auto_replace_001_pos': ['FAIL', 14851],
'fault/auto_spare_002_pos': ['FAIL', 11889],
'fault/auto_spare_multiple': ['FAIL', 11889],
'fault/auto_spare_shared': ['FAIL', 11889],
'fault/decompress_fault': ['FAIL', 11889],
'io/io_uring': ['SKIP', 'io_uring support required'],
'limits/filesystem_limit': ['SKIP', known_reason],
'limits/snapshot_limit': ['SKIP', known_reason],
'mmp/mmp_active_import': ['FAIL', known_reason],
'mmp/mmp_exported_import': ['FAIL', known_reason],
'mmp/mmp_inactive_import': ['FAIL', known_reason],
'zvol/zvol_misc/zvol_misc_snapdev': ['FAIL', 12621],
'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason],
+ 'zvol/zvol_misc/zvol_misc_fua': ['SKIP', 14872],
+ 'zvol/zvol_misc/zvol_misc_trim': ['SKIP', 14872],
'idmap_mount/idmap_mount_001': ['SKIP', idmap_reason],
'idmap_mount/idmap_mount_002': ['SKIP', idmap_reason],
'idmap_mount/idmap_mount_003': ['SKIP', idmap_reason],
'idmap_mount/idmap_mount_004': ['SKIP', idmap_reason],
'idmap_mount/idmap_mount_005': ['SKIP', idmap_reason],
})
# Not all GitHub Actions runners have the scsi_debug module, so we may skip
# some tests which use it.
if os.environ.get('CI') == 'true':
known.update({
'cli_root/zpool_expand/zpool_expand_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_003_neg': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/setup': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_002_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_004_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_006_neg': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_007_pos': ['SKIP', ci_reason],
'cli_root/zpool_split/zpool_split_wholedisk': ['SKIP', ci_reason],
'fault/auto_offline_001_pos': ['SKIP', ci_reason],
'fault/auto_online_001_pos': ['SKIP', ci_reason],
'fault/auto_online_002_pos': ['SKIP', ci_reason],
'fault/auto_replace_001_pos': ['SKIP', ci_reason],
'fault/auto_spare_ashift': ['SKIP', ci_reason],
'fault/auto_spare_shared': ['SKIP', ci_reason],
'procfs/pool_state': ['SKIP', ci_reason],
})
maybe.update({
'events/events_002_pos': ['FAIL', 11546],
})
def process_results(pathname):
try:
f = open(pathname)
except IOError as e:
print('Error opening file:', e)
sys.exit(1)
prefix = '/zfs-tests/tests/(?:functional|perf/regression)/'
pattern = \
r'^Test(?:\s+\(\S+\))?:' + \
rf'\s*\S*{prefix}(\S+)' + \
r'\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]'
pattern_log = r'^\s*Log directory:\s*(\S*)'
d = {}
logdir = 'Could not determine log directory.'
for line in f.readlines():
m = re.match(pattern, line)
if m and len(m.groups()) == 4:
d[m.group(1)] = m.group(4)
continue
m = re.match(pattern_log, line)
if m:
logdir = m.group(1)
return d, logdir
class ListMaybesAction(argparse.Action):
def __init__(self,
option_strings,
dest="SUPPRESS",
default="SUPPRESS",
help="list flaky tests and exit"):
super(ListMaybesAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
for test in maybe:
print(test)
sys.exit(0)
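# Editorial illustration (not part of the original file): with the argument
# definitions in the __main__ block below, the installed script supports three
# invocation styles (log path hypothetical):
#
#   zts-report.py path/to/zts.log         # normal analysis of a zfstest log
#   zts-report.py --no-maybes <logfile>   # count "maybe" failures as unexpected
#   zts-report.py --list-maybes           # print the "maybe" test names and exit
#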
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Analyze ZTS logs')
parser.add_argument('logfile')
parser.add_argument('--list-maybes', action=ListMaybesAction)
parser.add_argument('--no-maybes', action='store_false', dest='maybes')
args = parser.parse_args()
results, logdir = process_results(args.logfile)
if not results:
print("\n\nNo test results were found.")
print("Log directory:", logdir)
sys.exit(0)
expected = []
unexpected = []
all_maybes = True
for test in list(results.keys()):
if results[test] == "PASS":
continue
setup = test.replace(os.path.basename(test), "setup")
if results[test] == "SKIP" and test != setup:
if setup in known and known[setup][0] == "SKIP":
continue
if setup in maybe and maybe[setup][0] == "SKIP":
continue
if (test in known and results[test] in known[test][0]):
expected.append(test)
elif test in maybe and results[test] in maybe[test][0]:
if results[test] == 'SKIP' or args.maybes:
expected.append(test)
elif not args.maybes:
unexpected.append(test)
else:
unexpected.append(test)
all_maybes = False
print("\nTests with results other than PASS that are expected:")
for test in sorted(expected):
issue_url = 'https://github.com/openzfs/zfs/issues/'
# Include the reason why the result is expected, given the following:
# 1. Suppress test results which set the "Not applicable" reason.
# 2. Numerical reasons are assumed to be GitHub issue numbers.
# 3. When an entire test group is skipped only report the setup reason.
if test in known:
if known[test][1] == na_reason:
continue
elif isinstance(known[test][1], int):
expect = f"{issue_url}{known[test][1]}"
else:
expect = known[test][1]
elif test in maybe:
if isinstance(maybe[test][1], int):
expect = f"{issue_url}{maybe[test][1]}"
else:
expect = maybe[test][1]
elif setup in known and known[setup][0] == "SKIP" and setup != test:
continue
elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test:
continue
else:
expect = "UNKNOWN REASON"
print(f" {results[test]} {test} ({expect})")
print("\nTests with result of PASS that are unexpected:")
for test in sorted(known.keys()):
# We probably should not be silently ignoring the case
# where "test" is not in "results".
if test not in results or results[test] != "PASS":
continue
print(f" {results[test]} {test} (expected {known[test][0]})")
print("\nTests with results other than PASS that are unexpected:")
for test in sorted(unexpected):
expect = "PASS" if test not in known else known[test][0]
print(f" {results[test]} {test} (expected {expect})")
if len(unexpected) == 0:
sys.exit(0)
elif not args.maybes and all_maybes:
sys.exit(2)
else:
sys.exit(1)
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test.c
index 9a34bf559be0..fda9229915ce 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test.c
@@ -1,557 +1,557 @@
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*/
/*
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/avl.h>
#include <sys/btree.h>
#include <sys/time.h>
#include <sys/resource.h>
#define BUFSIZE 256
static int seed = 0;
static int stress_timeout = 180;
static int contents_frequency = 100;
static int tree_limit = 64 * 1024;
static boolean_t stress_only = B_FALSE;
static void
usage(int exit_value)
{
(void) fprintf(stderr, "Usage:\tbtree_test -n <test_name>\n");
(void) fprintf(stderr, "\tbtree_test -s [-r <seed>] [-l <limit>] "
"[-t timeout>] [-c check_contents]\n");
(void) fprintf(stderr, "\tbtree_test [-r <seed>] [-l <limit>] "
"[-t timeout>] [-c check_contents]\n");
(void) fprintf(stderr, "\n With the -n option, run the named "
"negative test. With the -s option,\n");
(void) fprintf(stderr, " run the stress test according to the "
"other options passed. With\n");
(void) fprintf(stderr, " neither, run all the positive tests, "
"including the stress test with\n");
(void) fprintf(stderr, " the default options.\n");
(void) fprintf(stderr, "\n Options that control the stress test\n");
(void) fprintf(stderr, "\t-c stress iterations after which to compare "
"tree contents [default: 100]\n");
(void) fprintf(stderr, "\t-l the largest value to allow in the tree "
"[default: 1M]\n");
(void) fprintf(stderr, "\t-r random seed [default: from "
"gettimeofday()]\n");
(void) fprintf(stderr, "\t-t seconds to let the stress test run "
"[default: 180]\n");
exit(exit_value);
}
typedef struct int_node {
avl_node_t node;
uint64_t data;
} int_node_t;
/*
* Utility functions
*/
static int
avl_compare(const void *v1, const void *v2)
{
const int_node_t *n1 = v1;
const int_node_t *n2 = v2;
uint64_t a = n1->data;
uint64_t b = n2->data;
return (TREE_CMP(a, b));
}
static int
zfs_btree_compare(const void *v1, const void *v2)
{
const uint64_t *a = v1;
const uint64_t *b = v2;
return (TREE_CMP(*a, *b));
}
static void
verify_contents(avl_tree_t *avl, zfs_btree_t *bt)
{
static int count = 0;
zfs_btree_index_t bt_idx = {0};
int_node_t *node;
uint64_t *data;
boolean_t forward = count % 2 == 0 ? B_TRUE : B_FALSE;
count++;
ASSERT3U(avl_numnodes(avl), ==, zfs_btree_numnodes(bt));
if (forward == B_TRUE) {
node = avl_first(avl);
data = zfs_btree_first(bt, &bt_idx);
} else {
node = avl_last(avl);
data = zfs_btree_last(bt, &bt_idx);
}
while (node != NULL) {
ASSERT3U(*data, ==, node->data);
if (forward == B_TRUE) {
data = zfs_btree_next(bt, &bt_idx, &bt_idx);
node = AVL_NEXT(avl, node);
} else {
data = zfs_btree_prev(bt, &bt_idx, &bt_idx);
node = AVL_PREV(avl, node);
}
}
}
static void
verify_node(avl_tree_t *avl, zfs_btree_t *bt, int_node_t *node)
{
zfs_btree_index_t bt_idx = {0};
zfs_btree_index_t bt_idx2 = {0};
int_node_t *inp;
uint64_t data = node->data;
uint64_t *rv = NULL;
ASSERT3U(avl_numnodes(avl), ==, zfs_btree_numnodes(bt));
ASSERT3P((rv = (uint64_t *)zfs_btree_find(bt, &data, &bt_idx)), !=,
NULL);
ASSERT3S(*rv, ==, data);
ASSERT3P(zfs_btree_get(bt, &bt_idx), !=, NULL);
ASSERT3S(data, ==, *(uint64_t *)zfs_btree_get(bt, &bt_idx));
if ((inp = AVL_NEXT(avl, node)) != NULL) {
ASSERT3P((rv = zfs_btree_next(bt, &bt_idx, &bt_idx2)), !=,
NULL);
ASSERT3P(rv, ==, zfs_btree_get(bt, &bt_idx2));
ASSERT3S(inp->data, ==, *rv);
} else {
ASSERT3U(data, ==, *(uint64_t *)zfs_btree_last(bt, &bt_idx));
}
if ((inp = AVL_PREV(avl, node)) != NULL) {
ASSERT3P((rv = zfs_btree_prev(bt, &bt_idx, &bt_idx2)), !=,
NULL);
ASSERT3P(rv, ==, zfs_btree_get(bt, &bt_idx2));
ASSERT3S(inp->data, ==, *rv);
} else {
ASSERT3U(data, ==, *(uint64_t *)zfs_btree_first(bt, &bt_idx));
}
}
/*
* Tests
*/
/* Verify that zfs_btree_find works correctly with a NULL index. */
static int
find_without_index(zfs_btree_t *bt, char *why)
{
u_longlong_t *p, i = 12345;
zfs_btree_add(bt, &i);
if ((p = (u_longlong_t *)zfs_btree_find(bt, &i, NULL)) == NULL ||
*p != i) {
(void) snprintf(why, BUFSIZE, "Unexpectedly found %llu\n",
p == NULL ? 0 : *p);
return (1);
}
i++;
if ((p = (u_longlong_t *)zfs_btree_find(bt, &i, NULL)) != NULL) {
(void) snprintf(why, BUFSIZE, "Found bad value: %llu\n", *p);
return (1);
}
return (0);
}
/* Verify simple insertion and removal from the tree. */
static int
insert_find_remove(zfs_btree_t *bt, char *why)
{
u_longlong_t *p, i = 12345;
zfs_btree_index_t bt_idx = {0};
/* Insert 'i' into the tree, and attempt to find it again. */
zfs_btree_add(bt, &i);
if ((p = (u_longlong_t *)zfs_btree_find(bt, &i, &bt_idx)) == NULL) {
(void) snprintf(why, BUFSIZE, "Didn't find value in tree\n");
return (1);
} else if (*p != i) {
(void) snprintf(why, BUFSIZE, "Found (%llu) in tree\n", *p);
return (1);
}
ASSERT3S(zfs_btree_numnodes(bt), ==, 1);
zfs_btree_verify(bt);
/* Remove 'i' from the tree, and verify it is not found. */
zfs_btree_remove(bt, &i);
if ((p = (u_longlong_t *)zfs_btree_find(bt, &i, &bt_idx)) != NULL) {
(void) snprintf(why, BUFSIZE,
"Found removed value (%llu)\n", *p);
return (1);
}
ASSERT3S(zfs_btree_numnodes(bt), ==, 0);
zfs_btree_verify(bt);
return (0);
}
/*
* Add a number of random entries into a btree and avl tree. Then walk them
* backwards and forwards while emptying the tree, verifying the trees look
* the same.
*/
static int
drain_tree(zfs_btree_t *bt, char *why)
{
avl_tree_t avl;
int i = 0;
int_node_t *node;
avl_index_t avl_idx = {0};
zfs_btree_index_t bt_idx = {0};
avl_create(&avl, avl_compare, sizeof (int_node_t),
offsetof(int_node_t, node));
/* Fill both trees with the same data */
for (i = 0; i < 64 * 1024; i++) {
u_longlong_t randval = random();
if (zfs_btree_find(bt, &randval, &bt_idx) != NULL) {
continue;
}
zfs_btree_add_idx(bt, &randval, &bt_idx);
node = malloc(sizeof (int_node_t));
if (node == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
node->data = randval;
if (avl_find(&avl, node, &avl_idx) != NULL) {
(void) snprintf(why, BUFSIZE,
"Found in avl: %llu\n", randval);
return (1);
}
avl_insert(&avl, node, avl_idx);
}
/* Remove data from either side of the trees, comparing the data */
while (avl_numnodes(&avl) != 0) {
uint64_t *data;
ASSERT3U(avl_numnodes(&avl), ==, zfs_btree_numnodes(bt));
if (avl_numnodes(&avl) % 2 == 0) {
node = avl_first(&avl);
data = zfs_btree_first(bt, &bt_idx);
} else {
node = avl_last(&avl);
data = zfs_btree_last(bt, &bt_idx);
}
ASSERT3U(node->data, ==, *data);
zfs_btree_remove_idx(bt, &bt_idx);
avl_remove(&avl, node);
if (avl_numnodes(&avl) == 0) {
break;
}
node = avl_first(&avl);
ASSERT3U(node->data, ==,
*(uint64_t *)zfs_btree_first(bt, NULL));
node = avl_last(&avl);
ASSERT3U(node->data, ==, *(uint64_t *)zfs_btree_last(bt, NULL));
}
ASSERT3S(zfs_btree_numnodes(bt), ==, 0);
void *avl_cookie = NULL;
while ((node = avl_destroy_nodes(&avl, &avl_cookie)) != NULL)
free(node);
avl_destroy(&avl);
return (0);
}
/*
* This test uses an avl and btree, and continually processes new random
* values. Each value is either removed or inserted, depending on whether
* or not it is found in the tree. The test periodically checks that both
* trees have the same data and does consistency checks. This stress
* option can also be run on its own from the command line.
*/
static int
stress_tree(zfs_btree_t *bt, char *why)
{
(void) why;
avl_tree_t avl;
int_node_t *node;
struct timeval tp;
time_t t0;
int insertions = 0, removals = 0, iterations = 0;
u_longlong_t max = 0, min = UINT64_MAX;
(void) gettimeofday(&tp, NULL);
t0 = tp.tv_sec;
avl_create(&avl, avl_compare, sizeof (int_node_t),
offsetof(int_node_t, node));
while (1) {
zfs_btree_index_t bt_idx = {0};
avl_index_t avl_idx = {0};
uint64_t randval = random() % tree_limit;
node = malloc(sizeof (*node));
if (node == NULL) {
perror("malloc");
exit(EXIT_FAILURE);
}
node->data = randval;
max = randval > max ? randval : max;
min = randval < min ? randval : min;
void *ret = avl_find(&avl, node, &avl_idx);
if (ret == NULL) {
insertions++;
avl_insert(&avl, node, avl_idx);
ASSERT3P(zfs_btree_find(bt, &randval, &bt_idx), ==,
NULL);
zfs_btree_add_idx(bt, &randval, &bt_idx);
verify_node(&avl, bt, node);
} else {
removals++;
verify_node(&avl, bt, ret);
zfs_btree_remove(bt, &randval);
avl_remove(&avl, ret);
free(ret);
free(node);
}
zfs_btree_verify(bt);
iterations++;
if (iterations % contents_frequency == 0) {
verify_contents(&avl, bt);
}
zfs_btree_verify(bt);
(void) gettimeofday(&tp, NULL);
if (tp.tv_sec > t0 + stress_timeout) {
fprintf(stderr, "insertions/removals: %u/%u\nmax/min: "
"%llu/%llu\n", insertions, removals, max, min);
break;
}
}
void *avl_cookie = NULL;
while ((node = avl_destroy_nodes(&avl, &avl_cookie)) != NULL)
free(node);
avl_destroy(&avl);
if (stress_only) {
zfs_btree_index_t *idx = NULL;
while (zfs_btree_destroy_nodes(bt, &idx) != NULL)
;
zfs_btree_verify(bt);
}
return (0);
}
/*
* Verify inserting a duplicate value will cause a crash.
* Note: negative test; return of 0 is a failure.
*/
static int
insert_duplicate(zfs_btree_t *bt)
{
uint64_t i = 23456;
zfs_btree_index_t bt_idx = {0};
if (zfs_btree_find(bt, &i, &bt_idx) != NULL) {
fprintf(stderr, "Found value in empty tree.\n");
return (0);
}
zfs_btree_add_idx(bt, &i, &bt_idx);
if (zfs_btree_find(bt, &i, &bt_idx) == NULL) {
fprintf(stderr, "Did not find expected value.\n");
return (0);
}
/* Crash on inserting a duplicate */
zfs_btree_add_idx(bt, &i, NULL);
return (0);
}
/*
* Verify removing a non-existent value will cause a crash.
* Note: negative test; return of 0 is a failure.
*/
static int
remove_missing(zfs_btree_t *bt)
{
uint64_t i = 23456;
zfs_btree_index_t bt_idx = {0};
if (zfs_btree_find(bt, &i, &bt_idx) != NULL) {
fprintf(stderr, "Found value in empty tree.\n");
return (0);
}
/* Crash removing a nonexistent entry */
zfs_btree_remove(bt, &i);
return (0);
}
static int
do_negative_test(zfs_btree_t *bt, char *test_name)
{
int rval = 0;
struct rlimit rlim = {0};
(void) setrlimit(RLIMIT_CORE, &rlim);
if (strcmp(test_name, "insert_duplicate") == 0) {
rval = insert_duplicate(bt);
} else if (strcmp(test_name, "remove_missing") == 0) {
rval = remove_missing(bt);
}
/*
* Return 0, since callers will expect non-zero return values for
* these tests, and we should have crashed before getting here anyway.
*/
(void) fprintf(stderr, "Test: %s returned %d.\n", test_name, rval);
return (0);
}
typedef struct btree_test {
const char *name;
int (*func)(zfs_btree_t *, char *);
} btree_test_t;
static btree_test_t test_table[] = {
{ "insert_find_remove", insert_find_remove },
{ "find_without_index", find_without_index },
{ "drain_tree", drain_tree },
{ "stress_tree", stress_tree },
{ NULL, NULL }
};
int
main(int argc, char *argv[])
{
char *negative_test = NULL;
int failed_tests = 0;
struct timeval tp;
zfs_btree_t bt;
int c;
while ((c = getopt(argc, argv, "c:l:n:r:st:")) != -1) {
switch (c) {
case 'c':
contents_frequency = atoi(optarg);
break;
case 'l':
tree_limit = atoi(optarg);
break;
case 'n':
negative_test = optarg;
break;
case 'r':
seed = atoi(optarg);
break;
case 's':
stress_only = B_TRUE;
break;
case 't':
stress_timeout = atoi(optarg);
break;
case 'h':
default:
usage(1);
break;
}
}
if (seed == 0) {
(void) gettimeofday(&tp, NULL);
seed = tp.tv_sec;
}
srandom(seed);
zfs_btree_init();
- zfs_btree_create(&bt, zfs_btree_compare, sizeof (uint64_t));
+ zfs_btree_create(&bt, zfs_btree_compare, NULL, sizeof (uint64_t));
/*
* This runs the named negative test. Neither negative test should
* return, as they both cause crashes.
*/
if (negative_test) {
return (do_negative_test(&bt, negative_test));
}
fprintf(stderr, "Seed: %u\n", seed);
/*
* This is a stress test that does operations on a btree over the
* requested timeout period, verifying them against identical
* operations in an avl tree.
*/
if (stress_only != 0) {
return (stress_tree(&bt, NULL));
}
/* Do the positive tests */
btree_test_t *test = &test_table[0];
while (test->name) {
int retval;
char why[BUFSIZE] = {0};
zfs_btree_index_t *idx = NULL;
(void) fprintf(stdout, "%-20s", test->name);
retval = test->func(&bt, why);
if (retval == 0) {
(void) fprintf(stdout, "ok\n");
} else {
(void) fprintf(stdout, "failed with %d\n", retval);
if (strlen(why) != 0)
(void) fprintf(stdout, "\t%s\n", why);
why[0] = '\0';
failed_tests++;
}
/* Remove all the elements and re-verify the tree */
while (zfs_btree_destroy_nodes(&bt, &idx) != NULL)
;
zfs_btree_verify(&bt);
test++;
}
zfs_btree_verify(&bt);
zfs_btree_fini();
return (failed_tests);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
index ad4aec543299..ff65dc1ac2b0 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
@@ -1,2050 +1,2052 @@
CLEANFILES =
dist_noinst_DATA =
include $(top_srcdir)/config/Substfiles.am
datadir_zfs_tests_testsdir = $(datadir)/$(PACKAGE)/zfs-tests/tests
nobase_dist_datadir_zfs_tests_tests_DATA = \
perf/nfs-sample.cfg \
perf/perf.shlib \
\
perf/fio/mkfiles.fio \
perf/fio/random_reads.fio \
perf/fio/random_readwrite.fio \
perf/fio/random_readwrite_fixed.fio \
perf/fio/random_writes.fio \
perf/fio/sequential_reads.fio \
perf/fio/sequential_readwrite.fio \
perf/fio/sequential_writes.fio
nobase_dist_datadir_zfs_tests_tests_SCRIPTS = \
perf/regression/random_reads.ksh \
perf/regression/random_readwrite.ksh \
perf/regression/random_readwrite_fixed.ksh \
perf/regression/random_writes.ksh \
perf/regression/random_writes_zil.ksh \
perf/regression/sequential_reads_arc_cached_clone.ksh \
perf/regression/sequential_reads_arc_cached.ksh \
perf/regression/sequential_reads_dbuf_cached.ksh \
perf/regression/sequential_reads.ksh \
perf/regression/sequential_writes.ksh \
perf/regression/setup.ksh \
\
perf/scripts/prefetch_io.sh
# These lists can be regenerated by running make regen-tests at the root, or, on a *clean* source:
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable -name '*.in' | sort | sed 's/\.in$//;s/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
# find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po' -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$!s/$/ \\/'
#
# simd and tmpfile are Linux-only and not installed elsewhere
#
# C programs are specced in ../Makefile.am above as part of the main Makefile
find_common := find functional/ ! -type d ! -name .gitignore ! -name .dirstamp ! -name '*.Po'
regen:
@$(MAKE) -C $(top_builddir) clean
@$(MAKE) clean
$(SED) $(ac_inplace) '/^# -- >8 --/q' Makefile.am
echo >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_DATA = \' >> Makefile.am
$(find_common) ! -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \' >> Makefile.am
$(find_common) -executable -name '*.in' | sort | sed 's/\.in$$//;s/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'SUBSTFILES += $$(nobase_nodist_datadir_zfs_tests_tests_DATA) $$(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)' >> Makefile.am
echo >> Makefile.am
echo 'if BUILD_LINUX' >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) ! -name '*.in' ! -name '*.c' | grep -Fe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo 'endif' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_DATA += \' >> Makefile.am
$(find_common) ! -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
echo >> Makefile.am
echo 'nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \' >> Makefile.am
$(find_common) -executable ! -name '*.in' ! -name '*.c' | grep -vFe /simd -e /tmpfile | sort | sed 's/^/\t/;$$!s/$$/ \\/' >> Makefile.am
# -- >8 --
nobase_nodist_datadir_zfs_tests_tests_DATA = \
functional/pam/utilities.kshlib
nobase_nodist_datadir_zfs_tests_tests_SCRIPTS = \
functional/pyzfs/pyzfs_unittest.ksh
SUBSTFILES += $(nobase_nodist_datadir_zfs_tests_tests_DATA) $(nobase_nodist_datadir_zfs_tests_tests_SCRIPTS)
if BUILD_LINUX
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/simd/simd_supported.ksh \
functional/tmpfile/cleanup.ksh \
functional/tmpfile/setup.ksh
endif
nobase_dist_datadir_zfs_tests_tests_DATA += \
functional/acl/acl.cfg \
functional/acl/acl_common.kshlib \
functional/alloc_class/alloc_class.cfg \
functional/alloc_class/alloc_class.kshlib \
functional/atime/atime.cfg \
functional/atime/atime_common.kshlib \
functional/cache/cache.cfg \
functional/cache/cache.kshlib \
functional/cachefile/cachefile.cfg \
functional/cachefile/cachefile.kshlib \
functional/casenorm/casenorm.cfg \
functional/casenorm/casenorm.kshlib \
functional/channel_program/channel_common.kshlib \
functional/channel_program/lua_core/tst.args_to_lua.out \
functional/channel_program/lua_core/tst.args_to_lua.zcp \
functional/channel_program/lua_core/tst.divide_by_zero.err \
functional/channel_program/lua_core/tst.divide_by_zero.zcp \
functional/channel_program/lua_core/tst.exists.zcp \
functional/channel_program/lua_core/tst.large_prog.out \
functional/channel_program/lua_core/tst.large_prog.zcp \
functional/channel_program/lua_core/tst.lib_base.lua \
functional/channel_program/lua_core/tst.lib_coroutine.lua \
functional/channel_program/lua_core/tst.lib_strings.lua \
functional/channel_program/lua_core/tst.lib_table.lua \
functional/channel_program/lua_core/tst.nested_neg.zcp \
functional/channel_program/lua_core/tst.nested_pos.zcp \
functional/channel_program/lua_core/tst.recursive.zcp \
functional/channel_program/lua_core/tst.return_large.zcp \
functional/channel_program/lua_core/tst.return_recursive_table.zcp \
functional/channel_program/lua_core/tst.stack_gsub.err \
functional/channel_program/lua_core/tst.stack_gsub.zcp \
functional/channel_program/lua_core/tst.timeout.zcp \
functional/channel_program/synctask_core/tst.bookmark.copy.zcp \
functional/channel_program/synctask_core/tst.bookmark.create.zcp \
functional/channel_program/synctask_core/tst.get_index_props.out \
functional/channel_program/synctask_core/tst.get_index_props.zcp \
functional/channel_program/synctask_core/tst.get_number_props.out \
functional/channel_program/synctask_core/tst.get_number_props.zcp \
functional/channel_program/synctask_core/tst.get_string_props.out \
functional/channel_program/synctask_core/tst.get_string_props.zcp \
functional/channel_program/synctask_core/tst.promote_conflict.zcp \
functional/channel_program/synctask_core/tst.set_props.zcp \
functional/channel_program/synctask_core/tst.snapshot_destroy.zcp \
functional/channel_program/synctask_core/tst.snapshot_neg.zcp \
functional/channel_program/synctask_core/tst.snapshot_recursive.zcp \
functional/channel_program/synctask_core/tst.snapshot_rename.zcp \
functional/channel_program/synctask_core/tst.snapshot_simple.zcp \
functional/checksum/default.cfg \
functional/clean_mirror/clean_mirror_common.kshlib \
functional/clean_mirror/default.cfg \
functional/cli_root/cli_common.kshlib \
functional/cli_root/zfs_copies/zfs_copies.cfg \
functional/cli_root/zfs_copies/zfs_copies.kshlib \
functional/cli_root/zfs_create/properties.kshlib \
functional/cli_root/zfs_create/zfs_create.cfg \
functional/cli_root/zfs_create/zfs_create_common.kshlib \
functional/cli_root/zfs_destroy/zfs_destroy.cfg \
functional/cli_root/zfs_destroy/zfs_destroy_common.kshlib \
functional/cli_root/zfs_get/zfs_get_common.kshlib \
functional/cli_root/zfs_get/zfs_get_list_d.kshlib \
functional/cli_root/zfs_jail/jail.conf \
functional/cli_root/zfs_load-key/HEXKEY \
functional/cli_root/zfs_load-key/PASSPHRASE \
functional/cli_root/zfs_load-key/RAWKEY \
functional/cli_root/zfs_load-key/zfs_load-key.cfg \
functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib \
functional/cli_root/zfs_mount/zfs_mount.cfg \
functional/cli_root/zfs_mount/zfs_mount.kshlib \
functional/cli_root/zfs_promote/zfs_promote.cfg \
functional/cli_root/zfs_receive/zstd_test_data.txt \
functional/cli_root/zfs_rename/zfs_rename.cfg \
functional/cli_root/zfs_rename/zfs_rename.kshlib \
functional/cli_root/zfs_rollback/zfs_rollback.cfg \
functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib \
functional/cli_root/zfs_send/zfs_send.cfg \
functional/cli_root/zfs_set/zfs_set_common.kshlib \
functional/cli_root/zfs_share/zfs_share.cfg \
functional/cli_root/zfs_snapshot/zfs_snapshot.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.cfg \
functional/cli_root/zfs_unmount/zfs_unmount.kshlib \
functional/cli_root/zfs_upgrade/zfs_upgrade.kshlib \
functional/cli_root/zfs_wait/zfs_wait.kshlib \
functional/cli_root/zpool_add/zpool_add.cfg \
functional/cli_root/zpool_add/zpool_add.kshlib \
functional/cli_root/zpool_clear/zpool_clear.cfg \
functional/cli_root/zpool_create/draidcfg.gz \
functional/cli_root/zpool_create/zpool_create.cfg \
functional/cli_root/zpool_create/zpool_create.shlib \
functional/cli_root/zpool_destroy/zpool_destroy.cfg \
functional/cli_root/zpool_events/zpool_events.cfg \
functional/cli_root/zpool_events/zpool_events.kshlib \
functional/cli_root/zpool_expand/zpool_expand.cfg \
functional/cli_root/zpool_export/zpool_export.cfg \
functional/cli_root/zpool_export/zpool_export.kshlib \
functional/cli_root/zpool_get/vdev_get.cfg \
functional/cli_root/zpool_get/zpool_get.cfg \
functional/cli_root/zpool_get/zpool_get_parsable.cfg \
functional/cli_root/zpool_import/blockfiles/cryptv0.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/missing_ivset.dat.bz2 \
functional/cli_root/zpool_import/blockfiles/unclean_export.dat.bz2 \
functional/cli_root/zpool_import/zpool_import.cfg \
functional/cli_root/zpool_import/zpool_import.kshlib \
functional/cli_root/zpool_initialize/zpool_initialize.kshlib \
functional/cli_root/zpool_labelclear/labelclear.cfg \
functional/cli_root/zpool_remove/zpool_remove.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.cfg \
functional/cli_root/zpool_reopen/zpool_reopen.shlib \
functional/cli_root/zpool_resilver/zpool_resilver.cfg \
functional/cli_root/zpool_scrub/zpool_scrub.cfg \
functional/cli_root/zpool_split/zpool_split.cfg \
functional/cli_root/zpool_trim/zpool_trim.kshlib \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-broken-mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v10.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v11.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v12.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v13.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v14.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v15.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v1stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v2stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3hotspare3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3mirror3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz21.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz22.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz23.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3raidz3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe1.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe2.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v3stripe3.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v4.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v5.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v6.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v7.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v8.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v999.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-v9.dat.bz2 \
functional/cli_root/zpool_upgrade/blockfiles/zfs-pool-vBROKEN.dat.bz2 \
functional/cli_root/zpool_upgrade/zpool_upgrade.cfg \
functional/cli_root/zpool_upgrade/zpool_upgrade.kshlib \
functional/cli_root/zpool_wait/zpool_wait.kshlib \
functional/cli_root/zhack/library.kshlib \
functional/cli_user/misc/misc.cfg \
functional/cli_user/zfs_list/zfs_list.cfg \
functional/cli_user/zfs_list/zfs_list.kshlib \
functional/compression/compress.cfg \
functional/compression/testpool_zstd.tar.gz \
functional/deadman/deadman.cfg \
functional/delegate/delegate.cfg \
functional/delegate/delegate_common.kshlib \
functional/devices/devices.cfg \
functional/devices/devices_common.kshlib \
functional/events/events.cfg \
functional/events/events_common.kshlib \
functional/fault/fault.cfg \
functional/grow/grow.cfg \
functional/history/history.cfg \
functional/history/history_common.kshlib \
functional/history/i386.migratedpool.DAT.Z \
functional/history/i386.orig_history.txt \
functional/history/sparc.migratedpool.DAT.Z \
functional/history/sparc.orig_history.txt \
functional/history/zfs-pool-v4.dat.Z \
functional/inheritance/config001.cfg \
functional/inheritance/config002.cfg \
functional/inheritance/config003.cfg \
functional/inheritance/config004.cfg \
functional/inheritance/config005.cfg \
functional/inheritance/config006.cfg \
functional/inheritance/config007.cfg \
functional/inheritance/config008.cfg \
functional/inheritance/config009.cfg \
functional/inheritance/config010.cfg \
functional/inheritance/config011.cfg \
functional/inheritance/config012.cfg \
functional/inheritance/config013.cfg \
functional/inheritance/config014.cfg \
functional/inheritance/config015.cfg \
functional/inheritance/config016.cfg \
functional/inheritance/config017.cfg \
functional/inheritance/config018.cfg \
functional/inheritance/config019.cfg \
functional/inheritance/config020.cfg \
functional/inheritance/config021.cfg \
functional/inheritance/config022.cfg \
functional/inheritance/config023.cfg \
functional/inheritance/config024.cfg \
functional/inheritance/inherit.kshlib \
functional/inheritance/README.config \
functional/inheritance/README.state \
functional/inheritance/state001.cfg \
functional/inheritance/state002.cfg \
functional/inheritance/state003.cfg \
functional/inheritance/state004.cfg \
functional/inheritance/state005.cfg \
functional/inheritance/state006.cfg \
functional/inheritance/state007.cfg \
functional/inheritance/state008.cfg \
functional/inheritance/state009.cfg \
functional/inheritance/state010.cfg \
functional/inheritance/state011.cfg \
functional/inheritance/state012.cfg \
functional/inheritance/state013.cfg \
functional/inheritance/state014.cfg \
functional/inheritance/state015.cfg \
functional/inheritance/state016.cfg \
functional/inheritance/state017.cfg \
functional/inheritance/state018.cfg \
functional/inheritance/state019.cfg \
functional/inheritance/state020.cfg \
functional/inheritance/state021.cfg \
functional/inheritance/state022.cfg \
functional/inheritance/state023.cfg \
functional/inheritance/state024.cfg \
functional/inuse/inuse.cfg \
functional/io/io.cfg \
functional/l2arc/l2arc.cfg \
functional/largest_pool/largest_pool.cfg \
functional/migration/migration.cfg \
functional/migration/migration.kshlib \
functional/mmap/mmap.cfg \
functional/mmp/mmp.cfg \
functional/mmp/mmp.kshlib \
functional/mv_files/mv_files.cfg \
functional/mv_files/mv_files_common.kshlib \
functional/nopwrite/nopwrite.shlib \
functional/no_space/enospc.cfg \
functional/online_offline/online_offline.cfg \
functional/pool_checkpoint/pool_checkpoint.kshlib \
functional/projectquota/projectquota.cfg \
functional/projectquota/projectquota_common.kshlib \
functional/quota/quota.cfg \
functional/quota/quota.kshlib \
functional/redacted_send/redacted.cfg \
functional/redacted_send/redacted.kshlib \
functional/redundancy/redundancy.cfg \
functional/redundancy/redundancy.kshlib \
functional/refreserv/refreserv.cfg \
functional/removal/removal.kshlib \
functional/replacement/replacement.cfg \
functional/reservation/reservation.cfg \
functional/reservation/reservation.shlib \
functional/rsend/dedup_encrypted_zvol.bz2 \
functional/rsend/dedup_encrypted_zvol.zsend.bz2 \
functional/rsend/dedup.zsend.bz2 \
functional/rsend/fs.tar.gz \
functional/rsend/rsend.cfg \
functional/rsend/rsend.kshlib \
functional/scrub_mirror/default.cfg \
functional/scrub_mirror/scrub_mirror_common.kshlib \
functional/slog/slog.cfg \
functional/slog/slog.kshlib \
functional/snapshot/snapshot.cfg \
functional/snapused/snapused.kshlib \
functional/sparse/sparse.cfg \
functional/trim/trim.cfg \
functional/trim/trim.kshlib \
functional/truncate/truncate.cfg \
functional/upgrade/upgrade_common.kshlib \
functional/user_namespace/user_namespace.cfg \
functional/user_namespace/user_namespace_common.kshlib \
functional/userquota/13709_reproducer.bz2 \
functional/userquota/userquota.cfg \
functional/userquota/userquota_common.kshlib \
functional/vdev_zaps/vdev_zaps.kshlib \
functional/xattr/xattr.cfg \
functional/xattr/xattr_common.kshlib \
functional/zvol/zvol.cfg \
functional/zvol/zvol_cli/zvol_cli.cfg \
functional/zvol/zvol_common.shlib \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC.cfg \
functional/zvol/zvol_misc/zvol_misc_common.kshlib \
functional/zvol/zvol_swap/zvol_swap.cfg \
functional/idmap_mount/idmap_mount.cfg \
functional/idmap_mount/idmap_mount_common.kshlib
nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/acl/off/cleanup.ksh \
functional/acl/off/dosmode.ksh \
functional/acl/off/posixmode.ksh \
functional/acl/off/setup.ksh \
functional/acl/posix/cleanup.ksh \
functional/acl/posix/posix_001_pos.ksh \
functional/acl/posix/posix_002_pos.ksh \
functional/acl/posix/posix_003_pos.ksh \
functional/acl/posix/posix_004_pos.ksh \
functional/acl/posix-sa/cleanup.ksh \
functional/acl/posix-sa/posix_001_pos.ksh \
functional/acl/posix-sa/posix_002_pos.ksh \
functional/acl/posix-sa/posix_003_pos.ksh \
functional/acl/posix-sa/posix_004_pos.ksh \
functional/acl/posix-sa/setup.ksh \
functional/acl/posix/setup.ksh \
functional/alloc_class/alloc_class_001_pos.ksh \
functional/alloc_class/alloc_class_002_neg.ksh \
functional/alloc_class/alloc_class_003_pos.ksh \
functional/alloc_class/alloc_class_004_pos.ksh \
functional/alloc_class/alloc_class_005_pos.ksh \
functional/alloc_class/alloc_class_006_pos.ksh \
functional/alloc_class/alloc_class_007_pos.ksh \
functional/alloc_class/alloc_class_008_pos.ksh \
functional/alloc_class/alloc_class_009_pos.ksh \
functional/alloc_class/alloc_class_010_pos.ksh \
functional/alloc_class/alloc_class_011_neg.ksh \
functional/alloc_class/alloc_class_012_pos.ksh \
functional/alloc_class/alloc_class_013_pos.ksh \
functional/alloc_class/alloc_class_014_neg.ksh \
functional/alloc_class/alloc_class_015_pos.ksh \
functional/alloc_class/cleanup.ksh \
functional/alloc_class/setup.ksh \
functional/append/file_append.ksh \
functional/append/threadsappend_001_pos.ksh \
functional/append/cleanup.ksh \
functional/append/setup.ksh \
functional/arc/arcstats_runtime_tuning.ksh \
functional/arc/cleanup.ksh \
functional/arc/dbufstats_001_pos.ksh \
functional/arc/dbufstats_002_pos.ksh \
functional/arc/dbufstats_003_pos.ksh \
functional/arc/setup.ksh \
functional/atime/atime_001_pos.ksh \
functional/atime/atime_002_neg.ksh \
functional/atime/atime_003_pos.ksh \
functional/atime/cleanup.ksh \
functional/atime/root_atime_off.ksh \
functional/atime/root_atime_on.ksh \
functional/atime/root_relatime_on.ksh \
functional/atime/setup.ksh \
functional/bootfs/bootfs_001_pos.ksh \
functional/bootfs/bootfs_002_neg.ksh \
functional/bootfs/bootfs_003_pos.ksh \
functional/bootfs/bootfs_004_neg.ksh \
functional/bootfs/bootfs_005_neg.ksh \
functional/bootfs/bootfs_006_pos.ksh \
functional/bootfs/bootfs_007_pos.ksh \
functional/bootfs/bootfs_008_pos.ksh \
functional/bootfs/cleanup.ksh \
functional/bootfs/setup.ksh \
functional/btree/btree_negative.ksh \
functional/btree/btree_positive.ksh \
functional/cache/cache_001_pos.ksh \
functional/cache/cache_002_pos.ksh \
functional/cache/cache_003_pos.ksh \
functional/cache/cache_004_neg.ksh \
functional/cache/cache_005_neg.ksh \
functional/cache/cache_006_pos.ksh \
functional/cache/cache_007_neg.ksh \
functional/cache/cache_008_neg.ksh \
functional/cache/cache_009_pos.ksh \
functional/cache/cache_010_pos.ksh \
functional/cache/cache_011_pos.ksh \
functional/cache/cache_012_pos.ksh \
functional/cache/cleanup.ksh \
functional/cachefile/cachefile_001_pos.ksh \
functional/cachefile/cachefile_002_pos.ksh \
functional/cachefile/cachefile_003_pos.ksh \
functional/cachefile/cachefile_004_pos.ksh \
functional/cachefile/cleanup.ksh \
functional/cachefile/setup.ksh \
functional/cache/setup.ksh \
functional/casenorm/case_all_values.ksh \
functional/casenorm/cleanup.ksh \
functional/casenorm/insensitive_formd_delete.ksh \
functional/casenorm/insensitive_formd_lookup.ksh \
functional/casenorm/insensitive_none_delete.ksh \
functional/casenorm/insensitive_none_lookup.ksh \
functional/casenorm/mixed_create_failure.ksh \
functional/casenorm/mixed_formd_delete.ksh \
functional/casenorm/mixed_formd_lookup_ci.ksh \
functional/casenorm/mixed_formd_lookup.ksh \
functional/casenorm/mixed_none_delete.ksh \
functional/casenorm/mixed_none_lookup_ci.ksh \
functional/casenorm/mixed_none_lookup.ksh \
functional/casenorm/norm_all_values.ksh \
functional/casenorm/sensitive_formd_delete.ksh \
functional/casenorm/sensitive_formd_lookup.ksh \
functional/casenorm/sensitive_none_delete.ksh \
functional/casenorm/sensitive_none_lookup.ksh \
functional/casenorm/setup.ksh \
functional/channel_program/lua_core/cleanup.ksh \
functional/channel_program/lua_core/setup.ksh \
functional/channel_program/lua_core/tst.args_to_lua.ksh \
functional/channel_program/lua_core/tst.divide_by_zero.ksh \
functional/channel_program/lua_core/tst.exists.ksh \
functional/channel_program/lua_core/tst.integer_illegal.ksh \
functional/channel_program/lua_core/tst.integer_overflow.ksh \
functional/channel_program/lua_core/tst.language_functions_neg.ksh \
functional/channel_program/lua_core/tst.language_functions_pos.ksh \
functional/channel_program/lua_core/tst.large_prog.ksh \
functional/channel_program/lua_core/tst.libraries.ksh \
functional/channel_program/lua_core/tst.memory_limit.ksh \
functional/channel_program/lua_core/tst.nested_neg.ksh \
functional/channel_program/lua_core/tst.nested_pos.ksh \
functional/channel_program/lua_core/tst.nvlist_to_lua.ksh \
functional/channel_program/lua_core/tst.recursive_neg.ksh \
functional/channel_program/lua_core/tst.recursive_pos.ksh \
functional/channel_program/lua_core/tst.return_large.ksh \
functional/channel_program/lua_core/tst.return_nvlist_neg.ksh \
functional/channel_program/lua_core/tst.return_nvlist_pos.ksh \
functional/channel_program/lua_core/tst.return_recursive_table.ksh \
functional/channel_program/lua_core/tst.stack_gsub.ksh \
functional/channel_program/lua_core/tst.timeout.ksh \
functional/channel_program/synctask_core/cleanup.ksh \
functional/channel_program/synctask_core/setup.ksh \
functional/channel_program/synctask_core/tst.bookmark.copy.ksh \
functional/channel_program/synctask_core/tst.bookmark.create.ksh \
functional/channel_program/synctask_core/tst.destroy_fs.ksh \
functional/channel_program/synctask_core/tst.destroy_snap.ksh \
functional/channel_program/synctask_core/tst.get_count_and_limit.ksh \
functional/channel_program/synctask_core/tst.get_index_props.ksh \
functional/channel_program/synctask_core/tst.get_mountpoint.ksh \
functional/channel_program/synctask_core/tst.get_neg.ksh \
functional/channel_program/synctask_core/tst.get_number_props.ksh \
functional/channel_program/synctask_core/tst.get_string_props.ksh \
functional/channel_program/synctask_core/tst.get_type.ksh \
functional/channel_program/synctask_core/tst.get_userquota.ksh \
functional/channel_program/synctask_core/tst.get_written.ksh \
functional/channel_program/synctask_core/tst.inherit.ksh \
functional/channel_program/synctask_core/tst.list_bookmarks.ksh \
functional/channel_program/synctask_core/tst.list_children.ksh \
functional/channel_program/synctask_core/tst.list_clones.ksh \
functional/channel_program/synctask_core/tst.list_holds.ksh \
functional/channel_program/synctask_core/tst.list_snapshots.ksh \
functional/channel_program/synctask_core/tst.list_system_props.ksh \
functional/channel_program/synctask_core/tst.list_user_props.ksh \
functional/channel_program/synctask_core/tst.parse_args_neg.ksh \
functional/channel_program/synctask_core/tst.promote_conflict.ksh \
functional/channel_program/synctask_core/tst.promote_multiple.ksh \
functional/channel_program/synctask_core/tst.promote_simple.ksh \
functional/channel_program/synctask_core/tst.rollback_mult.ksh \
functional/channel_program/synctask_core/tst.rollback_one.ksh \
functional/channel_program/synctask_core/tst.set_props.ksh \
functional/channel_program/synctask_core/tst.snapshot_destroy.ksh \
functional/channel_program/synctask_core/tst.snapshot_neg.ksh \
functional/channel_program/synctask_core/tst.snapshot_recursive.ksh \
functional/channel_program/synctask_core/tst.snapshot_rename.ksh \
functional/channel_program/synctask_core/tst.snapshot_simple.ksh \
functional/channel_program/synctask_core/tst.terminate_by_signal.ksh \
functional/chattr/chattr_001_pos.ksh \
functional/chattr/chattr_002_neg.ksh \
functional/chattr/cleanup.ksh \
functional/chattr/setup.ksh \
functional/checksum/cleanup.ksh \
functional/checksum/filetest_001_pos.ksh \
functional/checksum/filetest_002_pos.ksh \
functional/checksum/run_blake3_test.ksh \
functional/checksum/run_edonr_test.ksh \
functional/checksum/run_sha2_test.ksh \
functional/checksum/run_skein_test.ksh \
functional/checksum/setup.ksh \
functional/clean_mirror/clean_mirror_001_pos.ksh \
functional/clean_mirror/clean_mirror_002_pos.ksh \
functional/clean_mirror/clean_mirror_003_pos.ksh \
functional/clean_mirror/clean_mirror_004_pos.ksh \
functional/clean_mirror/cleanup.ksh \
functional/clean_mirror/setup.ksh \
functional/cli_root/zdb/zdb_002_pos.ksh \
functional/cli_root/zdb/zdb_003_pos.ksh \
functional/cli_root/zdb/zdb_004_pos.ksh \
functional/cli_root/zdb/zdb_005_pos.ksh \
functional/cli_root/zdb/zdb_006_pos.ksh \
functional/cli_root/zdb/zdb_args_neg.ksh \
functional/cli_root/zdb/zdb_args_pos.ksh \
+ functional/cli_root/zdb/zdb_backup.ksh \
functional/cli_root/zdb/zdb_block_size_histogram.ksh \
functional/cli_root/zdb/zdb_checksum.ksh \
functional/cli_root/zdb/zdb_decompress.ksh \
functional/cli_root/zdb/zdb_decompress_zstd.ksh \
functional/cli_root/zdb/zdb_display_block.ksh \
functional/cli_root/zdb/zdb_encrypted.ksh \
functional/cli_root/zdb/zdb_label_checksum.ksh \
functional/cli_root/zdb/zdb_object_range_neg.ksh \
functional/cli_root/zdb/zdb_object_range_pos.ksh \
functional/cli_root/zdb/zdb_objset_id.ksh \
functional/cli_root/zdb/zdb_recover_2.ksh \
functional/cli_root/zdb/zdb_recover.ksh \
functional/cli_root/zfs_bookmark/cleanup.ksh \
functional/cli_root/zfs_bookmark/setup.ksh \
functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh \
functional/cli_root/zfs_change-key/cleanup.ksh \
functional/cli_root/zfs_change-key/setup.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_child.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_clones.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_format.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_inherit.ksh \
functional/cli_root/zfs_change-key/zfs_change-key.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_load.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_location.ksh \
functional/cli_root/zfs_change-key/zfs_change-key_pbkdf2iters.ksh \
functional/cli_root/zfs/cleanup.ksh \
functional/cli_root/zfs_clone/cleanup.ksh \
functional/cli_root/zfs_clone/setup.ksh \
functional/cli_root/zfs_clone/zfs_clone_001_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_002_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_003_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_004_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_005_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_006_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_007_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_008_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_009_neg.ksh \
functional/cli_root/zfs_clone/zfs_clone_010_pos.ksh \
functional/cli_root/zfs_clone/zfs_clone_deeply_nested.ksh \
functional/cli_root/zfs_clone/zfs_clone_encrypted.ksh \
functional/cli_root/zfs_clone/zfs_clone_rm_nested.ksh \
functional/cli_root/zfs_copies/cleanup.ksh \
functional/cli_root/zfs_copies/setup.ksh \
functional/cli_root/zfs_copies/zfs_copies_001_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_003_pos.ksh \
functional/cli_root/zfs_copies/zfs_copies_004_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_005_neg.ksh \
functional/cli_root/zfs_copies/zfs_copies_006_pos.ksh \
functional/cli_root/zfs_create/cleanup.ksh \
functional/cli_root/zfs_create/setup.ksh \
functional/cli_root/zfs_create/zfs_create_001_pos.ksh \
functional/cli_root/zfs_create/zfs_create_002_pos.ksh \
functional/cli_root/zfs_create/zfs_create_003_pos.ksh \
functional/cli_root/zfs_create/zfs_create_004_pos.ksh \
functional/cli_root/zfs_create/zfs_create_005_pos.ksh \
functional/cli_root/zfs_create/zfs_create_006_pos.ksh \
functional/cli_root/zfs_create/zfs_create_007_pos.ksh \
functional/cli_root/zfs_create/zfs_create_008_neg.ksh \
functional/cli_root/zfs_create/zfs_create_009_neg.ksh \
functional/cli_root/zfs_create/zfs_create_010_neg.ksh \
functional/cli_root/zfs_create/zfs_create_011_pos.ksh \
functional/cli_root/zfs_create/zfs_create_012_pos.ksh \
functional/cli_root/zfs_create/zfs_create_013_pos.ksh \
functional/cli_root/zfs_create/zfs_create_014_pos.ksh \
functional/cli_root/zfs_create/zfs_create_crypt_combos.ksh \
functional/cli_root/zfs_create/zfs_create_dryrun.ksh \
functional/cli_root/zfs_create/zfs_create_encrypted.ksh \
functional/cli_root/zfs_create/zfs_create_nomount.ksh \
functional/cli_root/zfs_create/zfs_create_verbose.ksh \
functional/cli_root/zfs_destroy/cleanup.ksh \
functional/cli_root/zfs_destroy/setup.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh \
functional/cli_root/zfs_destroy/zfs_clone_livelist_dedup.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_001_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_002_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_003_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_005_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_006_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_007_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_008_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_009_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_010_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_011_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_012_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_013_neg.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_014_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_016_pos.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh \
functional/cli_root/zfs_destroy/zfs_destroy_dev_removal.ksh \
functional/cli_root/zfs_diff/cleanup.ksh \
functional/cli_root/zfs_diff/setup.ksh \
functional/cli_root/zfs_diff/zfs_diff_changes.ksh \
functional/cli_root/zfs_diff/zfs_diff_cliargs.ksh \
functional/cli_root/zfs_diff/zfs_diff_encrypted.ksh \
functional/cli_root/zfs_diff/zfs_diff_mangle.ksh \
functional/cli_root/zfs_diff/zfs_diff_timestamp.ksh \
functional/cli_root/zfs_diff/zfs_diff_types.ksh \
functional/cli_root/zfs_get/cleanup.ksh \
functional/cli_root/zfs_get/setup.ksh \
functional/cli_root/zfs_get/zfs_get_001_pos.ksh \
functional/cli_root/zfs_get/zfs_get_002_pos.ksh \
functional/cli_root/zfs_get/zfs_get_003_pos.ksh \
functional/cli_root/zfs_get/zfs_get_004_pos.ksh \
functional/cli_root/zfs_get/zfs_get_005_neg.ksh \
functional/cli_root/zfs_get/zfs_get_006_neg.ksh \
functional/cli_root/zfs_get/zfs_get_007_neg.ksh \
functional/cli_root/zfs_get/zfs_get_008_pos.ksh \
functional/cli_root/zfs_get/zfs_get_009_pos.ksh \
functional/cli_root/zfs_get/zfs_get_010_neg.ksh \
functional/cli_root/zfs_ids_to_path/cleanup.ksh \
functional/cli_root/zfs_ids_to_path/setup.ksh \
functional/cli_root/zfs_ids_to_path/zfs_ids_to_path_001_pos.ksh \
functional/cli_root/zfs_inherit/cleanup.ksh \
functional/cli_root/zfs_inherit/setup.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_001_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_002_neg.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_003_pos.ksh \
functional/cli_root/zfs_inherit/zfs_inherit_mountpoint.ksh \
functional/cli_root/zfs_jail/cleanup.ksh \
functional/cli_root/zfs_jail/setup.ksh \
functional/cli_root/zfs_jail/zfs_jail_001_pos.ksh \
functional/cli_root/zfs_load-key/cleanup.ksh \
functional/cli_root/zfs_load-key/setup.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_all.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_file.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_https.ksh \
functional/cli_root/zfs_load-key/zfs_load-key.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_location.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_noop.ksh \
functional/cli_root/zfs_load-key/zfs_load-key_recursive.ksh \
functional/cli_root/zfs_mount/cleanup.ksh \
functional/cli_root/zfs_mount/setup.ksh \
functional/cli_root/zfs_mount/zfs_mount_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_002_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_003_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_004_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_005_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_006_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_008_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_009_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_010_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_011_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_012_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_013_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_014_neg.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_001_pos.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_fail.ksh \
functional/cli_root/zfs_mount/zfs_mount_all_mountpoints.ksh \
functional/cli_root/zfs_mount/zfs_mount_encrypted.ksh \
functional/cli_root/zfs_mount/zfs_mount_remount.ksh \
functional/cli_root/zfs_mount/zfs_mount_test_race.ksh \
functional/cli_root/zfs_mount/zfs_multi_mount.ksh \
functional/cli_root/zfs_program/cleanup.ksh \
functional/cli_root/zfs_program/setup.ksh \
functional/cli_root/zfs_program/zfs_program_json.ksh \
functional/cli_root/zfs_promote/cleanup.ksh \
functional/cli_root/zfs_promote/setup.ksh \
functional/cli_root/zfs_promote/zfs_promote_001_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_002_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_003_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_004_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_005_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_006_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_007_neg.ksh \
functional/cli_root/zfs_promote/zfs_promote_008_pos.ksh \
functional/cli_root/zfs_promote/zfs_promote_encryptionroot.ksh \
functional/cli_root/zfs_property/cleanup.ksh \
functional/cli_root/zfs_property/setup.ksh \
functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh \
functional/cli_root/zfs_receive/cleanup.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_aliases.ksh \
functional/cli_root/zfs_receive/receive-o-x_props_override.ksh \
functional/cli_root/zfs_receive/setup.ksh \
functional/cli_root/zfs_receive/zfs_receive_001_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_002_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_003_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_004_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_005_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_006_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_007_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_008_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_009_neg.ksh \
functional/cli_root/zfs_receive/zfs_receive_010_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_011_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_012_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_013_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_014_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_015_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_016_pos.ksh \
functional/cli_root/zfs_receive/zfs_receive_-e.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_from_zstd.ksh \
functional/cli_root/zfs_receive/zfs_receive_new_props.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_-d.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw_incremental.ksh \
functional/cli_root/zfs_receive/zfs_receive_raw.ksh \
functional/cli_root/zfs_receive/zfs_receive_to_encrypted.ksh \
functional/cli_root/zfs_receive/zfs_receive_-wR-encrypted-mix.ksh \
functional/cli_root/zfs_receive/zfs_receive_corrective.ksh \
functional/cli_root/zfs_receive/zfs_receive_compressed_corrective.ksh \
functional/cli_root/zfs_receive/zfs_receive_large_block_corrective.ksh \
functional/cli_root/zfs_rename/cleanup.ksh \
functional/cli_root/zfs_rename/setup.ksh \
functional/cli_root/zfs_rename/zfs_rename_001_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_002_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_003_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_004_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_007_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_008_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_009_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_010_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_011_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_012_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_013_pos.ksh \
functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh \
functional/cli_root/zfs_rename/zfs_rename_encrypted_child.ksh \
functional/cli_root/zfs_rename/zfs_rename_mountpoint.ksh \
functional/cli_root/zfs_rename/zfs_rename_nounmount.ksh \
functional/cli_root/zfs_rename/zfs_rename_to_encrypted.ksh \
functional/cli_root/zfs_reservation/cleanup.ksh \
functional/cli_root/zfs_reservation/setup.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_001_pos.ksh \
functional/cli_root/zfs_reservation/zfs_reservation_002_pos.ksh \
functional/cli_root/zfs_rollback/cleanup.ksh \
functional/cli_root/zfs_rollback/setup.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_001_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_002_pos.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_003_neg.ksh \
functional/cli_root/zfs_rollback/zfs_rollback_004_neg.ksh \
functional/cli_root/zfs_send/cleanup.ksh \
functional/cli_root/zfs_send/setup.ksh \
functional/cli_root/zfs_send/zfs_send_001_pos.ksh \
functional/cli_root/zfs_send/zfs_send_002_pos.ksh \
functional/cli_root/zfs_send/zfs_send_003_pos.ksh \
functional/cli_root/zfs_send/zfs_send_004_neg.ksh \
functional/cli_root/zfs_send/zfs_send_005_pos.ksh \
functional/cli_root/zfs_send/zfs_send_006_pos.ksh \
functional/cli_root/zfs_send/zfs_send_007_pos.ksh \
functional/cli_root/zfs_send/zfs_send-b.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted.ksh \
functional/cli_root/zfs_send/zfs_send_encrypted_unloaded.ksh \
functional/cli_root/zfs_send/zfs_send_raw.ksh \
functional/cli_root/zfs_send/zfs_send_skip_missing.ksh \
functional/cli_root/zfs_send/zfs_send_sparse.ksh \
functional/cli_root/zfs_set/cache_001_pos.ksh \
functional/cli_root/zfs_set/cache_002_neg.ksh \
functional/cli_root/zfs_set/canmount_001_pos.ksh \
functional/cli_root/zfs_set/canmount_002_pos.ksh \
functional/cli_root/zfs_set/canmount_003_pos.ksh \
functional/cli_root/zfs_set/canmount_004_pos.ksh \
functional/cli_root/zfs_set/checksum_001_pos.ksh \
functional/cli_root/zfs_set/cleanup.ksh \
functional/cli_root/zfs_set/compression_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_001_pos.ksh \
functional/cli_root/zfs_set/mountpoint_002_pos.ksh \
functional/cli_root/zfs_set/mountpoint_003_pos.ksh \
functional/cli_root/zfs_set/onoffs_001_pos.ksh \
functional/cli_root/zfs_set/property_alias_001_pos.ksh \
functional/cli_root/zfs_set/readonly_001_pos.ksh \
functional/cli_root/zfs_set/reservation_001_neg.ksh \
functional/cli_root/zfs_set/ro_props_001_pos.ksh \
functional/cli_root/zfs_set/setup.ksh \
functional/cli_root/zfs_set/share_mount_001_neg.ksh \
functional/cli_root/zfs_set/snapdir_001_pos.ksh \
functional/cli_root/zfs/setup.ksh \
functional/cli_root/zfs_set/user_property_001_pos.ksh \
functional/cli_root/zfs_set/user_property_002_pos.ksh \
functional/cli_root/zfs_set/user_property_003_neg.ksh \
functional/cli_root/zfs_set/user_property_004_pos.ksh \
functional/cli_root/zfs_set/version_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_001_neg.ksh \
functional/cli_root/zfs_set/zfs_set_002_neg.ksh \
functional/cli_root/zfs_set/zfs_set_003_neg.ksh \
functional/cli_root/zfs_set/zfs_set_feature_activation.ksh \
functional/cli_root/zfs_set/zfs_set_keylocation.ksh \
functional/cli_root/zfs_share/cleanup.ksh \
functional/cli_root/zfs_share/setup.ksh \
functional/cli_root/zfs_share/zfs_share_001_pos.ksh \
functional/cli_root/zfs_share/zfs_share_002_pos.ksh \
functional/cli_root/zfs_share/zfs_share_003_pos.ksh \
functional/cli_root/zfs_share/zfs_share_004_pos.ksh \
functional/cli_root/zfs_share/zfs_share_005_pos.ksh \
functional/cli_root/zfs_share/zfs_share_006_pos.ksh \
functional/cli_root/zfs_share/zfs_share_007_neg.ksh \
functional/cli_root/zfs_share/zfs_share_008_neg.ksh \
functional/cli_root/zfs_share/zfs_share_009_neg.ksh \
functional/cli_root/zfs_share/zfs_share_010_neg.ksh \
functional/cli_root/zfs_share/zfs_share_011_pos.ksh \
functional/cli_root/zfs_share/zfs_share_012_pos.ksh \
functional/cli_root/zfs_share/zfs_share_013_pos.ksh \
functional/cli_root/zfs_share/zfs_share_concurrent_shares.ksh \
functional/cli_root/zfs_snapshot/cleanup.ksh \
functional/cli_root/zfs_snapshot/setup.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_001_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_002_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_003_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_004_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_005_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_006_pos.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_007_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh \
functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh \
functional/cli_root/zfs_sysfs/cleanup.ksh \
functional/cli_root/zfs_sysfs/setup.ksh \
functional/cli_root/zfs_sysfs/zfeature_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_set_unsupported.ksh \
functional/cli_root/zfs_sysfs/zfs_sysfs_live.ksh \
functional/cli_root/zfs_sysfs/zpool_get_unsupported.ksh \
functional/cli_root/zfs_sysfs/zpool_set_unsupported.ksh \
functional/cli_root/zfs_unload-key/cleanup.ksh \
functional/cli_root/zfs_unload-key/setup.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_all.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key.ksh \
functional/cli_root/zfs_unload-key/zfs_unload-key_recursive.ksh \
functional/cli_root/zfs_unmount/cleanup.ksh \
functional/cli_root/zfs_unmount/setup.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_002_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_003_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_004_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_005_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_006_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_007_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_008_neg.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_all_001_pos.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_nested.ksh \
functional/cli_root/zfs_unmount/zfs_unmount_unload_keys.ksh \
functional/cli_root/zfs_unshare/cleanup.ksh \
functional/cli_root/zfs_unshare/setup.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_001_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_002_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_003_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_004_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_005_neg.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_006_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_007_pos.ksh \
functional/cli_root/zfs_unshare/zfs_unshare_008_pos.ksh \
functional/cli_root/zfs_upgrade/cleanup.ksh \
functional/cli_root/zfs_upgrade/setup.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_001_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_002_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_003_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_004_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_005_pos.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_006_neg.ksh \
functional/cli_root/zfs_upgrade/zfs_upgrade_007_neg.ksh \
functional/cli_root/zfs_wait/cleanup.ksh \
functional/cli_root/zfs_wait/setup.ksh \
functional/cli_root/zfs_wait/zfs_wait_deleteq.ksh \
functional/cli_root/zfs_wait/zfs_wait_getsubopt.ksh \
functional/cli_root/zfs/zfs_001_neg.ksh \
functional/cli_root/zfs/zfs_002_pos.ksh \
functional/cli_root/zfs/zfs_003_neg.ksh \
functional/cli_root/zhack/zhack_label_repair_001.ksh \
functional/cli_root/zhack/zhack_label_repair_002.ksh \
functional/cli_root/zhack/zhack_label_repair_003.ksh \
functional/cli_root/zhack/zhack_label_repair_004.ksh \
functional/cli_root/zpool_add/add_nested_replacing_spare.ksh \
functional/cli_root/zpool_add/add-o_ashift.ksh \
functional/cli_root/zpool_add/add_prop_ashift.ksh \
functional/cli_root/zpool_add/cleanup.ksh \
functional/cli_root/zpool_add/setup.ksh \
functional/cli_root/zpool_add/zpool_add_001_pos.ksh \
functional/cli_root/zpool_add/zpool_add_002_pos.ksh \
functional/cli_root/zpool_add/zpool_add_003_pos.ksh \
functional/cli_root/zpool_add/zpool_add_004_pos.ksh \
functional/cli_root/zpool_add/zpool_add_005_pos.ksh \
functional/cli_root/zpool_add/zpool_add_006_pos.ksh \
functional/cli_root/zpool_add/zpool_add_007_neg.ksh \
functional/cli_root/zpool_add/zpool_add_008_neg.ksh \
functional/cli_root/zpool_add/zpool_add_009_neg.ksh \
functional/cli_root/zpool_add/zpool_add_010_pos.ksh \
functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh \
functional/cli_root/zpool_attach/attach-o_ashift.ksh \
functional/cli_root/zpool_attach/cleanup.ksh \
functional/cli_root/zpool_attach/setup.ksh \
functional/cli_root/zpool_attach/zpool_attach_001_neg.ksh \
functional/cli_root/zpool/cleanup.ksh \
functional/cli_root/zpool_clear/cleanup.ksh \
functional/cli_root/zpool_clear/setup.ksh \
functional/cli_root/zpool_clear/zpool_clear_001_pos.ksh \
functional/cli_root/zpool_clear/zpool_clear_002_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_003_neg.ksh \
functional/cli_root/zpool_clear/zpool_clear_readonly.ksh \
functional/cli_root/zpool_create/cleanup.ksh \
functional/cli_root/zpool_create/create-o_ashift.ksh \
functional/cli_root/zpool_create/setup.ksh \
functional/cli_root/zpool_create/zpool_create_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_007_neg.ksh \
functional/cli_root/zpool_create/zpool_create_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_009_neg.ksh \
functional/cli_root/zpool_create/zpool_create_010_neg.ksh \
functional/cli_root/zpool_create/zpool_create_011_neg.ksh \
functional/cli_root/zpool_create/zpool_create_012_neg.ksh \
functional/cli_root/zpool_create/zpool_create_014_neg.ksh \
functional/cli_root/zpool_create/zpool_create_015_neg.ksh \
functional/cli_root/zpool_create/zpool_create_016_pos.ksh \
functional/cli_root/zpool_create/zpool_create_017_neg.ksh \
functional/cli_root/zpool_create/zpool_create_018_pos.ksh \
functional/cli_root/zpool_create/zpool_create_019_pos.ksh \
functional/cli_root/zpool_create/zpool_create_020_pos.ksh \
functional/cli_root/zpool_create/zpool_create_021_pos.ksh \
functional/cli_root/zpool_create/zpool_create_022_pos.ksh \
functional/cli_root/zpool_create/zpool_create_023_neg.ksh \
functional/cli_root/zpool_create/zpool_create_024_pos.ksh \
functional/cli_root/zpool_create/zpool_create_crypt_combos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_draid_004_pos.ksh \
functional/cli_root/zpool_create/zpool_create_dryrun_output.ksh \
functional/cli_root/zpool_create/zpool_create_encrypted.ksh \
functional/cli_root/zpool_create/zpool_create_features_001_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_002_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_003_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_004_neg.ksh \
functional/cli_root/zpool_create/zpool_create_features_005_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_006_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_007_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_008_pos.ksh \
functional/cli_root/zpool_create/zpool_create_features_009_pos.ksh \
functional/cli_root/zpool_create/zpool_create_tempname.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_001_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_002_pos.ksh \
functional/cli_root/zpool_destroy/zpool_destroy_003_neg.ksh \
functional/cli_root/zpool_detach/cleanup.ksh \
functional/cli_root/zpool_detach/setup.ksh \
functional/cli_root/zpool_detach/zpool_detach_001_neg.ksh \
functional/cli_root/zpool_events/cleanup.ksh \
functional/cli_root/zpool_events/setup.ksh \
functional/cli_root/zpool_events/zpool_events_clear.ksh \
functional/cli_root/zpool_events/zpool_events_clear_retained.ksh \
functional/cli_root/zpool_events/zpool_events_cliargs.ksh \
functional/cli_root/zpool_events/zpool_events_duplicates.ksh \
functional/cli_root/zpool_events/zpool_events_errors.ksh \
functional/cli_root/zpool_events/zpool_events_follow.ksh \
functional/cli_root/zpool_events/zpool_events_poolname.ksh \
functional/cli_root/zpool_expand/cleanup.ksh \
functional/cli_root/zpool_expand/setup.ksh \
functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_003_neg.ksh \
functional/cli_root/zpool_expand/zpool_expand_004_pos.ksh \
functional/cli_root/zpool_expand/zpool_expand_005_pos.ksh \
functional/cli_root/zpool_export/cleanup.ksh \
functional/cli_root/zpool_export/setup.ksh \
functional/cli_root/zpool_export/zpool_export_001_pos.ksh \
functional/cli_root/zpool_export/zpool_export_002_pos.ksh \
functional/cli_root/zpool_export/zpool_export_003_neg.ksh \
functional/cli_root/zpool_export/zpool_export_004_pos.ksh \
functional/cli_root/zpool_get/cleanup.ksh \
functional/cli_root/zpool_get/setup.ksh \
functional/cli_root/zpool_get/vdev_get_001_pos.ksh \
functional/cli_root/zpool_get/zpool_get_001_pos.ksh \
functional/cli_root/zpool_get/zpool_get_002_pos.ksh \
functional/cli_root/zpool_get/zpool_get_003_pos.ksh \
functional/cli_root/zpool_get/zpool_get_004_neg.ksh \
functional/cli_root/zpool_get/zpool_get_005_pos.ksh \
functional/cli_root/zpool_history/cleanup.ksh \
functional/cli_root/zpool_history/setup.ksh \
functional/cli_root/zpool_history/zpool_history_001_neg.ksh \
functional/cli_root/zpool_history/zpool_history_002_pos.ksh \
functional/cli_root/zpool_import/cleanup.ksh \
functional/cli_root/zpool_import/import_cachefile_device_added.ksh \
functional/cli_root/zpool_import/import_cachefile_device_removed.ksh \
functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_attached.ksh \
functional/cli_root/zpool_import/import_cachefile_mirror_detached.ksh \
functional/cli_root/zpool_import/import_cachefile_paths_changed.ksh \
functional/cli_root/zpool_import/import_cachefile_shared_device.ksh \
functional/cli_root/zpool_import/import_devices_missing.ksh \
functional/cli_root/zpool_import/import_log_missing.ksh \
functional/cli_root/zpool_import/import_paths_changed.ksh \
functional/cli_root/zpool_import/import_rewind_config_changed.ksh \
functional/cli_root/zpool_import/import_rewind_device_replaced.ksh \
functional/cli_root/zpool_import/setup.ksh \
functional/cli_root/zpool_import/zpool_import_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_004_pos.ksh \
functional/cli_root/zpool_import/zpool_import_005_pos.ksh \
functional/cli_root/zpool_import/zpool_import_006_pos.ksh \
functional/cli_root/zpool_import/zpool_import_007_pos.ksh \
functional/cli_root/zpool_import/zpool_import_008_pos.ksh \
functional/cli_root/zpool_import/zpool_import_009_neg.ksh \
functional/cli_root/zpool_import/zpool_import_010_pos.ksh \
functional/cli_root/zpool_import/zpool_import_011_neg.ksh \
functional/cli_root/zpool_import/zpool_import_012_pos.ksh \
functional/cli_root/zpool_import/zpool_import_013_neg.ksh \
functional/cli_root/zpool_import/zpool_import_014_pos.ksh \
functional/cli_root/zpool_import/zpool_import_015_pos.ksh \
functional/cli_root/zpool_import/zpool_import_016_pos.ksh \
functional/cli_root/zpool_import/zpool_import_017_pos.ksh \
functional/cli_root/zpool_import/zpool_import_all_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted.ksh \
functional/cli_root/zpool_import/zpool_import_encrypted_load.ksh \
functional/cli_root/zpool_import/zpool_import_errata3.ksh \
functional/cli_root/zpool_import/zpool_import_errata4.ksh \
functional/cli_root/zpool_import/zpool_import_features_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_features_002_neg.ksh \
functional/cli_root/zpool_import/zpool_import_features_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_001_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh \
functional/cli_root/zpool_import/zpool_import_missing_003_pos.ksh \
functional/cli_root/zpool_import/zpool_import_rename_001_pos.ksh \
functional/cli_root/zpool_initialize/cleanup.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_attach_detach_add_remove.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_fault_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_import_export.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_offline_export_import_online.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_online_offline.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_split.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_neg.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_start_and_cancel_pos.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_suspend_resume.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_uninit.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_unsupported_vdevs.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_checksums.ksh \
functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_active.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_exported.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_removed.ksh \
functional/cli_root/zpool_labelclear/zpool_labelclear_valid.ksh \
functional/cli_root/zpool_offline/cleanup.ksh \
functional/cli_root/zpool_offline/setup.ksh \
functional/cli_root/zpool_offline/zpool_offline_001_pos.ksh \
functional/cli_root/zpool_offline/zpool_offline_002_neg.ksh \
functional/cli_root/zpool_offline/zpool_offline_003_pos.ksh \
functional/cli_root/zpool_online/cleanup.ksh \
functional/cli_root/zpool_online/setup.ksh \
functional/cli_root/zpool_online/zpool_online_001_pos.ksh \
functional/cli_root/zpool_online/zpool_online_002_neg.ksh \
functional/cli_root/zpool_remove/cleanup.ksh \
functional/cli_root/zpool_remove/setup.ksh \
functional/cli_root/zpool_remove/zpool_remove_001_neg.ksh \
functional/cli_root/zpool_remove/zpool_remove_002_pos.ksh \
functional/cli_root/zpool_remove/zpool_remove_003_pos.ksh \
functional/cli_root/zpool_reopen/cleanup.ksh \
functional/cli_root/zpool_reopen/setup.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_006_neg.ksh \
functional/cli_root/zpool_reopen/zpool_reopen_007_pos.ksh \
functional/cli_root/zpool_replace/cleanup.ksh \
functional/cli_root/zpool_replace/replace-o_ashift.ksh \
functional/cli_root/zpool_replace/replace_prop_ashift.ksh \
functional/cli_root/zpool_replace/setup.ksh \
functional/cli_root/zpool_replace/zpool_replace_001_neg.ksh \
functional/cli_root/zpool_resilver/cleanup.ksh \
functional/cli_root/zpool_resilver/setup.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_bad_args.ksh \
functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh \
+ functional/cli_root/zpool_resilver/zpool_resilver_concurrent.ksh \
functional/cli_root/zpool_scrub/cleanup.ksh \
functional/cli_root/zpool_scrub/setup.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_001_neg.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_005_pos.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_encrypted_unloaded.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_multiple_copies.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh \
functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_001_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_002_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_003_pos.ksh \
functional/cli_root/zpool_scrub/zpool_error_scrub_004_pos.ksh \
functional/cli_root/zpool_set/cleanup.ksh \
functional/cli_root/zpool_set/setup.ksh \
functional/cli_root/zpool/setup.ksh \
functional/cli_root/zpool_set/vdev_set_001_pos.ksh \
functional/cli_root/zpool_set/zpool_set_common.kshlib \
functional/cli_root/zpool_set/zpool_set_001_pos.ksh \
functional/cli_root/zpool_set/zpool_set_002_neg.ksh \
functional/cli_root/zpool_set/zpool_set_003_neg.ksh \
functional/cli_root/zpool_set/zpool_set_ashift.ksh \
functional/cli_root/zpool_set/user_property_001_pos.ksh \
functional/cli_root/zpool_set/user_property_002_neg.ksh \
functional/cli_root/zpool_set/zpool_set_features.ksh \
functional/cli_root/zpool_split/cleanup.ksh \
functional/cli_root/zpool_split/setup.ksh \
functional/cli_root/zpool_split/zpool_split_cliargs.ksh \
functional/cli_root/zpool_split/zpool_split_devices.ksh \
functional/cli_root/zpool_split/zpool_split_dryrun_output.ksh \
functional/cli_root/zpool_split/zpool_split_encryption.ksh \
functional/cli_root/zpool_split/zpool_split_indirect.ksh \
functional/cli_root/zpool_split/zpool_split_props.ksh \
functional/cli_root/zpool_split/zpool_split_resilver.ksh \
functional/cli_root/zpool_split/zpool_split_vdevs.ksh \
functional/cli_root/zpool_split/zpool_split_wholedisk.ksh \
functional/cli_root/zpool_status/cleanup.ksh \
functional/cli_root/zpool_status/setup.ksh \
functional/cli_root/zpool_status/zpool_status_001_pos.ksh \
functional/cli_root/zpool_status/zpool_status_002_pos.ksh \
functional/cli_root/zpool_status/zpool_status_003_pos.ksh \
functional/cli_root/zpool_status/zpool_status_004_pos.ksh \
functional/cli_root/zpool_status/zpool_status_005_pos.ksh \
functional/cli_root/zpool_status/zpool_status_006_pos.ksh \
functional/cli_root/zpool_status/zpool_status_007_pos.ksh \
functional/cli_root/zpool_status/zpool_status_features_001_pos.ksh \
functional/cli_root/zpool_sync/cleanup.ksh \
functional/cli_root/zpool_sync/setup.ksh \
functional/cli_root/zpool_sync/zpool_sync_001_pos.ksh \
functional/cli_root/zpool_sync/zpool_sync_002_neg.ksh \
functional/cli_root/zpool_trim/cleanup.ksh \
functional/cli_root/zpool_trim/setup.ksh \
functional/cli_root/zpool_trim/zpool_trim_attach_detach_add_remove.ksh \
functional/cli_root/zpool_trim/zpool_trim_fault_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_import_export.ksh \
functional/cli_root/zpool_trim/zpool_trim_multiple.ksh \
functional/cli_root/zpool_trim/zpool_trim_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_offline_export_import_online.ksh \
functional/cli_root/zpool_trim/zpool_trim_online_offline.ksh \
functional/cli_root/zpool_trim/zpool_trim_partial.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate.ksh \
functional/cli_root/zpool_trim/zpool_trim_rate_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_secure.ksh \
functional/cli_root/zpool_trim/zpool_trim_split.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_neg.ksh \
functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_pos.ksh \
functional/cli_root/zpool_trim/zpool_trim_suspend_resume.ksh \
functional/cli_root/zpool_trim/zpool_trim_unsupported_vdevs.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_checksums.ksh \
functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh \
functional/cli_root/zpool_upgrade/cleanup.ksh \
functional/cli_root/zpool_upgrade/setup.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_001_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_002_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_003_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_004_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_005_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_006_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_007_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_008_pos.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_009_neg.ksh \
functional/cli_root/zpool_upgrade/zpool_upgrade_features_001_pos.ksh \
functional/cli_root/zpool_wait/cleanup.ksh \
functional/cli_root/zpool_wait/scan/cleanup.ksh \
functional/cli_root/zpool_wait/scan/setup.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_rebuild.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_replace.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_resilver.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_basic.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh \
functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh \
functional/cli_root/zpool_wait/setup.ksh \
functional/cli_root/zpool_wait/zpool_wait_discard.ksh \
functional/cli_root/zpool_wait/zpool_wait_freeing.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_multiple.ksh \
functional/cli_root/zpool_wait/zpool_wait_no_activity.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_remove.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_basic.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_cancel.ksh \
functional/cli_root/zpool_wait/zpool_wait_trim_flag.ksh \
functional/cli_root/zpool_wait/zpool_wait_usage.ksh \
functional/cli_root/zpool/zpool_001_neg.ksh \
functional/cli_root/zpool/zpool_002_pos.ksh \
functional/cli_root/zpool/zpool_003_pos.ksh \
functional/cli_root/zpool/zpool_colors.ksh \
functional/cli_user/misc/arcstat_001_pos.ksh \
functional/cli_user/misc/arc_summary_001_pos.ksh \
functional/cli_user/misc/arc_summary_002_neg.ksh \
functional/cli_user/misc/zilstat_001_pos.ksh \
functional/cli_user/misc/cleanup.ksh \
functional/cli_user/misc/setup.ksh \
functional/cli_user/misc/zdb_001_neg.ksh \
functional/cli_user/misc/zfs_001_neg.ksh \
functional/cli_user/misc/zfs_allow_001_neg.ksh \
functional/cli_user/misc/zfs_clone_001_neg.ksh \
functional/cli_user/misc/zfs_create_001_neg.ksh \
functional/cli_user/misc/zfs_destroy_001_neg.ksh \
functional/cli_user/misc/zfs_get_001_neg.ksh \
functional/cli_user/misc/zfs_inherit_001_neg.ksh \
functional/cli_user/misc/zfs_mount_001_neg.ksh \
functional/cli_user/misc/zfs_promote_001_neg.ksh \
functional/cli_user/misc/zfs_receive_001_neg.ksh \
functional/cli_user/misc/zfs_rename_001_neg.ksh \
functional/cli_user/misc/zfs_rollback_001_neg.ksh \
functional/cli_user/misc/zfs_send_001_neg.ksh \
functional/cli_user/misc/zfs_set_001_neg.ksh \
functional/cli_user/misc/zfs_share_001_neg.ksh \
functional/cli_user/misc/zfs_snapshot_001_neg.ksh \
functional/cli_user/misc/zfs_unallow_001_neg.ksh \
functional/cli_user/misc/zfs_unmount_001_neg.ksh \
functional/cli_user/misc/zfs_unshare_001_neg.ksh \
functional/cli_user/misc/zfs_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_001_neg.ksh \
functional/cli_user/misc/zpool_add_001_neg.ksh \
functional/cli_user/misc/zpool_attach_001_neg.ksh \
functional/cli_user/misc/zpool_clear_001_neg.ksh \
functional/cli_user/misc/zpool_create_001_neg.ksh \
functional/cli_user/misc/zpool_destroy_001_neg.ksh \
functional/cli_user/misc/zpool_detach_001_neg.ksh \
functional/cli_user/misc/zpool_export_001_neg.ksh \
functional/cli_user/misc/zpool_get_001_neg.ksh \
functional/cli_user/misc/zpool_history_001_neg.ksh \
functional/cli_user/misc/zpool_import_001_neg.ksh \
functional/cli_user/misc/zpool_import_002_neg.ksh \
functional/cli_user/misc/zpool_offline_001_neg.ksh \
functional/cli_user/misc/zpool_online_001_neg.ksh \
functional/cli_user/misc/zpool_remove_001_neg.ksh \
functional/cli_user/misc/zpool_replace_001_neg.ksh \
functional/cli_user/misc/zpool_scrub_001_neg.ksh \
functional/cli_user/misc/zpool_set_001_neg.ksh \
functional/cli_user/misc/zpool_status_001_neg.ksh \
functional/cli_user/misc/zpool_upgrade_001_neg.ksh \
functional/cli_user/misc/zpool_wait_privilege.ksh \
functional/cli_user/zfs_list/cleanup.ksh \
functional/cli_user/zfs_list/setup.ksh \
functional/cli_user/zfs_list/zfs_list_001_pos.ksh \
functional/cli_user/zfs_list/zfs_list_002_pos.ksh \
functional/cli_user/zfs_list/zfs_list_003_pos.ksh \
functional/cli_user/zfs_list/zfs_list_004_neg.ksh \
functional/cli_user/zfs_list/zfs_list_005_neg.ksh \
functional/cli_user/zfs_list/zfs_list_007_pos.ksh \
functional/cli_user/zfs_list/zfs_list_008_neg.ksh \
functional/cli_user/zpool_iostat/cleanup.ksh \
functional/cli_user/zpool_iostat/setup.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_001_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_002_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_003_neg.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_004_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_005_pos.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_disable.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_homedir.ksh \
functional/cli_user/zpool_iostat/zpool_iostat_-c_searchpath.ksh \
functional/cli_user/zpool_list/cleanup.ksh \
functional/cli_user/zpool_list/setup.ksh \
functional/cli_user/zpool_list/zpool_list_001_pos.ksh \
functional/cli_user/zpool_list/zpool_list_002_neg.ksh \
functional/cli_user/zpool_status/cleanup.ksh \
functional/cli_user/zpool_status/setup.ksh \
functional/cli_user/zpool_status/zpool_status_003_pos.ksh \
functional/cli_user/zpool_status/zpool_status_-c_disable.ksh \
functional/cli_user/zpool_status/zpool_status_-c_homedir.ksh \
functional/cli_user/zpool_status/zpool_status_-c_searchpath.ksh \
functional/compression/cleanup.ksh \
functional/compression/compress_001_pos.ksh \
functional/compression/compress_002_pos.ksh \
functional/compression/compress_003_pos.ksh \
functional/compression/compress_004_pos.ksh \
functional/compression/compress_zstd_bswap.ksh \
functional/compression/l2arc_compressed_arc_disabled.ksh \
functional/compression/l2arc_compressed_arc.ksh \
functional/compression/l2arc_encrypted.ksh \
functional/compression/l2arc_encrypted_no_compressed_arc.ksh \
functional/compression/setup.ksh \
functional/cp_files/cleanup.ksh \
functional/cp_files/cp_files_001_pos.ksh \
functional/cp_files/setup.ksh \
functional/crtime/cleanup.ksh \
functional/crtime/crtime_001_pos.ksh \
functional/crtime/setup.ksh \
functional/ctime/cleanup.ksh \
functional/ctime/ctime_001_pos.ksh \
functional/ctime/setup.ksh \
functional/deadman/deadman_ratelimit.ksh \
functional/deadman/deadman_sync.ksh \
functional/deadman/deadman_zio.ksh \
functional/delegate/cleanup.ksh \
functional/delegate/setup.ksh \
functional/delegate/zfs_allow_001_pos.ksh \
functional/delegate/zfs_allow_002_pos.ksh \
functional/delegate/zfs_allow_003_pos.ksh \
functional/delegate/zfs_allow_004_pos.ksh \
functional/delegate/zfs_allow_005_pos.ksh \
functional/delegate/zfs_allow_006_pos.ksh \
functional/delegate/zfs_allow_007_pos.ksh \
functional/delegate/zfs_allow_008_pos.ksh \
functional/delegate/zfs_allow_009_neg.ksh \
functional/delegate/zfs_allow_010_pos.ksh \
functional/delegate/zfs_allow_011_neg.ksh \
functional/delegate/zfs_allow_012_neg.ksh \
functional/delegate/zfs_unallow_001_pos.ksh \
functional/delegate/zfs_unallow_002_pos.ksh \
functional/delegate/zfs_unallow_003_pos.ksh \
functional/delegate/zfs_unallow_004_pos.ksh \
functional/delegate/zfs_unallow_005_pos.ksh \
functional/delegate/zfs_unallow_006_pos.ksh \
functional/delegate/zfs_unallow_007_neg.ksh \
functional/delegate/zfs_unallow_008_neg.ksh \
functional/devices/cleanup.ksh \
functional/devices/devices_001_pos.ksh \
functional/devices/devices_002_neg.ksh \
functional/devices/devices_003_pos.ksh \
functional/devices/setup.ksh \
functional/dos_attributes/cleanup.ksh \
functional/dos_attributes/read_dos_attrs_001.ksh \
functional/dos_attributes/setup.ksh \
functional/dos_attributes/write_dos_attrs_001.ksh \
functional/events/cleanup.ksh \
functional/events/events_001_pos.ksh \
functional/events/events_002_pos.ksh \
functional/events/setup.ksh \
functional/events/zed_cksum_config.ksh \
functional/events/zed_cksum_reported.ksh \
functional/events/zed_fd_spill.ksh \
functional/events/zed_io_config.ksh \
functional/events/zed_rc_filter.ksh \
functional/exec/cleanup.ksh \
functional/exec/exec_001_pos.ksh \
functional/exec/exec_002_neg.ksh \
functional/exec/setup.ksh \
functional/fadvise/cleanup.ksh \
functional/fadvise/fadvise_sequential.ksh \
functional/fadvise/setup.ksh \
functional/fallocate/cleanup.ksh \
functional/fallocate/fallocate_prealloc.ksh \
functional/fallocate/fallocate_punch-hole.ksh \
functional/fallocate/fallocate_zero-range.ksh \
functional/fallocate/setup.ksh \
functional/fault/auto_offline_001_pos.ksh \
functional/fault/auto_online_001_pos.ksh \
functional/fault/auto_online_002_pos.ksh \
functional/fault/auto_replace_001_pos.ksh \
functional/fault/auto_spare_001_pos.ksh \
functional/fault/auto_spare_002_pos.ksh \
functional/fault/auto_spare_ashift.ksh \
functional/fault/auto_spare_multiple.ksh \
functional/fault/auto_spare_shared.ksh \
functional/fault/cleanup.ksh \
functional/fault/decompress_fault.ksh \
functional/fault/decrypt_fault.ksh \
functional/fault/scrub_after_resilver.ksh \
functional/fault/setup.ksh \
functional/fault/zpool_status_-s.ksh \
functional/features/async_destroy/async_destroy_001_pos.ksh \
functional/features/async_destroy/cleanup.ksh \
functional/features/async_destroy/setup.ksh \
functional/features/large_dnode/cleanup.ksh \
functional/features/large_dnode/large_dnode_001_pos.ksh \
functional/features/large_dnode/large_dnode_002_pos.ksh \
functional/features/large_dnode/large_dnode_003_pos.ksh \
functional/features/large_dnode/large_dnode_004_neg.ksh \
functional/features/large_dnode/large_dnode_005_pos.ksh \
functional/features/large_dnode/large_dnode_006_pos.ksh \
functional/features/large_dnode/large_dnode_007_neg.ksh \
functional/features/large_dnode/large_dnode_008_pos.ksh \
functional/features/large_dnode/large_dnode_009_pos.ksh \
functional/features/large_dnode/setup.ksh \
functional/grow/grow_pool_001_pos.ksh \
functional/grow/grow_replicas_001_pos.ksh \
functional/history/cleanup.ksh \
functional/history/history_001_pos.ksh \
functional/history/history_002_pos.ksh \
functional/history/history_003_pos.ksh \
functional/history/history_004_pos.ksh \
functional/history/history_005_neg.ksh \
functional/history/history_006_neg.ksh \
functional/history/history_007_pos.ksh \
functional/history/history_008_pos.ksh \
functional/history/history_009_pos.ksh \
functional/history/history_010_pos.ksh \
functional/history/setup.ksh \
functional/inheritance/cleanup.ksh \
functional/inheritance/inherit_001_pos.ksh \
functional/inuse/inuse_001_pos.ksh \
functional/inuse/inuse_003_pos.ksh \
functional/inuse/inuse_004_pos.ksh \
functional/inuse/inuse_005_pos.ksh \
functional/inuse/inuse_006_pos.ksh \
functional/inuse/inuse_007_pos.ksh \
functional/inuse/inuse_008_pos.ksh \
functional/inuse/inuse_009_pos.ksh \
functional/inuse/setup.ksh \
functional/io/cleanup.ksh \
functional/io/io_uring.ksh \
functional/io/libaio.ksh \
functional/io/mmap.ksh \
functional/io/posixaio.ksh \
functional/io/psync.ksh \
functional/io/setup.ksh \
functional/io/sync.ksh \
functional/l2arc/cleanup.ksh \
functional/l2arc/l2arc_arcstats_pos.ksh \
functional/l2arc/l2arc_l2miss_pos.ksh \
functional/l2arc/l2arc_mfuonly_pos.ksh \
functional/l2arc/persist_l2arc_001_pos.ksh \
functional/l2arc/persist_l2arc_002_pos.ksh \
functional/l2arc/persist_l2arc_003_neg.ksh \
functional/l2arc/persist_l2arc_004_pos.ksh \
functional/l2arc/persist_l2arc_005_pos.ksh \
functional/l2arc/setup.ksh \
functional/large_files/cleanup.ksh \
functional/large_files/large_files_001_pos.ksh \
functional/large_files/large_files_002_pos.ksh \
functional/large_files/setup.ksh \
functional/largest_pool/largest_pool_001_pos.ksh \
functional/libzfs/cleanup.ksh \
functional/libzfs/libzfs_input.ksh \
functional/libzfs/setup.ksh \
functional/limits/cleanup.ksh \
functional/limits/filesystem_count.ksh \
functional/limits/filesystem_limit.ksh \
functional/limits/setup.ksh \
functional/limits/snapshot_count.ksh \
functional/limits/snapshot_limit.ksh \
functional/link_count/cleanup.ksh \
functional/link_count/link_count_001.ksh \
functional/link_count/link_count_root_inode.ksh \
functional/link_count/setup.ksh \
functional/log_spacemap/log_spacemap_import_logs.ksh \
functional/migration/cleanup.ksh \
functional/migration/migration_001_pos.ksh \
functional/migration/migration_002_pos.ksh \
functional/migration/migration_003_pos.ksh \
functional/migration/migration_004_pos.ksh \
functional/migration/migration_005_pos.ksh \
functional/migration/migration_006_pos.ksh \
functional/migration/migration_007_pos.ksh \
functional/migration/migration_008_pos.ksh \
functional/migration/migration_009_pos.ksh \
functional/migration/migration_010_pos.ksh \
functional/migration/migration_011_pos.ksh \
functional/migration/migration_012_pos.ksh \
functional/migration/setup.ksh \
functional/mmap/cleanup.ksh \
functional/mmap/mmap_libaio_001_pos.ksh \
functional/mmap/mmap_mixed.ksh \
functional/mmap/mmap_read_001_pos.ksh \
functional/mmap/mmap_seek_001_pos.ksh \
functional/mmap/mmap_sync_001_pos.ksh \
functional/mmap/mmap_write_001_pos.ksh \
functional/mmap/setup.ksh \
functional/mmp/cleanup.ksh \
functional/mmp/mmp_active_import.ksh \
functional/mmp/mmp_exported_import.ksh \
functional/mmp/mmp_hostid.ksh \
functional/mmp/mmp_inactive_import.ksh \
functional/mmp/mmp_interval.ksh \
functional/mmp/mmp_on_off.ksh \
functional/mmp/mmp_on_thread.ksh \
functional/mmp/mmp_on_uberblocks.ksh \
functional/mmp/mmp_on_zdb.ksh \
functional/mmp/mmp_reset_interval.ksh \
functional/mmp/mmp_write_distribution.ksh \
functional/mmp/mmp_write_uberblocks.ksh \
functional/mmp/multihost_history.ksh \
functional/mmp/setup.ksh \
functional/mount/cleanup.ksh \
functional/mount/setup.ksh \
functional/mount/umount_001.ksh \
functional/mount/umountall_001.ksh \
functional/mount/umount_unlinked_drain.ksh \
functional/mv_files/cleanup.ksh \
functional/mv_files/mv_files_001_pos.ksh \
functional/mv_files/mv_files_002_pos.ksh \
functional/mv_files/random_creation.ksh \
functional/mv_files/setup.ksh \
functional/nestedfs/cleanup.ksh \
functional/nestedfs/nestedfs_001_pos.ksh \
functional/nestedfs/setup.ksh \
functional/nopwrite/cleanup.ksh \
functional/nopwrite/nopwrite_copies.ksh \
functional/nopwrite/nopwrite_mtime.ksh \
functional/nopwrite/nopwrite_negative.ksh \
functional/nopwrite/nopwrite_promoted_clone.ksh \
functional/nopwrite/nopwrite_recsize.ksh \
functional/nopwrite/nopwrite_sync.ksh \
functional/nopwrite/nopwrite_varying_compression.ksh \
functional/nopwrite/nopwrite_volume.ksh \
functional/nopwrite/setup.ksh \
functional/no_space/cleanup.ksh \
functional/no_space/enospc_001_pos.ksh \
functional/no_space/enospc_002_pos.ksh \
functional/no_space/enospc_003_pos.ksh \
functional/no_space/enospc_df.ksh \
functional/no_space/enospc_ganging.ksh \
functional/no_space/enospc_rm.ksh \
functional/no_space/setup.ksh \
functional/online_offline/cleanup.ksh \
functional/online_offline/online_offline_001_pos.ksh \
functional/online_offline/online_offline_002_neg.ksh \
functional/online_offline/online_offline_003_neg.ksh \
functional/online_offline/setup.ksh \
functional/pam/cleanup.ksh \
functional/pam/pam_basic.ksh \
functional/pam/pam_nounmount.ksh \
functional/pam/pam_short_password.ksh \
functional/pam/setup.ksh \
functional/pool_checkpoint/checkpoint_after_rewind.ksh \
functional/pool_checkpoint/checkpoint_big_rewind.ksh \
functional/pool_checkpoint/checkpoint_capacity.ksh \
functional/pool_checkpoint/checkpoint_conf_change.ksh \
functional/pool_checkpoint/checkpoint_discard_busy.ksh \
functional/pool_checkpoint/checkpoint_discard.ksh \
functional/pool_checkpoint/checkpoint_discard_many.ksh \
functional/pool_checkpoint/checkpoint_indirect.ksh \
functional/pool_checkpoint/checkpoint_invalid.ksh \
functional/pool_checkpoint/checkpoint_lun_expsz.ksh \
functional/pool_checkpoint/checkpoint_open.ksh \
functional/pool_checkpoint/checkpoint_removal.ksh \
functional/pool_checkpoint/checkpoint_rewind.ksh \
functional/pool_checkpoint/checkpoint_ro_rewind.ksh \
functional/pool_checkpoint/checkpoint_sm_scale.ksh \
functional/pool_checkpoint/checkpoint_twice.ksh \
functional/pool_checkpoint/checkpoint_vdev_add.ksh \
functional/pool_checkpoint/checkpoint_zdb.ksh \
functional/pool_checkpoint/checkpoint_zhack_feat.ksh \
functional/pool_checkpoint/cleanup.ksh \
functional/pool_checkpoint/setup.ksh \
functional/pool_names/pool_names_001_pos.ksh \
functional/pool_names/pool_names_002_neg.ksh \
functional/poolversion/cleanup.ksh \
functional/poolversion/poolversion_001_pos.ksh \
functional/poolversion/poolversion_002_pos.ksh \
functional/poolversion/setup.ksh \
functional/privilege/cleanup.ksh \
functional/privilege/privilege_001_pos.ksh \
functional/privilege/privilege_002_pos.ksh \
functional/privilege/setup.ksh \
functional/procfs/cleanup.ksh \
functional/procfs/pool_state.ksh \
functional/procfs/procfs_list_basic.ksh \
functional/procfs/procfs_list_concurrent_readers.ksh \
functional/procfs/procfs_list_stale_read.ksh \
functional/procfs/setup.ksh \
functional/projectquota/cleanup.ksh \
functional/projectquota/projectid_001_pos.ksh \
functional/projectquota/projectid_002_pos.ksh \
functional/projectquota/projectid_003_pos.ksh \
functional/projectquota/projectquota_001_pos.ksh \
functional/projectquota/projectquota_002_pos.ksh \
functional/projectquota/projectquota_003_pos.ksh \
functional/projectquota/projectquota_004_neg.ksh \
functional/projectquota/projectquota_005_pos.ksh \
functional/projectquota/projectquota_006_pos.ksh \
functional/projectquota/projectquota_007_pos.ksh \
functional/projectquota/projectquota_008_pos.ksh \
functional/projectquota/projectquota_009_pos.ksh \
functional/projectquota/projectspace_001_pos.ksh \
functional/projectquota/projectspace_002_pos.ksh \
functional/projectquota/projectspace_003_pos.ksh \
functional/projectquota/projectspace_004_pos.ksh \
functional/projectquota/projecttree_001_pos.ksh \
functional/projectquota/projecttree_002_pos.ksh \
functional/projectquota/projecttree_003_neg.ksh \
functional/projectquota/setup.ksh \
functional/quota/cleanup.ksh \
functional/quota/quota_001_pos.ksh \
functional/quota/quota_002_pos.ksh \
functional/quota/quota_003_pos.ksh \
functional/quota/quota_004_pos.ksh \
functional/quota/quota_005_pos.ksh \
functional/quota/quota_006_neg.ksh \
functional/quota/setup.ksh \
functional/raidz/cleanup.ksh \
functional/raidz/raidz_001_neg.ksh \
functional/raidz/raidz_002_pos.ksh \
functional/raidz/raidz_003_pos.ksh \
functional/raidz/raidz_004_pos.ksh \
functional/raidz/setup.ksh \
functional/redacted_send/cleanup.ksh \
functional/redacted_send/redacted_compressed.ksh \
functional/redacted_send/redacted_contents.ksh \
functional/redacted_send/redacted_deleted.ksh \
functional/redacted_send/redacted_disabled_feature.ksh \
functional/redacted_send/redacted_embedded.ksh \
functional/redacted_send/redacted_holes.ksh \
functional/redacted_send/redacted_incrementals.ksh \
functional/redacted_send/redacted_largeblocks.ksh \
functional/redacted_send/redacted_many_clones.ksh \
functional/redacted_send/redacted_mixed_recsize.ksh \
functional/redacted_send/redacted_mounts.ksh \
functional/redacted_send/redacted_negative.ksh \
functional/redacted_send/redacted_origin.ksh \
functional/redacted_send/redacted_panic.ksh \
functional/redacted_send/redacted_props.ksh \
functional/redacted_send/redacted_resume.ksh \
functional/redacted_send/redacted_size.ksh \
functional/redacted_send/redacted_volume.ksh \
functional/redacted_send/setup.ksh \
functional/redundancy/cleanup.ksh \
functional/redundancy/redundancy_draid1.ksh \
functional/redundancy/redundancy_draid2.ksh \
functional/redundancy/redundancy_draid3.ksh \
functional/redundancy/redundancy_draid_damaged1.ksh \
functional/redundancy/redundancy_draid_damaged2.ksh \
functional/redundancy/redundancy_draid.ksh \
functional/redundancy/redundancy_draid_spare1.ksh \
functional/redundancy/redundancy_draid_spare2.ksh \
functional/redundancy/redundancy_draid_spare3.ksh \
functional/redundancy/redundancy_mirror.ksh \
functional/redundancy/redundancy_raidz1.ksh \
functional/redundancy/redundancy_raidz2.ksh \
functional/redundancy/redundancy_raidz3.ksh \
functional/redundancy/redundancy_raidz.ksh \
functional/redundancy/redundancy_stripe.ksh \
functional/redundancy/setup.ksh \
functional/refquota/cleanup.ksh \
functional/refquota/refquota_001_pos.ksh \
functional/refquota/refquota_002_pos.ksh \
functional/refquota/refquota_003_pos.ksh \
functional/refquota/refquota_004_pos.ksh \
functional/refquota/refquota_005_pos.ksh \
functional/refquota/refquota_006_neg.ksh \
functional/refquota/refquota_007_neg.ksh \
functional/refquota/refquota_008_neg.ksh \
functional/refquota/setup.ksh \
functional/refreserv/cleanup.ksh \
functional/refreserv/refreserv_001_pos.ksh \
functional/refreserv/refreserv_002_pos.ksh \
functional/refreserv/refreserv_003_pos.ksh \
functional/refreserv/refreserv_004_pos.ksh \
functional/refreserv/refreserv_005_pos.ksh \
functional/refreserv/refreserv_multi_raidz.ksh \
functional/refreserv/refreserv_raidz.ksh \
functional/refreserv/setup.ksh \
functional/removal/cleanup.ksh \
functional/removal/removal_all_vdev.ksh \
functional/removal/removal_cancel.ksh \
functional/removal/removal_check_space.ksh \
functional/removal/removal_condense_export.ksh \
functional/removal/removal_multiple_indirection.ksh \
functional/removal/removal_nopwrite.ksh \
functional/removal/removal_remap_deadlists.ksh \
functional/removal/removal_reservation.ksh \
functional/removal/removal_resume_export.ksh \
functional/removal/removal_sanity.ksh \
functional/removal/removal_with_add.ksh \
functional/removal/removal_with_create_fs.ksh \
functional/removal/removal_with_dedup.ksh \
functional/removal/removal_with_errors.ksh \
functional/removal/removal_with_export.ksh \
functional/removal/removal_with_faulted.ksh \
functional/removal/removal_with_ganging.ksh \
functional/removal/removal_with_indirect.ksh \
functional/removal/removal_with_remove.ksh \
functional/removal/removal_with_scrub.ksh \
functional/removal/removal_with_send.ksh \
functional/removal/removal_with_send_recv.ksh \
functional/removal/removal_with_snapshot.ksh \
functional/removal/removal_with_write.ksh \
functional/removal/removal_with_zdb.ksh \
functional/removal/remove_attach_mirror.ksh \
functional/removal/remove_expanded.ksh \
functional/removal/remove_indirect.ksh \
functional/removal/remove_mirror.ksh \
functional/removal/remove_mirror_sanity.ksh \
functional/removal/remove_raidz.ksh \
functional/rename_dirs/cleanup.ksh \
functional/rename_dirs/rename_dirs_001_pos.ksh \
functional/rename_dirs/setup.ksh \
functional/renameat2/cleanup.ksh \
functional/renameat2/setup.ksh \
functional/renameat2/renameat2_exchange.ksh \
functional/renameat2/renameat2_noreplace.ksh \
functional/renameat2/renameat2_whiteout.ksh \
functional/replacement/attach_import.ksh \
functional/replacement/attach_multiple.ksh \
functional/replacement/attach_rebuild.ksh \
functional/replacement/attach_resilver.ksh \
functional/replacement/cleanup.ksh \
functional/replacement/detach.ksh \
functional/replacement/rebuild_disabled_feature.ksh \
functional/replacement/rebuild_multiple.ksh \
functional/replacement/rebuild_raidz.ksh \
functional/replacement/replace_import.ksh \
functional/replacement/replace_rebuild.ksh \
functional/replacement/replace_resilver.ksh \
functional/replacement/resilver_restart_001.ksh \
functional/replacement/resilver_restart_002.ksh \
functional/replacement/scrub_cancel.ksh \
functional/replacement/setup.ksh \
functional/reservation/cleanup.ksh \
functional/reservation/reservation_001_pos.ksh \
functional/reservation/reservation_002_pos.ksh \
functional/reservation/reservation_003_pos.ksh \
functional/reservation/reservation_004_pos.ksh \
functional/reservation/reservation_005_pos.ksh \
functional/reservation/reservation_006_pos.ksh \
functional/reservation/reservation_007_pos.ksh \
functional/reservation/reservation_008_pos.ksh \
functional/reservation/reservation_009_pos.ksh \
functional/reservation/reservation_010_pos.ksh \
functional/reservation/reservation_011_pos.ksh \
functional/reservation/reservation_012_pos.ksh \
functional/reservation/reservation_013_pos.ksh \
functional/reservation/reservation_014_pos.ksh \
functional/reservation/reservation_015_pos.ksh \
functional/reservation/reservation_016_pos.ksh \
functional/reservation/reservation_017_pos.ksh \
functional/reservation/reservation_018_pos.ksh \
functional/reservation/reservation_019_pos.ksh \
functional/reservation/reservation_020_pos.ksh \
functional/reservation/reservation_021_neg.ksh \
functional/reservation/reservation_022_pos.ksh \
functional/reservation/setup.ksh \
functional/rootpool/cleanup.ksh \
functional/rootpool/rootpool_002_neg.ksh \
functional/rootpool/rootpool_003_neg.ksh \
functional/rootpool/rootpool_007_pos.ksh \
functional/rootpool/setup.ksh \
functional/rsend/cleanup.ksh \
functional/rsend/recv_dedup_encrypted_zvol.ksh \
functional/rsend/recv_dedup.ksh \
functional/rsend/rsend_001_pos.ksh \
functional/rsend/rsend_002_pos.ksh \
functional/rsend/rsend_003_pos.ksh \
functional/rsend/rsend_004_pos.ksh \
functional/rsend/rsend_005_pos.ksh \
functional/rsend/rsend_006_pos.ksh \
functional/rsend/rsend_007_pos.ksh \
functional/rsend/rsend_008_pos.ksh \
functional/rsend/rsend_009_pos.ksh \
functional/rsend/rsend_010_pos.ksh \
functional/rsend/rsend_011_pos.ksh \
functional/rsend/rsend_012_pos.ksh \
functional/rsend/rsend_013_pos.ksh \
functional/rsend/rsend_014_pos.ksh \
functional/rsend/rsend_016_neg.ksh \
functional/rsend/rsend_019_pos.ksh \
functional/rsend/rsend_020_pos.ksh \
functional/rsend/rsend_021_pos.ksh \
functional/rsend/rsend_022_pos.ksh \
functional/rsend/rsend_024_pos.ksh \
functional/rsend/rsend_025_pos.ksh \
functional/rsend/rsend_026_neg.ksh \
functional/rsend/rsend_027_pos.ksh \
functional/rsend/rsend_028_neg.ksh \
functional/rsend/rsend_029_neg.ksh \
functional/rsend/rsend_030_pos.ksh \
functional/rsend/rsend_031_pos.ksh \
functional/rsend/send-c_embedded_blocks.ksh \
functional/rsend/send-c_incremental.ksh \
functional/rsend/send-c_lz4_disabled.ksh \
functional/rsend/send-c_mixed_compression.ksh \
functional/rsend/send-c_props.ksh \
functional/rsend/send-c_recv_dedup.ksh \
functional/rsend/send-c_recv_lz4_disabled.ksh \
functional/rsend/send-c_resume.ksh \
functional/rsend/send-c_stream_size_estimate.ksh \
functional/rsend/send-c_verify_contents.ksh \
functional/rsend/send-c_verify_ratio.ksh \
functional/rsend/send-c_volume.ksh \
functional/rsend/send-c_zstream_recompress.ksh \
functional/rsend/send-c_zstreamdump.ksh \
functional/rsend/send-cpL_varied_recsize.ksh \
functional/rsend/send_doall.ksh \
functional/rsend/send_encrypted_incremental.ksh \
functional/rsend/send_encrypted_files.ksh \
functional/rsend/send_encrypted_freeobjects.ksh \
functional/rsend/send_encrypted_hierarchy.ksh \
functional/rsend/send_encrypted_props.ksh \
functional/rsend/send_encrypted_truncated_files.ksh \
functional/rsend/send_freeobjects.ksh \
functional/rsend/send_holds.ksh \
functional/rsend/send_hole_birth.ksh \
functional/rsend/send_invalid.ksh \
functional/rsend/send-L_toggle.ksh \
functional/rsend/send_mixed_raw.ksh \
functional/rsend/send_partial_dataset.ksh \
functional/rsend/send_raw_ashift.ksh \
functional/rsend/send_raw_spill_block.ksh \
functional/rsend/send_raw_large_blocks.ksh \
functional/rsend/send_realloc_dnode_size.ksh \
functional/rsend/send_realloc_encrypted_files.ksh \
functional/rsend/send_realloc_files.ksh \
functional/rsend/send_spill_block.ksh \
functional/rsend/send-wR_encrypted_zvol.ksh \
functional/rsend/setup.ksh \
functional/scrub_mirror/cleanup.ksh \
functional/scrub_mirror/scrub_mirror_001_pos.ksh \
functional/scrub_mirror/scrub_mirror_002_pos.ksh \
functional/scrub_mirror/scrub_mirror_003_pos.ksh \
functional/scrub_mirror/scrub_mirror_004_pos.ksh \
functional/scrub_mirror/setup.ksh \
functional/slog/cleanup.ksh \
functional/slog/setup.ksh \
functional/slog/slog_001_pos.ksh \
functional/slog/slog_002_pos.ksh \
functional/slog/slog_003_pos.ksh \
functional/slog/slog_004_pos.ksh \
functional/slog/slog_005_pos.ksh \
functional/slog/slog_006_pos.ksh \
functional/slog/slog_007_pos.ksh \
functional/slog/slog_008_neg.ksh \
functional/slog/slog_009_neg.ksh \
functional/slog/slog_010_neg.ksh \
functional/slog/slog_011_neg.ksh \
functional/slog/slog_012_neg.ksh \
functional/slog/slog_013_pos.ksh \
functional/slog/slog_014_pos.ksh \
functional/slog/slog_015_neg.ksh \
functional/slog/slog_016_pos.ksh \
functional/slog/slog_replay_fs_001.ksh \
functional/slog/slog_replay_fs_002.ksh \
functional/slog/slog_replay_volume.ksh \
functional/snapshot/cleanup.ksh \
functional/snapshot/clone_001_pos.ksh \
functional/snapshot/rollback_001_pos.ksh \
functional/snapshot/rollback_002_pos.ksh \
functional/snapshot/rollback_003_pos.ksh \
functional/snapshot/setup.ksh \
functional/snapshot/snapshot_001_pos.ksh \
functional/snapshot/snapshot_002_pos.ksh \
functional/snapshot/snapshot_003_pos.ksh \
functional/snapshot/snapshot_004_pos.ksh \
functional/snapshot/snapshot_005_pos.ksh \
functional/snapshot/snapshot_006_pos.ksh \
functional/snapshot/snapshot_007_pos.ksh \
functional/snapshot/snapshot_008_pos.ksh \
functional/snapshot/snapshot_009_pos.ksh \
functional/snapshot/snapshot_010_pos.ksh \
functional/snapshot/snapshot_011_pos.ksh \
functional/snapshot/snapshot_012_pos.ksh \
functional/snapshot/snapshot_013_pos.ksh \
functional/snapshot/snapshot_014_pos.ksh \
functional/snapshot/snapshot_015_pos.ksh \
functional/snapshot/snapshot_016_pos.ksh \
functional/snapshot/snapshot_017_pos.ksh \
functional/snapshot/snapshot_018_pos.ksh \
functional/snapused/cleanup.ksh \
functional/snapused/setup.ksh \
functional/snapused/snapused_001_pos.ksh \
functional/snapused/snapused_002_pos.ksh \
functional/snapused/snapused_003_pos.ksh \
functional/snapused/snapused_004_pos.ksh \
functional/snapused/snapused_005_pos.ksh \
functional/sparse/cleanup.ksh \
functional/sparse/setup.ksh \
functional/sparse/sparse_001_pos.ksh \
functional/stat/cleanup.ksh \
functional/stat/setup.ksh \
functional/stat/stat_001_pos.ksh \
functional/suid/cleanup.ksh \
functional/suid/setup.ksh \
functional/suid/suid_write_to_none.ksh \
functional/suid/suid_write_to_sgid.ksh \
functional/suid/suid_write_to_suid.ksh \
functional/suid/suid_write_to_suid_sgid.ksh \
functional/suid/suid_write_zil_replay.ksh \
functional/trim/autotrim_config.ksh \
functional/trim/autotrim_integrity.ksh \
functional/trim/autotrim_trim_integrity.ksh \
functional/trim/cleanup.ksh \
functional/trim/setup.ksh \
functional/trim/trim_config.ksh \
functional/trim/trim_integrity.ksh \
functional/trim/trim_l2arc.ksh \
functional/truncate/cleanup.ksh \
functional/truncate/setup.ksh \
functional/truncate/truncate_001_pos.ksh \
functional/truncate/truncate_002_pos.ksh \
functional/truncate/truncate_timestamps.ksh \
functional/upgrade/cleanup.ksh \
functional/upgrade/setup.ksh \
functional/upgrade/upgrade_projectquota_001_pos.ksh \
functional/upgrade/upgrade_readonly_pool.ksh \
functional/upgrade/upgrade_userobj_001_pos.ksh \
functional/user_namespace/cleanup.ksh \
functional/user_namespace/setup.ksh \
functional/user_namespace/user_namespace_001.ksh \
functional/user_namespace/user_namespace_002.ksh \
functional/user_namespace/user_namespace_003.ksh \
functional/user_namespace/user_namespace_004.ksh \
functional/userquota/cleanup.ksh \
functional/userquota/groupspace_001_pos.ksh \
functional/userquota/groupspace_002_pos.ksh \
functional/userquota/groupspace_003_pos.ksh \
functional/userquota/setup.ksh \
functional/userquota/userquota_001_pos.ksh \
functional/userquota/userquota_002_pos.ksh \
functional/userquota/userquota_003_pos.ksh \
functional/userquota/userquota_004_pos.ksh \
functional/userquota/userquota_005_neg.ksh \
functional/userquota/userquota_006_pos.ksh \
functional/userquota/userquota_007_pos.ksh \
functional/userquota/userquota_008_pos.ksh \
functional/userquota/userquota_009_pos.ksh \
functional/userquota/userquota_010_pos.ksh \
functional/userquota/userquota_011_pos.ksh \
functional/userquota/userquota_012_neg.ksh \
functional/userquota/userquota_013_pos.ksh \
functional/userquota/userspace_001_pos.ksh \
functional/userquota/userspace_002_pos.ksh \
functional/userquota/userspace_003_pos.ksh \
functional/userquota/userspace_encrypted.ksh \
functional/userquota/userspace_send_encrypted.ksh \
functional/userquota/userspace_encrypted_13709.ksh \
functional/vdev_zaps/cleanup.ksh \
functional/vdev_zaps/setup.ksh \
functional/vdev_zaps/vdev_zaps_001_pos.ksh \
functional/vdev_zaps/vdev_zaps_002_pos.ksh \
functional/vdev_zaps/vdev_zaps_003_pos.ksh \
functional/vdev_zaps/vdev_zaps_004_pos.ksh \
functional/vdev_zaps/vdev_zaps_005_pos.ksh \
functional/vdev_zaps/vdev_zaps_006_pos.ksh \
functional/vdev_zaps/vdev_zaps_007_pos.ksh \
functional/write_dirs/cleanup.ksh \
functional/write_dirs/setup.ksh \
functional/write_dirs/write_dirs_001_pos.ksh \
functional/write_dirs/write_dirs_002_pos.ksh \
functional/xattr/cleanup.ksh \
functional/xattr/setup.ksh \
functional/xattr/xattr_001_pos.ksh \
functional/xattr/xattr_002_neg.ksh \
functional/xattr/xattr_003_neg.ksh \
functional/xattr/xattr_004_pos.ksh \
functional/xattr/xattr_005_pos.ksh \
functional/xattr/xattr_006_pos.ksh \
functional/xattr/xattr_007_neg.ksh \
functional/xattr/xattr_008_pos.ksh \
functional/xattr/xattr_009_neg.ksh \
functional/xattr/xattr_010_neg.ksh \
functional/xattr/xattr_011_pos.ksh \
functional/xattr/xattr_012_pos.ksh \
functional/xattr/xattr_013_pos.ksh \
functional/xattr/xattr_compat.ksh \
functional/zpool_influxdb/cleanup.ksh \
functional/zpool_influxdb/setup.ksh \
functional/zpool_influxdb/zpool_influxdb.ksh \
functional/zvol/zvol_cli/cleanup.ksh \
functional/zvol/zvol_cli/setup.ksh \
functional/zvol/zvol_cli/zvol_cli_001_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_002_pos.ksh \
functional/zvol/zvol_cli/zvol_cli_003_neg.ksh \
functional/zvol/zvol_ENOSPC/cleanup.ksh \
functional/zvol/zvol_ENOSPC/setup.ksh \
functional/zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos.ksh \
functional/zvol/zvol_misc/cleanup.ksh \
functional/zvol/zvol_misc/setup.ksh \
functional/zvol/zvol_misc/zvol_misc_001_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_002_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_003_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_004_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_005_neg.ksh \
functional/zvol/zvol_misc/zvol_misc_006_pos.ksh \
functional/zvol/zvol_misc/zvol_misc_fua.ksh \
functional/zvol/zvol_misc/zvol_misc_hierarchy.ksh \
functional/zvol/zvol_misc/zvol_misc_rename_inuse.ksh \
functional/zvol/zvol_misc/zvol_misc_snapdev.ksh \
functional/zvol/zvol_misc/zvol_misc_trim.ksh \
functional/zvol/zvol_misc/zvol_misc_volmode.ksh \
functional/zvol/zvol_misc/zvol_misc_zil.ksh \
functional/zvol/zvol_stress/cleanup.ksh \
functional/zvol/zvol_stress/setup.ksh \
functional/zvol/zvol_stress/zvol_stress.ksh \
functional/zvol/zvol_swap/cleanup.ksh \
functional/zvol/zvol_swap/setup.ksh \
functional/zvol/zvol_swap/zvol_swap_001_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_002_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_003_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_004_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_005_pos.ksh \
functional/zvol/zvol_swap/zvol_swap_006_pos.ksh \
functional/idmap_mount/cleanup.ksh \
functional/idmap_mount/setup.ksh \
functional/idmap_mount/idmap_mount_001.ksh \
functional/idmap_mount/idmap_mount_002.ksh \
functional/idmap_mount/idmap_mount_003.ksh \
functional/idmap_mount/idmap_mount_004.ksh \
functional/idmap_mount/idmap_mount_005.ksh
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_backup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_backup.ksh
new file mode 100755
index 000000000000..d98ab86ab667
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_backup.ksh
@@ -0,0 +1,55 @@
+#!/bin/ksh
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2023, Klara Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+write_count=8
+blksize=131072
+
+tmpfile=$TEST_BASE_DIR/tmpfile
+
+function cleanup
+{
+ datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+ rm $tmpfile.1 $tmpfile.2
+}
+
+log_onexit cleanup
+
+log_assert "Verify that zfs send and zdb -B produce the same stream"
+
+verify_runnable "global"
+verify_disk_count "$DISKS" 2
+
+default_mirror_setup_noexit $DISKS
+file_write -o create -w -f $TESTDIR/file -b $blksize -c $write_count
+
+snap=$TESTPOOL/$TESTFS@snap
+log_must zfs snapshot $snap
+typeset -i objsetid=$(zfs get -Ho value objsetid $snap)
+
+sync_pool $TESTPOOL
+
+log_must eval "zfs send -ecL $snap > $tmpfile.1"
+log_must eval "zdb -B $TESTPOOL/$objsetid ecL > $tmpfile.2"
+
+typeset sum1=$(cat $tmpfile.1 | md5sum)
+typeset sum2=$(cat $tmpfile.2 | md5sum)
+
+log_must test "$sum1" = "$sum2"
+
+log_pass "zfs send and zdb -B produce the same stream"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_concurrent.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_concurrent.ksh
new file mode 100755
index 000000000000..4c3b09796869
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_concurrent.ksh
@@ -0,0 +1,101 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023 Hewlett Packard Enterprise Development LP.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/redundancy/redundancy.kshlib
+
+#
+# DESCRIPTION:
+# Verify 'zpool clear' doesn't cause concurrent resilvers
+#
+# STRATEGY:
+# 1. Create N(10) virtual disk files.
+# 2. Create draid pool based on the virtual disk files.
+# 3. Fill the filesystem with directories and files.
+# 4. Force-fault 2 vdevs and verify distributed spare is kicked in.
+# 5. Free the distributed spare by replacing the faulty drive.
+# 6. Run zpool clear and verify that it does not initiate 2 resilvers
+# concurrently while distributed spare gets kicked in.
+#
+
+verify_runnable "global"
+
+typeset -ir devs=10
+typeset -ir nparity=1
+typeset -ir ndata=8
+typeset -ir dspare=1
+
+function cleanup
+{
+ poolexists "$TESTPOOL" && destroy_pool "$TESTPOOL"
+
+ for i in {0..$devs}; do
+ log_must rm -f "$BASEDIR/vdev$i"
+ done
+
+ for dir in $BASEDIR; do
+ if [[ -d $dir ]]; then
+ log_must rm -rf $dir
+ fi
+ done
+
+ zed_stop
+ zed_cleanup
+}
+
+log_assert "Verify zpool clear on draid pool doesn't cause concurrent resilvers"
+log_onexit cleanup
+
+setup_test_env $TESTPOOL draid${nparity}:${ndata}d:${dspare}s $devs
+
+# ZED needed for sequential resilver
+zed_setup
+log_must zed_start
+
+log_must zpool offline -f $TESTPOOL $BASEDIR/vdev5
+log_must wait_vdev_state $TESTPOOL draid1-0-0 "ONLINE" 60
+log_must zpool wait -t resilver $TESTPOOL
+log_must zpool offline -f $TESTPOOL $BASEDIR/vdev6
+
+log_must zpool labelclear -f $BASEDIR/vdev5
+log_must zpool labelclear -f $BASEDIR/vdev6
+
+log_must zpool replace -w $TESTPOOL $BASEDIR/vdev5
+sync_pool $TESTPOOL
+
+log_must zpool events -c
+log_must zpool clear $TESTPOOL
+log_must wait_vdev_state $TESTPOOL draid1-0-0 "ONLINE" 60
+log_must zpool wait -t resilver $TESTPOOL
+log_must zpool wait -t scrub $TESTPOOL
+
+nof_resilver=$(zpool events | grep -c resilver_start)
+if [ $nof_resilver = 1 ] ; then
+ log_must verify_pool $TESTPOOL
+ log_pass "zpool clear on draid pool doesn't cause concurrent resilvers"
+else
+ log_fail "FAIL: sequential and healing resilver initiated concurrently"
+fi
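The pass criterion above reduces to counting resilver_start events after the event log is cleared; a condensed, standalone version of that check (pool name illustrative, and it assumes the fault/replace steps above have already run) might look like:

    # Drop previously recorded events so only new activity is counted.
    zpool events -c

    zpool clear tank
    zpool wait -t resilver tank

    # Exactly one resilver_start event means no concurrent resilver was started.
    nof_resilver=$(zpool events | grep -c resilver_start)
    [ "$nof_resilver" -eq 1 ] && echo "single resilver" || echo "concurrent resilvers detected"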
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zilstat_001_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zilstat_001_pos.ksh
index 9bf6a94cfc84..9deee67a56ca 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zilstat_001_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_user/misc/zilstat_001_pos.ksh
@@ -1,37 +1,37 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
. $STF_SUITE/include/libtest.shlib
is_freebsd && ! python3 -c 'import sysctl' 2>/dev/null && log_unsupported "python3 sysctl module missing"
set -A args "" "-s \",\"" "-v" \
- "-f time,zcwc,zimnb,zimsb"
+ "-f time,cwc,imnb,imsb"
log_assert "zilstat generates output and doesn't return an error code"
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
log_must eval "zilstat ${args[i]} > /dev/null"
((i = i + 1))
done
log_pass "zilstat generates output and doesn't return an error code"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/cleanup.ksh
index 971c7fce64e5..dbcb175ed069 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/cleanup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/cleanup.ksh
@@ -1,29 +1,30 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
. $STF_SUITE/tests/functional/pam/utilities.kshlib
rmconfig
destroy_pool $TESTPOOL
del_user ${username}
+del_user ${username}rec
del_group pamtestgroup
log_must rm -rf "$runstatedir" $TESTDIRS
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_short_password.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_change_unmounted.ksh
similarity index 54%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_short_password.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_change_unmounted.ksh
index 443e07d7f003..91b202f7609d 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_short_password.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_change_unmounted.ksh
@@ -1,88 +1,55 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
-#
-# Copyright 2021 Attila Fülöp <attila@fueloep.org>
-#
-
-
. $STF_SUITE/tests/functional/pam/utilities.kshlib
if [ -n "$ASAN_OPTIONS" ]; then
export LD_PRELOAD=$(ldd "$(command -v zfs)" | awk '/libasan\.so/ {print $3}')
fi
-if [[ -z pamservice ]]; then
- pamservice=pam_zfs_key_test
-fi
-
-# DESCRIPTION:
-# If we set the encryption passphrase for a dataset via pam_zfs_key, a minimal
-# passphrase length isn't enforced. This leads to a non-loadable key if
-# `zfs load-key` enforces a minimal length. Make sure this isn't the case.
-
log_mustnot ismounted "$TESTPOOL/pam/${username}"
keystatus unavailable
genconfig "homes=$TESTPOOL/pam runstatedir=${runstatedir}"
-# Load keys and mount userdir.
-echo "testpass" | pamtester ${pamservice} ${username} open_session
-references 1
-log_must ismounted "$TESTPOOL/pam/${username}"
-keystatus available
-
-# Change user and dataset password to short one.
-printf "short\nshort\n" | pamtester ${pamservice} ${username} chauthtok
-
-# Unmount and unload key.
-log_must pamtester ${pamservice} ${username} close_session
-references 0
-log_mustnot ismounted "$TESTPOOL/pam/${username}"
-keystatus unavailable
+printf "testpass\nsecondpass\nsecondpass\n" | pamtester -v ${pamservice} ${username} chauthtok
-# Check if password change succeeded.
-echo "testpass" | pamtester ${pamservice} ${username} open_session
-references 1
log_mustnot ismounted "$TESTPOOL/pam/${username}"
keystatus unavailable
-log_must pamtester ${pamservice} ${username} close_session
-references 0
-echo "short" | pamtester ${pamservice} ${username} open_session
+echo "secondpass" | pamtester ${pamservice} ${username} open_session
references 1
log_must ismounted "$TESTPOOL/pam/${username}"
keystatus available
+printf "secondpass\ntestpass\ntestpass\n" | pamtester -v ${pamservice} ${username} chauthtok
+
+log_must ismounted "$TESTPOOL/pam/${username}"
+log_must ismounted "$TESTPOOL/pam/${username}"
+keystatus available
-# Finally check if `zfs load-key` succeeds with the short password.
log_must pamtester ${pamservice} ${username} close_session
references 0
log_mustnot ismounted "$TESTPOOL/pam/${username}"
keystatus unavailable
-echo "short" | zfs load-key "$TESTPOOL/pam/${username}"
-keystatus available
-zfs unload-key "$TESTPOOL/pam/${username}"
-keystatus unavailable
-
log_pass "done."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_recursive.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_recursive.ksh
new file mode 100755
index 000000000000..3714b179b852
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_recursive.ksh
@@ -0,0 +1,72 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+. $STF_SUITE/tests/functional/pam/utilities.kshlib
+
+if [ -n "$ASAN_OPTIONS" ]; then
+ export LD_PRELOAD=$(ldd "$(command -v zfs)" | awk '/libasan\.so/ {print $3}')
+fi
+
+username="${username}rec"
+
+# Set up a deeper hierarchy, a mountpoint that doesn't interfere with other tests,
+# and a user which references that mountpoint
+log_must zfs create "$TESTPOOL/pampam"
+log_must zfs create -o mountpoint="$TESTDIR/rec" "$TESTPOOL/pampam/pam"
+echo "recurpass" | zfs create -o encryption=aes-256-gcm -o keyformat=passphrase \
+ -o keylocation=prompt "$TESTPOOL/pampam/pam/${username}"
+log_must zfs unmount "$TESTPOOL/pampam/pam/${username}"
+log_must zfs unload-key "$TESTPOOL/pampam/pam/${username}"
+log_must add_user pamtestgroup ${username} "$TESTDIR/rec"
+
+function keystatus {
+ log_must [ "$(get_prop keystatus "$TESTPOOL/pampam/pam/${username}")" = "$1" ]
+}
+
+log_mustnot ismounted "$TESTPOOL/pampam/pam/${username}"
+keystatus unavailable
+
+function test_session {
+ echo "recurpass" | pamtester ${pamservice} ${username} open_session
+ references 1
+ log_must ismounted "$TESTPOOL/pampam/pam/${username}"
+ keystatus available
+
+ log_must pamtester ${pamservice} ${username} close_session
+ references 0
+ log_mustnot ismounted "$TESTPOOL/pampam/pam/${username}"
+ keystatus unavailable
+}
+
+genconfig "homes=$TESTPOOL/pampam/pam prop_mountpoint runstatedir=${runstatedir}"
+test_session
+
+genconfig "homes=$TESTPOOL/pampam recursive_homes prop_mountpoint runstatedir=${runstatedir}"
+test_session
+
+genconfig "homes=$TESTPOOL recursive_homes prop_mountpoint runstatedir=${runstatedir}"
+test_session
+
+genconfig "homes=* recursive_homes prop_mountpoint runstatedir=${runstatedir}"
+test_session
+
+log_pass "done."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_short_password.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_short_password.ksh
index 443e07d7f003..079608583a72 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_short_password.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pam/pam_short_password.ksh
@@ -1,88 +1,88 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2021 Attila Fülöp <attila@fueloep.org>
#
. $STF_SUITE/tests/functional/pam/utilities.kshlib
if [ -n "$ASAN_OPTIONS" ]; then
export LD_PRELOAD=$(ldd "$(command -v zfs)" | awk '/libasan\.so/ {print $3}')
fi
if [[ -z pamservice ]]; then
pamservice=pam_zfs_key_test
fi
# DESCRIPTION:
# If we set the encryption passphrase for a dataset via pam_zfs_key, a minimal
# passphrase length isn't enforced. This leads to a non-loadable key if
# `zfs load-key` enforces a minimal length. Make sure this isn't the case.
log_mustnot ismounted "$TESTPOOL/pam/${username}"
keystatus unavailable
genconfig "homes=$TESTPOOL/pam runstatedir=${runstatedir}"
# Load keys and mount userdir.
echo "testpass" | pamtester ${pamservice} ${username} open_session
references 1
log_must ismounted "$TESTPOOL/pam/${username}"
keystatus available
# Change user and dataset password to short one.
-printf "short\nshort\n" | pamtester ${pamservice} ${username} chauthtok
+printf "testpass\nshort\nshort\n" | pamtester -v ${pamservice} ${username} chauthtok
# Unmount and unload key.
log_must pamtester ${pamservice} ${username} close_session
references 0
log_mustnot ismounted "$TESTPOOL/pam/${username}"
keystatus unavailable
# Check if password change succeeded.
echo "testpass" | pamtester ${pamservice} ${username} open_session
references 1
log_mustnot ismounted "$TESTPOOL/pam/${username}"
keystatus unavailable
log_must pamtester ${pamservice} ${username} close_session
references 0
echo "short" | pamtester ${pamservice} ${username} open_session
references 1
log_must ismounted "$TESTPOOL/pam/${username}"
keystatus available
# Finally check if `zfs load-key` succeeds with the short password.
log_must pamtester ${pamservice} ${username} close_session
references 0
log_mustnot ismounted "$TESTPOOL/pam/${username}"
keystatus unavailable
echo "short" | zfs load-key "$TESTPOOL/pam/${username}"
keystatus available
zfs unload-key "$TESTPOOL/pam/${username}"
keystatus unavailable
log_pass "done."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh
index f970935f5bd0..087aef9027ea 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh
@@ -1,111 +1,113 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017, 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib
#
# DESCRIPTION:
# Discard checkpoint on a stressed pool. Ensure that we can
# export and import the pool while discarding but not run any
# operations that have to do with the checkpoint or change the
# pool's config.
#
# STRATEGY:
# 1. Import a pool that's slightly fragmented
# 2. Take checkpoint
# 3. Do more random writes to "free" checkpointed blocks
# 4. Start discarding checkpoint
# 5. Export pool while discarding checkpoint
# 6. Attempt to rewind (should fail)
# 7. Import pool and ensure that discard is still running
# 8. Attempt to run checkpoint commands, or commands that
# change the pool's config (should fail)
#
verify_runnable "global"
+log_unsupported "Skipping, issue https://github.com/openzfs/zfs/issues/12053"
+
function test_cleanup
{
# reset memory limit to 16M
set_tunable64 SPA_DISCARD_MEMORY_LIMIT 1000000
cleanup_nested_pools
}
setup_nested_pool_state
log_onexit test_cleanup
#
# Force discard to happen slower so it spans over
# multiple txgs.
#
# Set memory limit to 128 bytes. Assuming that we
# use 64-bit words for encoding space map entries,
# ZFS will discard 8 non-debug entries per txg
# (so at most 16 space map entries in debug-builds
# due to debug entries).
#
# That should give us more than enough txgs to be
# discarding the checkpoint for a long time as with
# the current setup the checkpoint space maps should
# have tens of thousands of entries.
#
# Note: If two-words entries are used in the space
# map, we should have even more time to
# verify this.
#
set_tunable64 SPA_DISCARD_MEMORY_LIMIT 128
log_must zpool checkpoint $NESTEDPOOL
fragment_after_checkpoint_and_verify
log_must zpool checkpoint -d $NESTEDPOOL
log_must zpool export $NESTEDPOOL
#
# Verify on-disk state while pool is exported
#
log_must zdb -e -p $FILEDISKDIR $NESTEDPOOL
#
# Attempt to rewind on a pool that is discarding
# a checkpoint.
#
log_mustnot zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL
log_must zpool import -d $FILEDISKDIR $NESTEDPOOL
#
# Discarding should continue after import, so
# all the following operations should fail.
#
log_mustnot zpool checkpoint $NESTEDPOOL
log_mustnot zpool checkpoint -d $NESTEDPOOL
log_mustnot zpool remove $NESTEDPOOL $FILEDISK1
log_mustnot zpool reguid $NESTEDPOOL
# reset memory limit to 16M
set_tunable64 SPA_DISCARD_MEMORY_LIMIT 16777216
nested_wait_discard_finish
log_must zpool export $NESTEDPOOL
log_must zdb -e -p $FILEDISKDIR $NESTEDPOOL
log_pass "Can export/import but not rewind/checkpoint/discard or " \
"change pool's config while discarding."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh
index 9ebd5b149118..619d8d0e8f07 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh
@@ -1,96 +1,105 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2022 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib
#
# DESCRIPTION:
# Verify that a zvol Force Unit Access (FUA) write works.
#
# STRATEGY:
# 1. dd write 5MB of data with "oflag=dsync,direct" to a zvol. Those flags
# together do a FUA write.
# 2. Verify the data is correct.
# 3. Repeat 1-2 for both the blk-mq and non-blk-mq cases.
verify_runnable "global"
if ! is_physical_device $DISKS; then
log_unsupported "This directory cannot be run on raw files."
fi
if ! is_linux ; then
log_unsupported "Only linux supports dd with oflag=dsync for FUA writes"
+else
+ if [[ $(linux_version) -gt $(linux_version "6.2") ]]; then
+ log_unsupported "Disabled while issue #14872 is being worked"
+ fi
+
+ # Disabled for the CentOS 9 kernel
+ if [[ $(linux_version) -eq $(linux_version "5.14") ]]; then
+ log_unsupported "Disabled while issue #14872 is being worked"
+ fi
fi
typeset datafile1="$(mktemp zvol_misc_fua1.XXXXXX)"
typeset datafile2="$(mktemp zvol_misc_fua2.XXXXXX)"
typeset zvolpath=${ZVOL_DEVDIR}/$TESTPOOL/$TESTVOL
function cleanup
{
rm "$datafile1" "$datafile2"
}
function do_test {
# Wait for udev to create symlinks to our zvol
block_device_wait $zvolpath
# Create a data file
log_must dd if=/dev/urandom of="$datafile1" bs=1M count=5
# Write the data to our zvol using FUA
log_must dd if=$datafile1 of=$zvolpath oflag=dsync,direct bs=1M count=5
# Extract data from our zvol
log_must dd if=$zvolpath of="$datafile2" bs=1M count=5
# Compare the data we expect with what's on our zvol. diff will return
# non-zero if they differ.
log_must diff $datafile1 $datafile2
log_must rm $datafile1 $datafile2
}
log_assert "Verify that a ZFS volume can do Force Unit Access (FUA)"
log_onexit cleanup
log_must zfs set compression=off $TESTPOOL/$TESTVOL
log_note "Testing without blk-mq"
set_blk_mq 0
log_must zpool export $TESTPOOL
log_must zpool import $TESTPOOL
do_test
set_blk_mq 1
log_must zpool export $TESTPOOL
log_must zpool import $TESTPOOL
do_test
log_pass "ZFS volume FUA works"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_trim.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_trim.ksh
index 46cac3ecb6c2..c0b191aafd45 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_trim.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_trim.ksh
@@ -1,137 +1,145 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2022 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/include/math.shlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib
#
# DESCRIPTION:
# Verify we can TRIM a zvol
#
# STRATEGY:
# 1. TRIM the entire zvol to remove data from older tests
# 2. Create a 5MB data file
# 3. Write the file to the zvol
# 4. Observe 5MB of used space on the zvol
# 5. TRIM the first 1MB and last 2MB of the 5MB block of data.
# 6. Observe 2MB of used space on the zvol
# 7. Verify the trimmed regions are zero'd on the zvol
verify_runnable "global"
if is_linux ; then
+ if [[ $(linux_version) -gt $(linux_version "6.2") ]]; then
+ log_unsupported "Disabled while issue #14872 is being worked"
+ fi
+
+ # Disabled for the CentOS 9 kernel
+ if [[ $(linux_version) -eq $(linux_version "5.14") ]]; then
+ log_unsupported "Disabled while issue #14872 is being worked"
+ fi
+
# We need '--force' here since the prior tests may leave a filesystem
# on the zvol, and blkdiscard will see that filesystem and print a
# warning unless you force it.
#
# Only blkdiscard >= v2.36 supports --force, so we need to
# check for it.
if blkdiscard --help | grep -q '\-\-force' ; then
trimcmd='blkdiscard --force'
else
trimcmd='blkdiscard'
fi
else
# By default, FreeBSD 'trim' always does a dry-run. '-f' makes
# it perform the actual operation.
trimcmd='trim -f'
fi
if ! is_physical_device $DISKS; then
log_unsupported "This directory cannot be run on raw files."
fi
typeset datafile1="$(mktemp zvol_misc_flags1.XXXXXX)"
typeset datafile2="$(mktemp zvol_misc_flags2.XXXXXX)"
typeset zvolpath=${ZVOL_DEVDIR}/$TESTPOOL/$TESTVOL
function cleanup
{
rm "$datafile1" "$datafile2"
}
function do_test {
# Wait for udev to create symlinks to our zvol
block_device_wait $zvolpath
# Create a data file
log_must dd if=/dev/urandom of="$datafile1" bs=1M count=5
# Write to zvol
log_must dd if=$datafile1 of=$zvolpath conv=fsync
sync_pool
# Record how much space we've used (should be 5MB, with 128k
# of tolerance).
before="$(get_prop refer $TESTPOOL/$TESTVOL)"
log_must within_tolerance $before 5242880 131072
# We currently have 5MB of random data on the zvol.
# Trim the first 1MB and also trim 2MB at offset 3MB.
log_must $trimcmd -l $((1 * 1048576)) $zvolpath
log_must $trimcmd -o $((3 * 1048576)) -l $((2 * 1048576)) $zvolpath
sync_pool
# After trimming 3MB, the zvol should have 2MB of data (with 128k of
# tolerance).
after="$(get_prop refer $TESTPOOL/$TESTVOL)"
log_must within_tolerance $after 2097152 131072
# Make the same holes in our test data
log_must dd if=/dev/zero of="$datafile1" bs=1M count=1 conv=notrunc
log_must dd if=/dev/zero of="$datafile1" bs=1M count=2 seek=3 conv=notrunc
# Extract data from our zvol
log_must dd if=$zvolpath of="$datafile2" bs=1M count=5
# Compare the data we expect with what's on our zvol. diff will return
# non-zero if they differ.
log_must diff $datafile1 $datafile2
log_must rm $datafile1 $datafile2
}
log_assert "Verify that a ZFS volume can be TRIMed"
log_onexit cleanup
log_must zfs set compression=off $TESTPOOL/$TESTVOL
# Remove old data from previous tests
log_must $trimcmd $zvolpath
-
set_blk_mq 1
log_must_busy zpool export $TESTPOOL
log_must zpool import $TESTPOOL
do_test
set_blk_mq 0
log_must_busy zpool export $TESTPOOL
log_must zpool import $TESTPOOL
do_test
log_pass "ZFS volumes can be trimmed"
diff --git a/sys/modules/zfs/Makefile b/sys/modules/zfs/Makefile
index c72becff924e..2cfdf06f8f01 100644
--- a/sys/modules/zfs/Makefile
+++ b/sys/modules/zfs/Makefile
@@ -1,529 +1,528 @@
# $FreeBSD$
SRCDIR=${SRCTOP}/sys/contrib/openzfs/module
INCDIR=${SRCTOP}/sys/contrib/openzfs/include
KMOD= zfs
.PATH: ${SRCDIR}/avl \
${SRCDIR}/lua \
${SRCDIR}/nvpair \
${SRCDIR}/icp/algs/blake3 \
${SRCDIR}/icp/algs/edonr \
${SRCDIR}/icp/algs/sha2 \
${SRCDIR}/icp/asm-aarch64/blake3 \
${SRCDIR}/icp/asm-aarch64/sha2 \
${SRCDIR}/icp/asm-arm/sha2 \
${SRCDIR}/icp/asm-ppc64/sha2 \
${SRCDIR}/icp/asm-ppc64/blake3 \
${SRCDIR}/icp/asm-x86_64/blake3 \
${SRCDIR}/icp/asm-x86_64/sha2 \
${SRCDIR}/os/freebsd/spl \
${SRCDIR}/os/freebsd/zfs \
${SRCDIR}/unicode \
${SRCDIR}/zcommon \
${SRCDIR}/zfs \
${SRCDIR}/zstd \
${SRCDIR}/zstd/lib/common \
${SRCDIR}/zstd/lib/compress \
${SRCDIR}/zstd/lib/decompress
CFLAGS+= -I${INCDIR}
CFLAGS+= -I${SRCDIR}/icp/include
CFLAGS+= -I${INCDIR}/os/freebsd
CFLAGS+= -I${INCDIR}/os/freebsd/spl
CFLAGS+= -I${INCDIR}/os/freebsd/zfs
CFLAGS+= -I${SRCDIR}/zstd/include
CFLAGS+= -I${.CURDIR}
CFLAGS+= -D__KERNEL__ -DFREEBSD_NAMECACHE -DBUILDING_ZFS \
-DHAVE_UIO_ZEROCOPY -DWITHOUT_NETDUMP -D__KERNEL -D_SYS_CONDVAR_H_ \
-D_SYS_VMEM_H_ -DIN_FREEBSD_BASE
.if ${MACHINE_ARCH} == "amd64"
CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
-DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW
.endif
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
CFLAGS+= -DBITS_PER_LONG=32
.else
CFLAGS+= -DBITS_PER_LONG=64
.endif
SRCS= vnode_if.h device_if.h bus_if.h
# avl
SRCS+= avl.c
# icp
SRCS+= edonr.c
#icp/algs/blake3
SRCS+= blake3.c \
blake3_generic.c \
blake3_impl.c
.if ${MACHINE_ARCH} == "aarch64"
#icp/asm-aarch64/blake3
SRCS+= b3_aarch64_sse2.S \
b3_aarch64_sse41.S
.endif
.if ${MACHINE_ARCH} == "powerpc64le"
#icp/asm-ppc64/blake3
SRCS+= b3_ppc64le_sse2.S \
b3_ppc64le_sse41.S
.endif
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
#icp/asm-x86_64/blake3
SRCS+= blake3_avx2.S \
blake3_avx512.S \
blake3_sse2.S \
blake3_sse41.S
.endif
#icp/algs/sha2
SRCS+= sha2_generic.c \
sha256_impl.c \
sha512_impl.c
.if ${MACHINE_ARCH} == "armv7"
#icp/asm-arm/sha2
SRCS+= sha256-armv7.S \
sha512-armv7.S
.endif
.if ${MACHINE_ARCH} == "aarch64"
#icp/asm-aarch64/sha2
OBJS+= zfs-sha256-armv8.o \
zfs-sha512-armv8.o
.endif
.if ${MACHINE_ARCH} == "powerpc64" || ${MACHINE_ARCH} == "powerpc64le"
#icp/asm-ppc64/sha2
SRCS+= sha256-p8.S \
sha512-p8.S \
sha256-ppc.S \
sha512-ppc.S
.endif
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
#icp/asm-x86_64/sha2
OBJS+= zfs-sha256-x86_64.o \
zfs-sha512-x86_64.o
.endif
#lua
SRCS+= lapi.c \
lauxlib.c \
lbaselib.c \
lcode.c \
lcompat.c \
lcorolib.c \
lctype.c \
ldebug.c \
ldo.c \
lfunc.c \
lgc.c \
llex.c \
lmem.c \
lobject.c \
lopcodes.c \
lparser.c \
lstate.c \
lstring.c \
lstrlib.c \
ltable.c \
ltablib.c \
ltm.c \
lvm.c \
lzio.c
#nvpair
SRCS+= nvpair.c \
fnvpair.c \
nvpair_alloc_spl.c \
nvpair_alloc_fixed.c
#os/freebsd/spl
SRCS+= acl_common.c \
callb.c \
list.c \
spl_acl.c \
spl_cmn_err.c \
spl_dtrace.c \
spl_kmem.c \
spl_kstat.c \
spl_misc.c \
spl_policy.c \
spl_procfs_list.c \
spl_string.c \
spl_sunddi.c \
spl_sysevent.c \
spl_taskq.c \
spl_uio.c \
spl_vfs.c \
spl_vm.c \
spl_zlib.c \
spl_zone.c
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
SRCS+= spl_atomic.c
.endif
#os/freebsd/zfs
SRCS+= abd_os.c \
arc_os.c \
crypto_os.c \
dmu_os.c \
event_os.c \
hkdf.c \
kmod_core.c \
spa_os.c \
sysctl_os.c \
vdev_file.c \
vdev_geom.c \
vdev_label_os.c \
zfs_acl.c \
zfs_ctldir.c \
zfs_debug.c \
zfs_dir.c \
zfs_ioctl_compat.c \
zfs_ioctl_os.c \
zfs_racct.c \
zfs_vfsops.c \
zfs_vnops_os.c \
zfs_znode.c \
zio_crypt.c \
zvol_os.c
#unicode
SRCS+= uconv.c \
u8_textprep.c
#zcommon
SRCS+= zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_avx512.c \
zfs_fletcher_intel.c \
zfs_fletcher_sse.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zpool_prop.c \
zprop_common.c
#zfs
SRCS+= abd.c \
aggsum.c \
arc.c \
blake3_zfs.c \
blkptr.c \
bplist.c \
bpobj.c \
brt.c \
btree.c \
cityhash.c \
dbuf.c \
dbuf_stats.c \
bptree.c \
bqueue.c \
dataset_kstats.c \
ddt.c \
ddt_zap.c \
dmu.c \
dmu_diff.c \
dmu_object.c \
dmu_objset.c \
dmu_recv.c \
dmu_redact.c \
dmu_send.c \
dmu_traverse.c \
dmu_tx.c \
dmu_zfetch.c \
dnode.c \
dnode_sync.c \
dsl_dataset.c \
dsl_deadlist.c \
dsl_deleg.c \
dsl_bookmark.c \
dsl_dir.c \
dsl_crypt.c \
dsl_destroy.c \
dsl_pool.c \
dsl_prop.c \
dsl_scan.c \
dsl_synctask.c \
dsl_userhold.c \
edonr_zfs.c \
fm.c \
gzip.c \
lzjb.c \
lz4.c \
lz4_zfs.c \
metaslab.c \
mmp.c \
multilist.c \
objlist.c \
pathname.c \
range_tree.c \
refcount.c \
rrwlock.c \
sa.c \
sha2_zfs.c \
skein_zfs.c \
spa.c \
spa_checkpoint.c \
spa_config.c \
spa_errlog.c \
spa_history.c \
spa_log_spacemap.c \
spa_misc.c \
spa_stats.c \
space_map.c \
space_reftree.c \
txg.c \
uberblock.c \
unique.c \
vdev.c \
- vdev_cache.c \
vdev_draid.c \
vdev_draid_rand.c \
vdev_indirect.c \
vdev_indirect_births.c \
vdev_indirect_mapping.c \
vdev_initialize.c \
vdev_label.c \
vdev_mirror.c \
vdev_missing.c \
vdev_queue.c \
vdev_raidz.c \
vdev_raidz_math.c \
vdev_raidz_math_scalar.c \
vdev_raidz_math_avx2.c \
vdev_raidz_math_avx512bw.c \
vdev_raidz_math_avx512f.c \
vdev_raidz_math_sse2.c \
vdev_raidz_math_ssse3.c \
vdev_rebuild.c \
vdev_removal.c \
vdev_root.c \
vdev_trim.c \
zap.c \
zap_leaf.c \
zap_micro.c \
zcp.c \
zcp_get.c \
zcp_global.c \
zcp_iter.c \
zcp_set.c \
zcp_synctask.c \
zfeature.c \
zfs_byteswap.c \
zfs_chksum.c \
zfs_file_os.c \
zfs_fm.c \
zfs_fuid.c \
zfs_impl.c \
zfs_ioctl.c \
zfs_log.c \
zfs_onexit.c \
zfs_quota.c \
zfs_ratelimit.c \
zfs_replay.c \
zfs_rlock.c \
zfs_sa.c \
zfs_vnops.c \
zil.c \
zio.c \
zio_checksum.c \
zio_compress.c \
zio_inject.c \
zle.c \
zrlock.c \
zthr.c \
zvol.c
#zstd
SRCS+= zfs_zstd.c \
entropy_common.c \
error_private.c \
fse_compress.c \
fse_decompress.c \
hist.c \
huf_compress.c \
huf_decompress.c \
pool.c \
xxhash.c \
zstd_common.c \
zstd_compress.c \
zstd_compress_literals.c \
zstd_compress_sequences.c \
zstd_compress_superblock.c \
zstd_ddict.c \
zstd_decompress.c \
zstd_decompress_block.c \
zstd_double_fast.c \
zstd_fast.c \
zstd_lazy.c \
zstd_ldm.c \
zstd_opt.c
.include <bsd.kmod.mk>
CFLAGS+= -include ${SRCTOP}/sys/cddl/compat/opensolaris/sys/debug_compat.h
CFLAGS+= -include ${INCDIR}/os/freebsd/spl/sys/ccompile.h
CFLAGS+= -include ${SRCTOP}/sys/modules/zfs/static_ccompile.h
CFLAGS.sysctl_os.c= -include ${SRCTOP}/sys/modules/zfs/zfs_config.h
CFLAGS.xxhash.c+= -include ${SRCTOP}/sys/sys/_null.h
CFLAGS.gcc+= -Wno-pointer-to-int-cast
CFLAGS.abd.c= -Wno-cast-qual
CFLAGS.ddt.c= -Wno-cast-qual
CFLAGS.dmu.c= -Wno-cast-qual
CFLAGS.dmu_traverse.c= -Wno-cast-qual
CFLAGS.dnode.c= ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.dsl_deadlist.c= -Wno-cast-qual
CFLAGS.dsl_dir.c= -Wno-cast-qual
CFLAGS.dsl_prop.c= -Wno-cast-qual
CFLAGS.edonr.c= -Wno-cast-qual
CFLAGS.fm.c= -Wno-cast-qual
CFLAGS.hist.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.lapi.c= -Wno-cast-qual
CFLAGS.lcompat.c= -Wno-cast-qual
CFLAGS.ldo.c= ${NO_WINFINITE_RECURSION}
CFLAGS.lobject.c= -Wno-cast-qual
CFLAGS.ltable.c= -Wno-cast-qual
CFLAGS.lvm.c= -Wno-cast-qual
CFLAGS.lz4.c= -Wno-cast-qual
CFLAGS.lz4_zfs.c= -Wno-cast-qual
CFLAGS.nvpair.c= -Wno-cast-qual -DHAVE_RPC_TYPES ${NO_WSTRINGOP_OVERREAD}
CFLAGS.pool.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.pool.c= -U__BMI__ -fno-tree-vectorize
CFLAGS.spa.c= -Wno-cast-qual
CFLAGS.spa_misc.c= -Wno-cast-qual
CFLAGS.spl_string.c= -Wno-cast-qual
CFLAGS.spl_vm.c= -Wno-cast-qual
CFLAGS.spl_zlib.c= -Wno-cast-qual
CFLAGS.u8_textprep.c= -Wno-cast-qual
CFLAGS.vdev_draid.c= -Wno-cast-qual
CFLAGS.vdev_raidz.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_avx2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_avx512f.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_scalar.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_sse2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.zap_leaf.c= -Wno-cast-qual
CFLAGS.zap_micro.c= -Wno-cast-qual
CFLAGS.zcp.c= -Wno-cast-qual
CFLAGS.zfs_fletcher.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_avx512.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_intel.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_sse.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fm.c= -Wno-cast-qual ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.zfs_ioctl.c= -Wno-cast-qual
CFLAGS.zfs_log.c= -Wno-cast-qual
CFLAGS.zfs_vnops_os.c= -Wno-pointer-arith
CFLAGS.zfs_zstd.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zil.c= -Wno-cast-qual
CFLAGS.zio.c= -Wno-cast-qual
CFLAGS.zprop_common.c= -Wno-cast-qual
CFLAGS.zrlock.c= -Wno-cast-qual
#zstd
CFLAGS.entropy_common.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.error_private.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.fse_compress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL} ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.fse_decompress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_compress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.huf_decompress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.xxhash.c+= -U__BMI__ -fno-tree-vectorize
CFLAGS.xxhash.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_common.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_literals.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_sequences.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_compress_superblock.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL} ${NO_WUNUSED_BUT_SET_VARIABLE}
CFLAGS.zstd_ddict.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_decompress_block.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_double_fast.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_fast.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_lazy.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_ldm.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
CFLAGS.zstd_opt.c= -U__BMI__ -fno-tree-vectorize ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
.if ${MACHINE_ARCH} == "aarch64"
__ZFS_ZSTD_AARCH64_FLAGS= -include ${SRCDIR}/zstd/include/aarch64_compat.h
CFLAGS.zstd.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.entropy_common.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.error_private.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.fse_compress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.fse_decompress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.hist.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.huf_compress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.huf_decompress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.pool.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.xxhash.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_common.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress_literals.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress_sequences.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_compress_superblock.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_ddict.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_decompress.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_decompress_block.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_double_fast.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_fast.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_lazy.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_ldm.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_opt.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
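# The BLAKE3 and SHA-2 aarch64 assembly objects below are built with explicit
# rules so that -mgeneral-regs-only is filtered out of CFLAGS (these .S files
# use FP/SIMD registers) and CTF conversion still runs on each object.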
b3_aarch64_sse2.o: b3_aarch64_sse2.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
b3_aarch64_sse41.o: b3_aarch64_sse41.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
zfs-sha256-armv8.o: sha256-armv8.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} \
${SRCDIR}/icp/asm-aarch64/sha2/sha256-armv8.S \
-o ${.TARGET}
${CTFCONVERT_CMD}
zfs-sha512-armv8.o: sha512-armv8.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} \
${SRCDIR}/icp/asm-aarch64/sha2/sha512-armv8.S \
-o ${.TARGET}
${CTFCONVERT_CMD}
.endif
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
zfs-sha256-x86_64.o: sha256-x86_64.S
${CC} -c ${CFLAGS} ${WERROR} \
${SRCDIR}/icp/asm-x86_64/sha2/sha256-x86_64.S \
-o ${.TARGET}
${CTFCONVERT_CMD}
zfs-sha512-x86_64.o: sha512-x86_64.S
${CC} -c ${CFLAGS} ${WERROR} \
${SRCDIR}/icp/asm-x86_64/sha2/sha512-x86_64.S \
-o ${.TARGET}
${CTFCONVERT_CMD}
.endif
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index d0ad587ed327..dc40483fe785 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -1,1087 +1,1090 @@
/*
* $FreeBSD$
*/
/* zfs_config.h. Generated from zfs_config.h.in by configure. */
/* zfs_config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if translation of program messages to the user's native
language is requested. */
/* #undef ENABLE_NLS */
/* bio_end_io_t wants 1 arg */
/* #undef HAVE_1ARG_BIO_END_IO_T */
/* lookup_bdev() wants 1 arg */
/* #undef HAVE_1ARG_LOOKUP_BDEV */
/* submit_bio() wants 1 arg */
/* #undef HAVE_1ARG_SUBMIT_BIO */
/* bdi_setup_and_register() wants 2 args */
/* #undef HAVE_2ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 2 args */
/* #undef HAVE_2ARGS_VFS_GETATTR */
/* zlib_deflate_workspacesize() wants 2 args */
/* #undef HAVE_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE */
/* bdi_setup_and_register() wants 3 args */
/* #undef HAVE_3ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 3 args */
/* #undef HAVE_3ARGS_VFS_GETATTR */
/* vfs_getattr wants 4 args */
/* #undef HAVE_4ARGS_VFS_GETATTR */
/* kernel has access_ok with 'type' parameter */
/* #undef HAVE_ACCESS_OK_TYPE */
/* posix_acl has refcount_t */
/* #undef HAVE_ACL_REFCOUNT */
/* add_disk() returns int */
/* #undef HAVE_ADD_DISK_RET */
/* Define if host toolchain supports AES */
#define HAVE_AES 1
/* Define if you have [rt] */
#define HAVE_AIO_H 1
#ifdef __amd64__
#ifndef RESCUE
/* Define if host toolchain supports AVX */
#define HAVE_AVX 1
#endif
/* Define if host toolchain supports AVX2 */
#define HAVE_AVX2 1
/* Define if host toolchain supports AVX512BW */
#define HAVE_AVX512BW 1
/* Define if host toolchain supports AVX512CD */
#define HAVE_AVX512CD 1
/* Define if host toolchain supports AVX512DQ */
#define HAVE_AVX512DQ 1
/* Define if host toolchain supports AVX512ER */
#define HAVE_AVX512ER 1
/* Define if host toolchain supports AVX512F */
#define HAVE_AVX512F 1
/* Define if host toolchain supports AVX512IFMA */
#define HAVE_AVX512IFMA 1
/* Define if host toolchain supports AVX512PF */
#define HAVE_AVX512PF 1
/* Define if host toolchain supports AVX512VBMI */
#define HAVE_AVX512VBMI 1
/* Define if host toolchain supports AVX512VL */
#define HAVE_AVX512VL 1
#endif
/* bdevname() is available */
/* #undef HAVE_BDEVNAME */
/* bdev_check_media_change() exists */
/* #undef HAVE_BDEV_CHECK_MEDIA_CHANGE */
/* bdev_*_io_acct() available */
/* #undef HAVE_BDEV_IO_ACCT_63 */
/* bdev_*_io_acct() available */
/* #undef HAVE_BDEV_IO_ACCT_OLD */
/* bdev_kobj() exists */
/* #undef HAVE_BDEV_KOBJ */
/* bdev_max_discard_sectors() is available */
/* #undef HAVE_BDEV_MAX_DISCARD_SECTORS */
/* bdev_max_secure_erase_sectors() is available */
/* #undef HAVE_BDEV_MAX_SECURE_ERASE_SECTORS */
/* block_device_operations->submit_bio() returns void */
/* #undef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID */
/* bdev_whole() is available */
/* #undef HAVE_BDEV_WHOLE */
/* bio_alloc() takes 4 arguments */
/* #undef HAVE_BIO_ALLOC_4ARG */
/* bio->bi_bdev->bd_disk exists */
/* #undef HAVE_BIO_BDEV_DISK */
/* bio->bi_opf is defined */
/* #undef HAVE_BIO_BI_OPF */
/* bio->bi_status exists */
/* #undef HAVE_BIO_BI_STATUS */
/* bio has bi_iter */
/* #undef HAVE_BIO_BVEC_ITER */
/* bio_*_io_acct() available */
/* #undef HAVE_BIO_IO_ACCT */
/* bio_max_segs() is implemented */
/* #undef HAVE_BIO_MAX_SEGS */
/* bio_set_dev() is available */
/* #undef HAVE_BIO_SET_DEV */
/* bio_set_dev() GPL-only */
/* #undef HAVE_BIO_SET_DEV_GPL_ONLY */
/* bio_set_dev() is a macro */
/* #undef HAVE_BIO_SET_DEV_MACRO */
/* bio_set_op_attrs is available */
/* #undef HAVE_BIO_SET_OP_ATTRS */
/* blkdev_get_by_path() handles ERESTARTSYS */
/* #undef HAVE_BLKDEV_GET_ERESTARTSYS */
/* blkdev_issue_discard() is available */
/* #undef HAVE_BLKDEV_ISSUE_DISCARD */
/* blkdev_issue_secure_erase() is available */
/* #undef HAVE_BLKDEV_ISSUE_SECURE_ERASE */
/* blkdev_reread_part() exists */
/* #undef HAVE_BLKDEV_REREAD_PART */
/* blkg_tryget() is available */
/* #undef HAVE_BLKG_TRYGET */
/* blkg_tryget() GPL-only */
/* #undef HAVE_BLKG_TRYGET_GPL_ONLY */
/* blk_alloc_disk() exists */
/* #undef HAVE_BLK_ALLOC_DISK */
/* blk_alloc_queue() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN */
/* blk_alloc_queue_rh() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH */
/* blk_cleanup_disk() exists */
/* #undef HAVE_BLK_CLEANUP_DISK */
/* block multiqueue is available */
/* #undef HAVE_BLK_MQ */
/* blk queue backing_dev_info is dynamic */
/* #undef HAVE_BLK_QUEUE_BDI_DYNAMIC */
/* blk_queue_discard() is available */
/* #undef HAVE_BLK_QUEUE_DISCARD */
/* blk_queue_flag_clear() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_CLEAR */
/* blk_queue_flag_set() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_SET */
/* blk_queue_flush() is available */
/* #undef HAVE_BLK_QUEUE_FLUSH */
/* blk_queue_flush() is GPL-only */
/* #undef HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
/* blk_queue_secdiscard() is available */
/* #undef HAVE_BLK_QUEUE_SECDISCARD */
/* blk_queue_secure_erase() is available */
/* #undef HAVE_BLK_QUEUE_SECURE_ERASE */
/* blk_queue_update_readahead() exists */
/* #undef HAVE_BLK_QUEUE_UPDATE_READAHEAD */
/* blk_queue_write_cache() exists */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE */
/* blk_queue_write_cache() is GPL-only */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY */
/* Define if revalidate_disk() in block_device_operations */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK */
/* Define to 1 if you have the Mac OS X function CFLocaleCopyCurrent in the
CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYCURRENT */
/* Define to 1 if you have the Mac OS X function
CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES */
/* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in
the CoreFoundation framework. */
/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */
/* check_disk_change() exists */
/* #undef HAVE_CHECK_DISK_CHANGE */
/* clear_inode() is available */
/* #undef HAVE_CLEAR_INODE */
/* dentry uses const struct dentry_operations */
/* #undef HAVE_CONST_DENTRY_OPERATIONS */
/* copy_from_iter() is available */
/* #undef HAVE_COPY_FROM_ITER */
/* copy_to_iter() is available */
/* #undef HAVE_COPY_TO_ITER */
/* cpu_has_feature() is GPL-only */
/* #undef HAVE_CPU_HAS_FEATURE_GPL_ONLY */
/* yes */
/* #undef HAVE_CPU_HOTPLUG */
/* current_time() exists */
/* #undef HAVE_CURRENT_TIME */
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
/* #undef HAVE_DCGETTEXT */
/* DECLARE_EVENT_CLASS() is available */
/* #undef HAVE_DECLARE_EVENT_CLASS */
/* dentry aliases are in d_u member */
/* #undef HAVE_DENTRY_D_U_ALIASES */
/* dequeue_signal() takes 4 arguments */
/* #undef HAVE_DEQUEUE_SIGNAL_4ARG */
/* lookup_bdev() wants dev_t arg */
/* #undef HAVE_DEVT_LOOKUP_BDEV */
/* sops->dirty_inode() wants flags */
/* #undef HAVE_DIRTY_INODE_WITH_FLAGS */
/* disk_*_io_acct() available */
/* #undef HAVE_DISK_IO_ACCT */
/* disk_update_readahead() exists */
/* #undef HAVE_DISK_UPDATE_READAHEAD */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* d_make_root() is available */
/* #undef HAVE_D_MAKE_ROOT */
/* d_prune_aliases() is available */
/* #undef HAVE_D_PRUNE_ALIASES */
/* dops->d_revalidate() operation takes nameidata */
/* #undef HAVE_D_REVALIDATE_NAMEIDATA */
/* eops->encode_fh() wants child and parent inodes */
/* #undef HAVE_ENCODE_FH_WITH_INODE */
/* sops->evict_inode() exists */
/* #undef HAVE_EVICT_INODE */
/* FALLOC_FL_ZERO_RANGE is defined */
/* #undef HAVE_FALLOC_FL_ZERO_RANGE */
/* fault_in_iov_iter_readable() is available */
/* #undef HAVE_FAULT_IN_IOV_ITER_READABLE */
/* filemap_range_has_page() is available */
/* #undef HAVE_FILEMAP_RANGE_HAS_PAGE */
/* fops->aio_fsync() exists */
/* #undef HAVE_FILE_AIO_FSYNC */
/* file_dentry() is available */
/* #undef HAVE_FILE_DENTRY */
/* fops->fadvise() exists */
/* #undef HAVE_FILE_FADVISE */
/* file_inode() is available */
/* #undef HAVE_FILE_INODE */
/* flush_dcache_page() is GPL-only */
/* #undef HAVE_FLUSH_DCACHE_PAGE_GPL_ONLY */
/* iops->follow_link() cookie */
/* #undef HAVE_FOLLOW_LINK_COOKIE */
/* iops->follow_link() nameidata */
/* #undef HAVE_FOLLOW_LINK_NAMEIDATA */
/* Define if compiler supports -Wformat-overflow */
/* #undef HAVE_FORMAT_OVERFLOW */
/* fops->fsync() with range */
/* #undef HAVE_FSYNC_RANGE */
/* fops->fsync() without dentry */
/* #undef HAVE_FSYNC_WITHOUT_DENTRY */
/* yes */
/* #undef HAVE_GENERIC_FADVISE */
/* generic_fillattr requires struct mnt_idmap* */
/* #undef HAVE_GENERIC_FILLATTR_IDMAP */
/* generic_fillattr requires struct user_namespace* */
/* #undef HAVE_GENERIC_FILLATTR_USERNS */
/* generic_*_io_acct() 3 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_3ARG */
/* generic_*_io_acct() 4 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_4ARG */
/* generic_readlink is global */
/* #undef HAVE_GENERIC_READLINK */
/* generic_setxattr() exists */
/* #undef HAVE_GENERIC_SETXATTR */
/* generic_write_checks() takes kiocb */
/* #undef HAVE_GENERIC_WRITE_CHECKS_KIOCB */
/* Define if the GNU gettext() function is already present or preinstalled. */
/* #undef HAVE_GETTEXT */
/* iops->get_acl() exists */
/* #undef HAVE_GET_ACL */
/* iops->get_acl() takes rcu */
/* #undef HAVE_GET_ACL_RCU */
/* has iops->get_inode_acl() */
/* #undef HAVE_GET_INODE_ACL */
/* iops->get_link() cookie */
/* #undef HAVE_GET_LINK_COOKIE */
/* iops->get_link() delayed */
/* #undef HAVE_GET_LINK_DELAYED */
/* group_info->gid exists */
/* #undef HAVE_GROUP_INFO_GID */
/* has_capability() is available */
/* #undef HAVE_HAS_CAPABILITY */
/* iattr->ia_vfsuid and iattr->ia_vfsgid exist */
/* #undef HAVE_IATTR_VFSID */
/* Define if you have the iconv() function and it works. */
#define HAVE_ICONV 1
/* iops->getattr() takes struct mnt_idmap* */
/* #undef HAVE_IDMAP_IOPS_GETATTR */
/* iops->setattr() takes struct mnt_idmap* */
/* #undef HAVE_IDMAP_IOPS_SETATTR */
/* APIs for idmapped mount are present */
/* #undef HAVE_IDMAP_MNT_API */
/* Define if compiler supports -Wimplicit-fallthrough */
/* #undef HAVE_IMPLICIT_FALLTHROUGH */
/* Define if compiler supports -Winfinite-recursion */
/* #undef HAVE_INFINITE_RECURSION */
/* yes */
/* #undef HAVE_INODE_LOCK_SHARED */
/* inode_owner_or_capable() exists */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE */
/* inode_owner_or_capable() takes mnt_idmap */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_IDMAP */
/* inode_owner_or_capable() takes user_ns */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_USERNS */
/* inode_set_flags() exists */
/* #undef HAVE_INODE_SET_FLAGS */
/* inode_set_iversion() exists */
/* #undef HAVE_INODE_SET_IVERSION */
/* inode->i_*time's are timespec64 */
/* #undef HAVE_INODE_TIMESPEC64_TIMES */
/* timestamp_truncate() exists */
/* #undef HAVE_INODE_TIMESTAMP_TRUNCATE */
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* in_compat_syscall() is available */
/* #undef HAVE_IN_COMPAT_SYSCALL */
/* iops->create() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_CREATE_IDMAP */
/* iops->create() takes struct user_namespace* */
/* #undef HAVE_IOPS_CREATE_USERNS */
/* iops->mkdir() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_MKDIR_IDMAP */
/* iops->mkdir() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKDIR_USERNS */
/* iops->mknod() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_MKNOD_IDMAP */
/* iops->mknod() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKNOD_USERNS */
/* iops->permission() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_PERMISSION_IDMAP */
/* iops->permission() takes struct user_namespace* */
/* #undef HAVE_IOPS_PERMISSION_USERNS */
/* iops->rename() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_RENAME_IDMAP */
/* iops->rename() takes struct user_namespace* */
/* #undef HAVE_IOPS_RENAME_USERNS */
/* iops->setattr() exists */
/* #undef HAVE_IOPS_SETATTR */
/* iops->symlink() takes struct mnt_idmap* */
/* #undef HAVE_IOPS_SYMLINK_IDMAP */
/* iops->symlink() takes struct user_namespace* */
/* #undef HAVE_IOPS_SYMLINK_USERNS */
/* iov_iter_advance() is available */
/* #undef HAVE_IOV_ITER_ADVANCE */
/* iov_iter_count() is available */
/* #undef HAVE_IOV_ITER_COUNT */
/* iov_iter_fault_in_readable() is available */
/* #undef HAVE_IOV_ITER_FAULT_IN_READABLE */
/* iov_iter_revert() is available */
/* #undef HAVE_IOV_ITER_REVERT */
/* iov_iter_type() is available */
/* #undef HAVE_IOV_ITER_TYPE */
/* iov_iter types are available */
/* #undef HAVE_IOV_ITER_TYPES */
/* yes */
/* #undef HAVE_IO_SCHEDULE_TIMEOUT */
/* Define to 1 if you have the `issetugid' function. */
#define HAVE_ISSETUGID 1
/* kernel has kernel_fpu_* functions */
/* #undef HAVE_KERNEL_FPU */
/* kernel has asm/fpu/api.h */
/* #undef HAVE_KERNEL_FPU_API_HEADER */
/* kernel fpu internal */
/* #undef HAVE_KERNEL_FPU_INTERNAL */
/* kernel has asm/fpu/internal.h */
/* #undef HAVE_KERNEL_FPU_INTERNAL_HEADER */
/* uncached_acl_sentinel() exists */
/* #undef HAVE_KERNEL_GET_ACL_HANDLE_CACHE */
/* Define if compiler supports -Winfinite-recursion */
/* #undef HAVE_KERNEL_INFINITE_RECURSION */
/* kernel does stack verification */
/* #undef HAVE_KERNEL_OBJTOOL */
/* kernel has linux/objtool.h */
/* #undef HAVE_KERNEL_OBJTOOL_HEADER */
/* kernel_read() take loff_t pointer */
/* #undef HAVE_KERNEL_READ_PPOS */
/* timer_list.function gets a timer_list */
/* #undef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST */
/* struct timer_list has a flags member */
/* #undef HAVE_KERNEL_TIMER_LIST_FLAGS */
/* timer_setup() is available */
/* #undef HAVE_KERNEL_TIMER_SETUP */
/* kernel_write() take loff_t pointer */
/* #undef HAVE_KERNEL_WRITE_PPOS */
/* kmem_cache_create_usercopy() exists */
/* #undef HAVE_KMEM_CACHE_CREATE_USERCOPY */
/* kstrtoul() exists */
/* #undef HAVE_KSTRTOUL */
/* ktime_get_coarse_real_ts64() exists */
/* #undef HAVE_KTIME_GET_COARSE_REAL_TS64 */
/* ktime_get_raw_ts64() exists */
/* #undef HAVE_KTIME_GET_RAW_TS64 */
/* kvmalloc exists */
/* #undef HAVE_KVMALLOC */
/* Define if you have [aio] */
/* #undef HAVE_LIBAIO */
/* Define if you have [blkid] */
/* #undef HAVE_LIBBLKID */
/* Define if you have [crypto] */
#define HAVE_LIBCRYPTO 1
/* Define if you have [tirpc] */
/* #undef HAVE_LIBTIRPC */
/* Define if you have [udev] */
/* #undef HAVE_LIBUDEV */
/* Define if you have [uuid] */
/* #undef HAVE_LIBUUID */
/* linux/blk-cgroup.h exists */
/* #undef HAVE_LINUX_BLK_CGROUP_HEADER */
/* lseek_execute() is available */
/* #undef HAVE_LSEEK_EXECUTE */
/* makedev() is declared in sys/mkdev.h */
/* #undef HAVE_MAKEDEV_IN_MKDEV */
/* makedev() is declared in sys/sysmacros.h */
/* #undef HAVE_MAKEDEV_IN_SYSMACROS */
/* Noting that make_request_fn() returns blk_qc_t */
/* #undef HAVE_MAKE_REQUEST_FN_RET_QC */
/* Noting that make_request_fn() returns void */
/* #undef HAVE_MAKE_REQUEST_FN_RET_VOID */
/* iops->mkdir() takes umode_t */
/* #undef HAVE_MKDIR_UMODE_T */
/* Define to 1 if you have the `mlockall' function. */
#define HAVE_MLOCKALL 1
/* lookup_bdev() wants mode arg */
/* #undef HAVE_MODE_LOOKUP_BDEV */
/* Define if host toolchain supports MOVBE */
#define HAVE_MOVBE 1
/* new_sync_read()/new_sync_write() are available */
/* #undef HAVE_NEW_SYNC_READ */
/* folio_wait_bit() exists */
/* #undef HAVE_PAGEMAP_FOLIO_WAIT_BIT */
/* part_to_dev() exists */
/* #undef HAVE_PART_TO_DEV */
/* iops->getattr() takes a path */
/* #undef HAVE_PATH_IOPS_GETATTR */
/* Define if host toolchain supports PCLMULQDQ */
#define HAVE_PCLMULQDQ 1
/* percpu_counter_add_batch() is defined */
/* #undef HAVE_PERCPU_COUNTER_ADD_BATCH */
/* percpu_counter_init() wants gfp_t */
/* #undef HAVE_PERCPU_COUNTER_INIT_WITH_GFP */
/* posix_acl_chmod() exists */
/* #undef HAVE_POSIX_ACL_CHMOD */
/* posix_acl_from_xattr() needs user_ns */
/* #undef HAVE_POSIX_ACL_FROM_XATTR_USERNS */
/* posix_acl_release() is available */
/* #undef HAVE_POSIX_ACL_RELEASE */
/* posix_acl_release() is GPL-only */
/* #undef HAVE_POSIX_ACL_RELEASE_GPL_ONLY */
/* posix_acl_valid() wants user namespace */
/* #undef HAVE_POSIX_ACL_VALID_WITH_NS */
/* proc_ops structure exists */
/* #undef HAVE_PROC_OPS_STRUCT */
/* iops->put_link() cookie */
/* #undef HAVE_PUT_LINK_COOKIE */
/* iops->put_link() delayed */
/* #undef HAVE_PUT_LINK_DELAYED */
/* iops->put_link() nameidata */
/* #undef HAVE_PUT_LINK_NAMEIDATA */
/* If available, contains the Python version number currently in use. */
#define HAVE_PYTHON "3.7"
/* qat is enabled and existed */
/* #undef HAVE_QAT */
+/* struct reclaim_state has reclaimed */
+/* #undef HAVE_RECLAIM_STATE_RECLAIMED */
+
/* register_shrinker is vararg */
/* #undef HAVE_REGISTER_SHRINKER_VARARG */
/* iops->rename2() exists */
/* #undef HAVE_RENAME2 */
/* struct inode_operations_wrapper takes .rename2() */
/* #undef HAVE_RENAME2_OPERATIONS_WRAPPER */
/* iops->rename() wants flags */
/* #undef HAVE_RENAME_WANTS_FLAGS */
/* REQ_DISCARD is defined */
/* #undef HAVE_REQ_DISCARD */
/* REQ_FLUSH is defined */
/* #undef HAVE_REQ_FLUSH */
/* REQ_OP_DISCARD is defined */
/* #undef HAVE_REQ_OP_DISCARD */
/* REQ_OP_FLUSH is defined */
/* #undef HAVE_REQ_OP_FLUSH */
/* REQ_OP_SECURE_ERASE is defined */
/* #undef HAVE_REQ_OP_SECURE_ERASE */
/* REQ_PREFLUSH is defined */
/* #undef HAVE_REQ_PREFLUSH */
/* revalidate_disk() is available */
/* #undef HAVE_REVALIDATE_DISK */
/* revalidate_disk_size() is available */
/* #undef HAVE_REVALIDATE_DISK_SIZE */
/* struct rw_semaphore has member activity */
/* #undef HAVE_RWSEM_ACTIVITY */
/* struct rw_semaphore has atomic_long_t member count */
/* #undef HAVE_RWSEM_ATOMIC_LONG_COUNT */
/* linux/sched/signal.h exists */
/* #undef HAVE_SCHED_SIGNAL_HEADER */
/* Define to 1 if you have the <security/pam_modules.h> header file. */
#define HAVE_SECURITY_PAM_MODULES_H 1
/* setattr_prepare() accepts mnt_idmap */
/* #undef HAVE_SETATTR_PREPARE_IDMAP */
/* setattr_prepare() is available, doesn't accept user_namespace */
/* #undef HAVE_SETATTR_PREPARE_NO_USERNS */
/* setattr_prepare() accepts user_namespace */
/* #undef HAVE_SETATTR_PREPARE_USERNS */
/* iops->set_acl() exists, takes 3 args */
/* #undef HAVE_SET_ACL */
/* iops->set_acl() takes 4 args, arg1 is struct mnt_idmap * */
/* #undef HAVE_SET_ACL_IDMAP_DENTRY */
/* iops->set_acl() takes 4 args */
/* #undef HAVE_SET_ACL_USERNS */
/* iops->set_acl() takes 4 args, arg2 is struct dentry * */
/* #undef HAVE_SET_ACL_USERNS_DENTRY_ARG2 */
/* set_cached_acl() is usable */
/* #undef HAVE_SET_CACHED_ACL_USABLE */
/* set_special_state() exists */
/* #undef HAVE_SET_SPECIAL_STATE */
/* struct shrink_control exists */
/* #undef HAVE_SHRINK_CONTROL_STRUCT */
/* kernel_siginfo_t exists */
/* #undef HAVE_SIGINFO */
/* signal_stop() exists */
/* #undef HAVE_SIGNAL_STOP */
/* new shrinker callback wants 2 args */
/* #undef HAVE_SINGLE_SHRINKER_CALLBACK */
/* cs->count_objects exists */
/* #undef HAVE_SPLIT_SHRINKER_CALLBACK */
#if defined(__amd64__) || defined(__i386__)
/* Define if host toolchain supports SSE */
#define HAVE_SSE 1
/* Define if host toolchain supports SSE2 */
#define HAVE_SSE2 1
/* Define if host toolchain supports SSE3 */
#define HAVE_SSE3 1
/* Define if host toolchain supports SSE4.1 */
#define HAVE_SSE4_1 1
/* Define if host toolchain supports SSE4.2 */
#define HAVE_SSE4_2 1
/* Define if host toolchain supports SSSE3 */
#define HAVE_SSSE3 1
#endif
/* STACK_FRAME_NON_STANDARD is defined */
/* #undef HAVE_STACK_FRAME_NON_STANDARD */
/* standalone <linux/stdarg.h> exists */
/* #undef HAVE_STANDALONE_LINUX_STDARG */
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdio.h> header file. */
#define HAVE_STDIO_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcat' function. */
#define HAVE_STRLCAT 1
/* Define to 1 if you have the `strlcpy' function. */
#define HAVE_STRLCPY 1
/* submit_bio is member of struct block_device_operations */
/* #undef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
/* super_setup_bdi_name() exists */
/* #undef HAVE_SUPER_SETUP_BDI_NAME */
/* super_block->s_user_ns exists */
/* #undef HAVE_SUPER_USER_NS */
/* struct kobj_type has default_groups */
/* #undef HAVE_SYSFS_DEFAULT_GROUPS */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* i_op->tmpfile() exists */
/* #undef HAVE_TMPFILE */
/* i_op->tmpfile() uses old dentry signature */
/* #undef HAVE_TMPFILE_DENTRY */
/* i_op->tmpfile() has mnt_idmap */
/* #undef HAVE_TMPFILE_IDMAP */
/* i_op->tmpfile() has userns */
/* #undef HAVE_TMPFILE_USERNS */
/* totalhigh_pages() exists */
/* #undef HAVE_TOTALHIGH_PAGES */
/* kernel has totalram_pages() */
/* #undef HAVE_TOTALRAM_PAGES_FUNC */
/* Define to 1 if you have the `udev_device_get_is_initialized' function. */
/* #undef HAVE_UDEV_DEVICE_GET_IS_INITIALIZED */
/* kernel has __kernel_fpu_* functions */
/* #undef HAVE_UNDERSCORE_KERNEL_FPU */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* iops->getattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_GETATTR */
/* iops->setattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_SETATTR */
/* user_namespace->ns.inum exists */
/* #undef HAVE_USER_NS_COMMON_INUM */
/* iops->getattr() takes a vfsmount */
/* #undef HAVE_VFSMOUNT_IOPS_GETATTR */
/* aops->direct_IO() uses iovec */
/* #undef HAVE_VFS_DIRECT_IO_IOVEC */
/* aops->direct_IO() uses iov_iter without rw */
/* #undef HAVE_VFS_DIRECT_IO_ITER */
/* aops->direct_IO() uses iov_iter with offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_OFFSET */
/* aops->direct_IO() uses iov_iter with rw and offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET */
/* filemap_dirty_folio exists */
/* #undef HAVE_VFS_FILEMAP_DIRTY_FOLIO */
/* All required iov_iter interfaces are available */
/* #undef HAVE_VFS_IOV_ITER */
/* fops->iterate() is available */
/* #undef HAVE_VFS_ITERATE */
/* fops->iterate_shared() is available */
/* #undef HAVE_VFS_ITERATE_SHARED */
/* fops->readdir() is available */
/* #undef HAVE_VFS_READDIR */
/* address_space_operations->readpages exists */
/* #undef HAVE_VFS_READPAGES */
/* read_folio exists */
/* #undef HAVE_VFS_READ_FOLIO */
/* fops->read/write_iter() are available */
/* #undef HAVE_VFS_RW_ITERATE */
/* __set_page_dirty_nobuffers exists */
/* #undef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS */
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
/* yes */
/* #undef HAVE_WAIT_ON_BIT_ACTION */
/* wait_queue_entry_t exists */
/* #undef HAVE_WAIT_QUEUE_ENTRY_T */
/* wq_head->head and wq_entry->entry exist */
/* #undef HAVE_WAIT_QUEUE_HEAD_ENTRY */
/* int (*writepage_t)() takes struct folio* */
/* #undef HAVE_WRITEPAGE_T_FOLIO */
/* xattr_handler->get() wants dentry */
/* #undef HAVE_XATTR_GET_DENTRY */
/* xattr_handler->get() wants both dentry and inode */
/* #undef HAVE_XATTR_GET_DENTRY_INODE */
/* xattr_handler->get() wants dentry and inode and flags */
/* #undef HAVE_XATTR_GET_DENTRY_INODE_FLAGS */
/* xattr_handler->get() wants xattr_handler */
/* #undef HAVE_XATTR_GET_HANDLER */
/* xattr_handler has name */
/* #undef HAVE_XATTR_HANDLER_NAME */
/* xattr_handler->list() wants dentry */
/* #undef HAVE_XATTR_LIST_DENTRY */
/* xattr_handler->list() wants xattr_handler */
/* #undef HAVE_XATTR_LIST_HANDLER */
/* xattr_handler->list() wants simple */
/* #undef HAVE_XATTR_LIST_SIMPLE */
/* xattr_handler->set() wants dentry */
/* #undef HAVE_XATTR_SET_DENTRY */
/* xattr_handler->set() wants both dentry and inode */
/* #undef HAVE_XATTR_SET_DENTRY_INODE */
/* xattr_handler->set() wants xattr_handler */
/* #undef HAVE_XATTR_SET_HANDLER */
/* xattr_handler->set() takes mnt_idmap */
/* #undef HAVE_XATTR_SET_IDMAP */
/* xattr_handler->set() takes user_namespace */
/* #undef HAVE_XATTR_SET_USERNS */
/* Define if host toolchain supports XSAVE */
#define HAVE_XSAVE 1
/* Define if host toolchain supports XSAVEOPT */
#define HAVE_XSAVEOPT 1
/* Define if host toolchain supports XSAVES */
#define HAVE_XSAVES 1
/* ZERO_PAGE() is GPL-only */
/* #undef HAVE_ZERO_PAGE_GPL_ONLY */
/* Define if you have [z] */
#define HAVE_ZLIB 1
/* __posix_acl_chmod() exists */
/* #undef HAVE___POSIX_ACL_CHMOD */
/* kernel exports FPU functions */
/* #undef KERNEL_EXPORTS_X86_FPU */
/* TBD: fetch(3) support */
#if 0
/* whether the chosen libfetch is to be loaded at run-time */
#define LIBFETCH_DYNAMIC 1
/* libfetch is fetch(3) */
#define LIBFETCH_IS_FETCH 1
/* libfetch is libcurl */
#define LIBFETCH_IS_LIBCURL 0
/* soname of chosen libfetch */
#define LIBFETCH_SONAME "libfetch.so.6"
#endif
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* make_request_fn() return type */
/* #undef MAKE_REQUEST_FN_RET */
/* struct shrink_control has nid */
/* #undef SHRINK_CONTROL_HAS_NID */
/* using complete_and_exit() instead */
/* #undef SPL_KTHREAD_COMPLETE_AND_EXIT */
/* Defined for legacy compatibility. */
#define SPL_META_ALIAS ZFS_META_ALIAS
/* Defined for legacy compatibility. */
#define SPL_META_RELEASE ZFS_META_RELEASE
/* Defined for legacy compatibility. */
#define SPL_META_VERSION ZFS_META_VERSION
/* pde_data() is PDE_DATA() */
/* #undef SPL_PDE_DATA */
/* Define to 1 if all of the C90 standard headers exist (not just the ones
required in a freestanding environment). This macro is provided for
backward compatibility; new code need not use it. */
#define STDC_HEADERS 1
/* True if ZFS is to be compiled for a FreeBSD system */
#define SYSTEM_FREEBSD 1
/* True if ZFS is to be compiled for a Linux system */
/* #undef SYSTEM_LINUX */
/* Version number of package */
#define VERSION "2.1.99"
/* zfs debugging enabled */
/* #undef ZFS_DEBUG */
/* /dev/zfs minor */
/* #undef ZFS_DEVICE_MINOR */
/* enum node_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_FILE_PAGES */
/* enum node_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum node_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE */
/* enum zone_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_FILE_PAGES */
/* enum zone_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum zone_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_FILE */
/* GENHD_FL_EXT_DEVT flag is not available */
/* #undef ZFS_GENHD_FL_EXT_DEVT */
/* GENHD_FL_NO_PART_SCAN flag is available */
/* #undef ZFS_GENHD_FL_NO_PART */
/* global_node_page_state() exists */
/* #undef ZFS_GLOBAL_NODE_PAGE_STATE */
/* global_zone_page_state() exists */
/* #undef ZFS_GLOBAL_ZONE_PAGE_STATE */
/* Define to 1 if GPL-only symbols can be used */
/* #undef ZFS_IS_GPL_COMPATIBLE */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.1.99-FreeBSD_gad0a55461"
+#define ZFS_META_ALIAS "zfs-2.1.99-FreeBSD_gfeff9dfed"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
/* Define the project release date. */
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
-#define ZFS_META_KVER_MAX "6.2"
+#define ZFS_META_KVER_MAX "6.3"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "3.10"
/* Define the project license. */
#define ZFS_META_LICENSE "CDDL"
/* Define the libtool library 'age' version information. */
/* #undef ZFS_META_LT_AGE */
/* Define the libtool library 'current' version information. */
/* #undef ZFS_META_LT_CURRENT */
/* Define the libtool library 'revision' version information. */
/* #undef ZFS_META_LT_REVISION */
/* Define the project name. */
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "FreeBSD_gad0a55461"
+#define ZFS_META_RELEASE "FreeBSD_gfeff9dfed"
/* Define the project version. */
#define ZFS_META_VERSION "2.1.99"
/* count is located in percpu_ref.data */
/* #undef ZFS_PERCPU_REF_COUNT_IN_DATA */
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index 5c7ebbec1dc5..932bf9730c7a 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1 +1 @@
-#define ZFS_META_GITREV "zfs-2.1.99-1955-gad0a55461"
+#define ZFS_META_GITREV "zfs-2.1.99-1993-gfeff9dfed"
